From 1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c Mon Sep 17 00:00:00 2001 From: Lorry Tar Creator Date: Tue, 27 Jun 2017 06:07:23 +0000 Subject: webkitgtk-2.16.5 --- Source/JavaScriptCore/API/APICallbackFunction.h | 24 +- Source/JavaScriptCore/API/APICast.h | 7 +- Source/JavaScriptCore/API/APIShims.h | 125 - Source/JavaScriptCore/API/APIUtils.h | 65 + Source/JavaScriptCore/API/JSAPIWrapperObject.h | 2 - Source/JavaScriptCore/API/JSBase.cpp | 63 +- Source/JavaScriptCore/API/JSBase.h | 21 +- Source/JavaScriptCore/API/JSBasePrivate.h | 6 +- Source/JavaScriptCore/API/JSCTestRunnerUtils.cpp | 19 +- Source/JavaScriptCore/API/JSCTestRunnerUtils.h | 2 + .../JavaScriptCore/API/JSCallbackConstructor.cpp | 14 +- Source/JavaScriptCore/API/JSCallbackConstructor.h | 6 +- Source/JavaScriptCore/API/JSCallbackFunction.cpp | 19 +- Source/JavaScriptCore/API/JSCallbackFunction.h | 4 +- Source/JavaScriptCore/API/JSCallbackObject.cpp | 24 +- Source/JavaScriptCore/API/JSCallbackObject.h | 68 +- .../JavaScriptCore/API/JSCallbackObjectFunctions.h | 216 +- Source/JavaScriptCore/API/JSClassRef.cpp | 40 +- Source/JavaScriptCore/API/JSClassRef.h | 16 +- Source/JavaScriptCore/API/JSContext.h | 238 + Source/JavaScriptCore/API/JSContextInternal.h | 80 + Source/JavaScriptCore/API/JSContextPrivate.h | 57 + Source/JavaScriptCore/API/JSContextRef.cpp | 222 +- Source/JavaScriptCore/API/JSContextRef.h | 12 +- .../API/JSContextRefInspectorSupport.h | 43 + Source/JavaScriptCore/API/JSContextRefInternal.h | 60 + Source/JavaScriptCore/API/JSContextRefPrivate.h | 46 +- Source/JavaScriptCore/API/JSExport.h | 146 + Source/JavaScriptCore/API/JSManagedValue.h | 81 + Source/JavaScriptCore/API/JSManagedValueInternal.h | 42 + Source/JavaScriptCore/API/JSObjectRef.cpp | 299 +- Source/JavaScriptCore/API/JSObjectRef.h | 6 +- Source/JavaScriptCore/API/JSRemoteInspector.cpp | 78 + Source/JavaScriptCore/API/JSRemoteInspector.h | 49 + Source/JavaScriptCore/API/JSRetainPtr.h | 17 +- Source/JavaScriptCore/API/JSScriptRef.cpp | 84 +- Source/JavaScriptCore/API/JSStringRef.cpp | 22 +- Source/JavaScriptCore/API/JSStringRef.h | 15 +- Source/JavaScriptCore/API/JSStringRefBSTR.cpp | 42 + Source/JavaScriptCore/API/JSStringRefBSTR.h | 62 + Source/JavaScriptCore/API/JSStringRefCF.cpp | 67 + Source/JavaScriptCore/API/JSStringRefCF.h | 60 + Source/JavaScriptCore/API/JSTypedArray.cpp | 345 + Source/JavaScriptCore/API/JSTypedArray.h | 180 + Source/JavaScriptCore/API/JSValue.h | 668 + Source/JavaScriptCore/API/JSValueInternal.h | 58 + Source/JavaScriptCore/API/JSValueRef.cpp | 187 +- Source/JavaScriptCore/API/JSValueRef.h | 66 +- Source/JavaScriptCore/API/JSVirtualMachine.h | 82 + .../JavaScriptCore/API/JSVirtualMachineInternal.h | 58 + .../JavaScriptCore/API/JSVirtualMachinePrivate.h | 45 + .../API/JSWeakObjectMapRefInternal.h | 9 +- .../API/JSWeakObjectMapRefPrivate.cpp | 101 + .../JavaScriptCore/API/JSWeakObjectMapRefPrivate.h | 92 + Source/JavaScriptCore/API/JSWrapperMap.h | 48 + Source/JavaScriptCore/API/JavaScript.h | 5 +- Source/JavaScriptCore/API/JavaScriptCore.h | 42 + Source/JavaScriptCore/API/ObjCCallbackFunction.h | 10 +- Source/JavaScriptCore/API/ObjcRuntimeExtras.h | 242 + Source/JavaScriptCore/API/OpaqueJSString.cpp | 29 +- Source/JavaScriptCore/API/OpaqueJSString.h | 26 +- Source/JavaScriptCore/API/WebKitAvailability.h | 49 +- .../API/tests/CompareAndSwapTest.cpp | 118 + .../JavaScriptCore/API/tests/CompareAndSwapTest.h | 37 + .../API/tests/CurrentThisInsideBlockGetterTest.h | 34 + .../API/tests/CustomGlobalObjectClassTest.c | 145 + 
.../API/tests/CustomGlobalObjectClassTest.h | 30 + Source/JavaScriptCore/API/tests/DateTests.h | 32 + .../API/tests/ExecutionTimeLimitTest.cpp | 374 + .../API/tests/ExecutionTimeLimitTest.h | 37 + .../API/tests/FunctionOverridesTest.cpp | 91 + .../API/tests/FunctionOverridesTest.h | 37 + .../API/tests/GlobalContextWithFinalizerTest.cpp | 55 + .../API/tests/GlobalContextWithFinalizerTest.h | 39 + Source/JavaScriptCore/API/tests/JSExportTests.h | 34 + Source/JavaScriptCore/API/tests/JSNode.c | 8 +- Source/JavaScriptCore/API/tests/JSNode.h | 11 +- Source/JavaScriptCore/API/tests/JSNodeList.c | 8 +- Source/JavaScriptCore/API/tests/JSNodeList.h | 11 +- Source/JavaScriptCore/API/tests/JSONParseTest.cpp | 69 + Source/JavaScriptCore/API/tests/JSONParseTest.h | 36 + Source/JavaScriptCore/API/tests/Node.c | 6 +- Source/JavaScriptCore/API/tests/Node.h | 11 +- Source/JavaScriptCore/API/tests/NodeList.c | 6 +- Source/JavaScriptCore/API/tests/NodeList.h | 11 +- .../API/tests/PingPongStackOverflowTest.cpp | 182 + .../API/tests/PingPongStackOverflowTest.h | 36 + Source/JavaScriptCore/API/tests/Regress141275.h | 34 + Source/JavaScriptCore/API/tests/Regress141809.h | 34 + .../JavaScriptCore/API/tests/TypedArrayCTest.cpp | 268 + Source/JavaScriptCore/API/tests/TypedArrayCTest.h | 36 + Source/JavaScriptCore/API/tests/minidom.c | 8 +- Source/JavaScriptCore/API/tests/minidom.html | 9 + Source/JavaScriptCore/API/tests/minidom.js | 110 + .../API/tests/testapi-function-overrides.js | 16 + Source/JavaScriptCore/API/tests/testapi.c | 1989 +++ Source/JavaScriptCore/API/tests/testapi.js | 307 + Source/JavaScriptCore/CMakeLists.txt | 1538 ++ Source/JavaScriptCore/ChangeLog | 16275 ------------------- Source/JavaScriptCore/DerivedSources.make | 319 + .../ForwardingHeaders/JavaScriptCore/APICast.h | 2 +- .../ForwardingHeaders/JavaScriptCore/APIShims.h | 1 - .../ForwardingHeaders/JavaScriptCore/JSBase.h | 2 +- .../JavaScriptCore/JSCTestRunnerUtils.h | 2 +- .../JavaScriptCore/JSContextRef.h | 2 +- .../ForwardingHeaders/JavaScriptCore/JSObjectRef.h | 2 +- .../JavaScriptCore/JSObjectRefPrivate.h | 1 + .../ForwardingHeaders/JavaScriptCore/JSRetainPtr.h | 2 +- .../ForwardingHeaders/JavaScriptCore/JSStringRef.h | 2 +- .../JavaScriptCore/JSStringRefCF.h | 1 + .../JavaScriptCore/JSTypedArray.h | 1 + .../ForwardingHeaders/JavaScriptCore/JSValueRef.h | 2 +- .../ForwardingHeaders/JavaScriptCore/JavaScript.h | 2 +- .../JavaScriptCore/JavaScriptCore.h | 1 + .../JavaScriptCore/OpaqueJSString.h | 2 +- .../JavaScriptCore/WebKitAvailability.h | 2 +- Source/JavaScriptCore/GNUmakefile.am | 275 - Source/JavaScriptCore/GNUmakefile.list.am | 1282 -- Source/JavaScriptCore/KeywordLookupGenerator.py | 6 +- Source/JavaScriptCore/PlatformGTK.cmake | 46 + Source/JavaScriptCore/PlatformJSCOnly.cmake | 10 + Source/JavaScriptCore/Scripts/UpdateContents.py | 48 + Source/JavaScriptCore/Scripts/builtins/__init__.py | 3 + Source/JavaScriptCore/Scripts/builtins/builtins.py | 15 + .../builtins/builtins_generate_combined_header.py | 170 + .../builtins_generate_combined_implementation.py | 97 + .../builtins_generate_internals_wrapper_header.py | 114 + ...ns_generate_internals_wrapper_implementation.py | 156 + .../builtins/builtins_generate_separate_header.py | 197 + .../builtins_generate_separate_implementation.py | 109 + .../builtins/builtins_generate_wrapper_header.py | 119 + .../builtins_generate_wrapper_implementation.py | 61 + .../Scripts/builtins/builtins_generator.py | 181 + .../Scripts/builtins/builtins_model.py | 288 + 
.../Scripts/builtins/builtins_templates.py | 212 + Source/JavaScriptCore/Scripts/cssmin.py | 49 + .../Scripts/generate-combined-inspector-json.py | 69 + .../JavaScriptCore/Scripts/generate-js-builtins.py | 176 + .../inline-and-minify-stylesheets-and-scripts.py | 81 + Source/JavaScriptCore/Scripts/jsmin.py | 238 + Source/JavaScriptCore/Scripts/lazywriter.py | 58 + .../JavaScriptCore-Builtin.Promise-Combined.js | 54 + .../JavaScriptCore-Builtin.Promise-Separate.js | 54 + .../JavaScriptCore-Builtin.prototype-Combined.js | 78 + .../JavaScriptCore-Builtin.prototype-Separate.js | 78 + .../JavaScriptCore-BuiltinConstructor-Combined.js | 110 + .../JavaScriptCore-BuiltinConstructor-Separate.js | 110 + ...avaScriptCore-InternalClashingNames-Combined.js | 41 + ...bCore-AnotherGuardedInternalBuiltin-Separate.js | 34 + .../WebCore-ArbitraryConditionalGuard-Separate.js | 33 + .../WebCore-DuplicateFlagAnnotation-Separate.js | 27 + ...WebCore-DuplicateKeyValueAnnotation-Separate.js | 34 + .../builtins/WebCore-GuardedBuiltin-Separate.js | 33 + .../WebCore-GuardedInternalBuiltin-Separate.js | 34 + .../builtins/WebCore-UnguardedBuiltin-Separate.js | 31 + .../builtins/WebCore-xmlCasingTest-Separate.js | 65 + ...vaScriptCore-Builtin.Promise-Combined.js-result | 161 + ...vaScriptCore-Builtin.Promise-Separate.js-result | 160 + ...ScriptCore-Builtin.prototype-Combined.js-result | 185 + ...ScriptCore-Builtin.prototype-Separate.js-result | 184 + ...criptCore-BuiltinConstructor-Combined.js-result | 198 + ...criptCore-BuiltinConstructor-Separate.js-result | 197 + ...iptCore-InternalClashingNames-Combined.js-error | 1 + ...ptCore-InternalClashingNames-Combined.js-result | 148 + ...notherGuardedInternalBuiltin-Separate.js-result | 227 + ...re-ArbitraryConditionalGuard-Separate.js-result | 197 + ...bCore-DuplicateFlagAnnotation-Separate.js-error | 1 + ...e-DuplicateKeyValueAnnotation-Separate.js-error | 1 + .../WebCore-GuardedBuiltin-Separate.js-result | 197 + ...bCore-GuardedInternalBuiltin-Separate.js-result | 229 + .../WebCore-UnguardedBuiltin-Separate.js-result | 188 + .../WebCore-xmlCasingTest-Separate.js-result | 280 + .../builtins/expected/WebCoreJSBuiltins.h-result | 351 + Source/JavaScriptCore/Scripts/xxd.pl | 45 + Source/JavaScriptCore/assembler/ARM64Assembler.h | 950 +- Source/JavaScriptCore/assembler/ARMAssembler.cpp | 2 - Source/JavaScriptCore/assembler/ARMAssembler.h | 176 +- Source/JavaScriptCore/assembler/ARMv7Assembler.cpp | 36 - Source/JavaScriptCore/assembler/ARMv7Assembler.h | 647 +- Source/JavaScriptCore/assembler/AbortReason.h | 79 + .../assembler/AbstractMacroAssembler.h | 423 +- .../assembler/AllowMacroScratchRegisterUsage.h | 55 + Source/JavaScriptCore/assembler/AssemblerBuffer.h | 217 +- .../assembler/AssemblerBufferWithConstantPool.h | 5 +- Source/JavaScriptCore/assembler/AssemblerCommon.h | 290 + Source/JavaScriptCore/assembler/CPU.h | 94 + Source/JavaScriptCore/assembler/CodeLocation.h | 24 +- .../assembler/DisallowMacroScratchRegisterUsage.h | 55 + Source/JavaScriptCore/assembler/LinkBuffer.cpp | 220 +- Source/JavaScriptCore/assembler/LinkBuffer.h | 92 +- Source/JavaScriptCore/assembler/MIPSAssembler.h | 43 +- Source/JavaScriptCore/assembler/MacroAssembler.cpp | 124 + Source/JavaScriptCore/assembler/MacroAssembler.h | 355 +- .../JavaScriptCore/assembler/MacroAssemblerARM.cpp | 291 +- .../JavaScriptCore/assembler/MacroAssemblerARM.h | 294 +- .../assembler/MacroAssemblerARM64.cpp | 507 + .../JavaScriptCore/assembler/MacroAssemblerARM64.h | 1545 +- .../assembler/MacroAssemblerARMv7.cpp | 348 + 
.../JavaScriptCore/assembler/MacroAssemblerARMv7.h | 391 +- .../assembler/MacroAssemblerCodeRef.cpp | 86 + .../assembler/MacroAssemblerCodeRef.h | 107 +- .../assembler/MacroAssemblerHelpers.h | 131 + .../JavaScriptCore/assembler/MacroAssemblerMIPS.h | 390 +- .../assembler/MacroAssemblerPrinter.cpp | 216 + .../assembler/MacroAssemblerPrinter.h | 302 + .../JavaScriptCore/assembler/MacroAssemblerSH4.h | 2543 --- .../JavaScriptCore/assembler/MacroAssemblerX86.h | 135 +- .../assembler/MacroAssemblerX86Common.cpp | 557 +- .../assembler/MacroAssemblerX86Common.h | 1931 ++- .../assembler/MacroAssemblerX86_64.h | 1089 +- .../assembler/MaxFrameExtentForSlowPathCall.h | 79 + Source/JavaScriptCore/assembler/RepatchBuffer.h | 191 - Source/JavaScriptCore/assembler/SH4Assembler.h | 2225 --- Source/JavaScriptCore/assembler/X86Assembler.h | 2013 ++- Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp | 49 + Source/JavaScriptCore/b3/B3ArgumentRegValue.h | 63 + Source/JavaScriptCore/b3/B3BasicBlock.cpp | 202 + Source/JavaScriptCore/b3/B3BasicBlock.h | 200 + Source/JavaScriptCore/b3/B3BasicBlockInlines.h | 98 + Source/JavaScriptCore/b3/B3BasicBlockUtils.h | 150 + Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp | 134 + Source/JavaScriptCore/b3/B3BlockInsertionSet.h | 93 + Source/JavaScriptCore/b3/B3BlockWorklist.h | 57 + Source/JavaScriptCore/b3/B3BottomProvider.h | 57 + Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp | 67 + Source/JavaScriptCore/b3/B3BreakCriticalEdges.h | 38 + Source/JavaScriptCore/b3/B3CCallValue.cpp | 45 + Source/JavaScriptCore/b3/B3CCallValue.h | 68 + Source/JavaScriptCore/b3/B3CFG.h | 76 + Source/JavaScriptCore/b3/B3CaseCollection.cpp | 48 + Source/JavaScriptCore/b3/B3CaseCollection.h | 116 + Source/JavaScriptCore/b3/B3CaseCollectionInlines.h | 53 + Source/JavaScriptCore/b3/B3CheckSpecial.cpp | 248 + Source/JavaScriptCore/b3/B3CheckSpecial.h | 165 + Source/JavaScriptCore/b3/B3CheckValue.cpp | 69 + Source/JavaScriptCore/b3/B3CheckValue.h | 68 + Source/JavaScriptCore/b3/B3Common.cpp | 76 + Source/JavaScriptCore/b3/B3Common.h | 175 + Source/JavaScriptCore/b3/B3Commutativity.cpp | 52 + Source/JavaScriptCore/b3/B3Commutativity.h | 46 + Source/JavaScriptCore/b3/B3Compilation.cpp | 55 + Source/JavaScriptCore/b3/B3Compilation.h | 67 + Source/JavaScriptCore/b3/B3Compile.cpp | 57 + Source/JavaScriptCore/b3/B3Compile.h | 52 + Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h | 139 + Source/JavaScriptCore/b3/B3Const32Value.cpp | 289 + Source/JavaScriptCore/b3/B3Const32Value.h | 97 + Source/JavaScriptCore/b3/B3Const64Value.cpp | 289 + Source/JavaScriptCore/b3/B3Const64Value.h | 97 + Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp | 207 + Source/JavaScriptCore/b3/B3ConstDoubleValue.h | 86 + Source/JavaScriptCore/b3/B3ConstFloatValue.cpp | 188 + Source/JavaScriptCore/b3/B3ConstFloatValue.h | 84 + Source/JavaScriptCore/b3/B3ConstPtrValue.h | 69 + Source/JavaScriptCore/b3/B3ConstrainedValue.cpp | 43 + Source/JavaScriptCore/b3/B3ConstrainedValue.h | 68 + Source/JavaScriptCore/b3/B3DataSection.cpp | 52 + Source/JavaScriptCore/b3/B3DataSection.h | 51 + Source/JavaScriptCore/b3/B3Dominators.h | 50 + Source/JavaScriptCore/b3/B3DuplicateTails.cpp | 162 + Source/JavaScriptCore/b3/B3DuplicateTails.h | 42 + Source/JavaScriptCore/b3/B3Effects.cpp | 132 + Source/JavaScriptCore/b3/B3Effects.h | 121 + .../b3/B3EliminateCommonSubexpressions.cpp | 703 + .../b3/B3EliminateCommonSubexpressions.h | 40 + Source/JavaScriptCore/b3/B3FenceValue.cpp | 57 + Source/JavaScriptCore/b3/B3FenceValue.h | 89 + 
Source/JavaScriptCore/b3/B3FixSSA.cpp | 270 + Source/JavaScriptCore/b3/B3FixSSA.h | 48 + Source/JavaScriptCore/b3/B3FoldPathConstants.cpp | 275 + Source/JavaScriptCore/b3/B3FoldPathConstants.h | 40 + Source/JavaScriptCore/b3/B3FrequencyClass.cpp | 53 + Source/JavaScriptCore/b3/B3FrequencyClass.h | 62 + Source/JavaScriptCore/b3/B3FrequentedBlock.h | 40 + Source/JavaScriptCore/b3/B3Generate.cpp | 127 + Source/JavaScriptCore/b3/B3Generate.h | 55 + .../JavaScriptCore/b3/B3GenericFrequentedBlock.h | 85 + Source/JavaScriptCore/b3/B3HeapRange.cpp | 49 + Source/JavaScriptCore/b3/B3HeapRange.h | 110 + Source/JavaScriptCore/b3/B3InferSwitches.cpp | 337 + Source/JavaScriptCore/b3/B3InferSwitches.h | 40 + Source/JavaScriptCore/b3/B3InsertionSet.cpp | 71 + Source/JavaScriptCore/b3/B3InsertionSet.h | 86 + Source/JavaScriptCore/b3/B3InsertionSetInlines.h | 43 + Source/JavaScriptCore/b3/B3Kind.cpp | 51 + Source/JavaScriptCore/b3/B3Kind.h | 236 + .../JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp | 93 + Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h | 40 + Source/JavaScriptCore/b3/B3LowerMacros.cpp | 500 + Source/JavaScriptCore/b3/B3LowerMacros.h | 41 + .../b3/B3LowerMacrosAfterOptimizations.cpp | 205 + .../b3/B3LowerMacrosAfterOptimizations.h | 40 + Source/JavaScriptCore/b3/B3LowerToAir.cpp | 2899 ++++ Source/JavaScriptCore/b3/B3LowerToAir.h | 41 + Source/JavaScriptCore/b3/B3MathExtras.cpp | 124 + Source/JavaScriptCore/b3/B3MathExtras.h | 44 + Source/JavaScriptCore/b3/B3MemoryValue.cpp | 74 + Source/JavaScriptCore/b3/B3MemoryValue.h | 154 + Source/JavaScriptCore/b3/B3MoveConstants.cpp | 363 + Source/JavaScriptCore/b3/B3MoveConstants.h | 40 + Source/JavaScriptCore/b3/B3OpaqueByproduct.h | 48 + Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp | 60 + Source/JavaScriptCore/b3/B3OpaqueByproducts.h | 55 + Source/JavaScriptCore/b3/B3Opcode.cpp | 323 + Source/JavaScriptCore/b3/B3Opcode.h | 314 + Source/JavaScriptCore/b3/B3Origin.cpp | 40 + Source/JavaScriptCore/b3/B3Origin.h | 60 + Source/JavaScriptCore/b3/B3OriginDump.cpp | 46 + Source/JavaScriptCore/b3/B3OriginDump.h | 53 + Source/JavaScriptCore/b3/B3PCToOriginMap.h | 69 + Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp | 175 + Source/JavaScriptCore/b3/B3PatchpointSpecial.h | 67 + Source/JavaScriptCore/b3/B3PatchpointValue.cpp | 61 + Source/JavaScriptCore/b3/B3PatchpointValue.h | 77 + Source/JavaScriptCore/b3/B3PhaseScope.cpp | 62 + Source/JavaScriptCore/b3/B3PhaseScope.h | 53 + Source/JavaScriptCore/b3/B3PhiChildren.cpp | 56 + Source/JavaScriptCore/b3/B3PhiChildren.h | 177 + Source/JavaScriptCore/b3/B3Procedure.cpp | 362 + Source/JavaScriptCore/b3/B3Procedure.h | 259 + Source/JavaScriptCore/b3/B3ProcedureInlines.h | 43 + Source/JavaScriptCore/b3/B3PureCSE.cpp | 95 + Source/JavaScriptCore/b3/B3PureCSE.h | 61 + Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp | 496 + Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h | 40 + Source/JavaScriptCore/b3/B3ReduceStrength.cpp | 2518 +++ Source/JavaScriptCore/b3/B3ReduceStrength.h | 46 + Source/JavaScriptCore/b3/B3SSACalculator.cpp | 150 + Source/JavaScriptCore/b3/B3SSACalculator.h | 167 + Source/JavaScriptCore/b3/B3SlotBaseValue.cpp | 51 + Source/JavaScriptCore/b3/B3SlotBaseValue.h | 63 + Source/JavaScriptCore/b3/B3SparseCollection.h | 142 + Source/JavaScriptCore/b3/B3StackSlot.cpp | 55 + Source/JavaScriptCore/b3/B3StackSlot.h | 105 + .../b3/B3StackmapGenerationParams.cpp | 99 + .../JavaScriptCore/b3/B3StackmapGenerationParams.h | 127 + Source/JavaScriptCore/b3/B3StackmapSpecial.cpp | 304 + 
Source/JavaScriptCore/b3/B3StackmapSpecial.h | 89 + Source/JavaScriptCore/b3/B3StackmapValue.cpp | 95 + Source/JavaScriptCore/b3/B3StackmapValue.h | 308 + Source/JavaScriptCore/b3/B3SuccessorCollection.h | 142 + Source/JavaScriptCore/b3/B3SwitchCase.cpp | 42 + Source/JavaScriptCore/b3/B3SwitchCase.h | 63 + Source/JavaScriptCore/b3/B3SwitchValue.cpp | 124 + Source/JavaScriptCore/b3/B3SwitchValue.h | 85 + Source/JavaScriptCore/b3/B3TimingScope.cpp | 55 + Source/JavaScriptCore/b3/B3TimingScope.h | 47 + Source/JavaScriptCore/b3/B3Type.cpp | 61 + Source/JavaScriptCore/b3/B3Type.h | 94 + Source/JavaScriptCore/b3/B3TypeMap.h | 108 + Source/JavaScriptCore/b3/B3UpsilonValue.cpp | 55 + Source/JavaScriptCore/b3/B3UpsilonValue.h | 72 + Source/JavaScriptCore/b3/B3UseCounts.cpp | 63 + Source/JavaScriptCore/b3/B3UseCounts.h | 56 + Source/JavaScriptCore/b3/B3Validate.cpp | 595 + Source/JavaScriptCore/b3/B3Validate.h | 38 + Source/JavaScriptCore/b3/B3Value.cpp | 870 + Source/JavaScriptCore/b3/B3Value.h | 515 + Source/JavaScriptCore/b3/B3ValueInlines.h | 247 + Source/JavaScriptCore/b3/B3ValueKey.cpp | 122 + Source/JavaScriptCore/b3/B3ValueKey.h | 199 + Source/JavaScriptCore/b3/B3ValueKeyInlines.h | 67 + Source/JavaScriptCore/b3/B3ValueRep.cpp | 202 + Source/JavaScriptCore/b3/B3ValueRep.h | 288 + Source/JavaScriptCore/b3/B3Variable.cpp | 56 + Source/JavaScriptCore/b3/B3Variable.h | 89 + Source/JavaScriptCore/b3/B3VariableValue.cpp | 66 + Source/JavaScriptCore/b3/B3VariableValue.h | 63 + Source/JavaScriptCore/b3/B3WasmAddressValue.cpp | 56 + Source/JavaScriptCore/b3/B3WasmAddressValue.h | 58 + .../JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp | 56 + Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h | 68 + Source/JavaScriptCore/b3/air/AirAllocateStack.cpp | 308 + Source/JavaScriptCore/b3/air/AirAllocateStack.h | 43 + Source/JavaScriptCore/b3/air/AirArg.cpp | 350 + Source/JavaScriptCore/b3/air/AirArg.h | 1383 ++ Source/JavaScriptCore/b3/air/AirArgInlines.h | 194 + Source/JavaScriptCore/b3/air/AirBasicBlock.cpp | 87 + Source/JavaScriptCore/b3/air/AirBasicBlock.h | 172 + Source/JavaScriptCore/b3/air/AirBlockWorklist.h | 52 + Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp | 167 + Source/JavaScriptCore/b3/air/AirCCallSpecial.h | 84 + .../b3/air/AirCCallingConvention.cpp | 127 + .../JavaScriptCore/b3/air/AirCCallingConvention.h | 51 + Source/JavaScriptCore/b3/air/AirCode.cpp | 229 + Source/JavaScriptCore/b3/air/AirCode.h | 321 + Source/JavaScriptCore/b3/air/AirCustom.cpp | 195 + Source/JavaScriptCore/b3/air/AirCustom.h | 328 + Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp | 245 + Source/JavaScriptCore/b3/air/AirDumpAsJS.h | 43 + .../JavaScriptCore/b3/air/AirEliminateDeadCode.cpp | 153 + .../JavaScriptCore/b3/air/AirEliminateDeadCode.h | 43 + Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp | 543 + Source/JavaScriptCore/b3/air/AirEmitShuffle.h | 116 + .../JavaScriptCore/b3/air/AirFixObviousSpills.cpp | 569 + Source/JavaScriptCore/b3/air/AirFixObviousSpills.h | 41 + .../b3/air/AirFixPartialRegisterStalls.cpp | 239 + .../b3/air/AirFixPartialRegisterStalls.h | 46 + Source/JavaScriptCore/b3/air/AirFrequentedBlock.h | 40 + Source/JavaScriptCore/b3/air/AirGenerate.cpp | 292 + Source/JavaScriptCore/b3/air/AirGenerate.h | 48 + Source/JavaScriptCore/b3/air/AirGenerated.cpp | 33 + .../JavaScriptCore/b3/air/AirGenerationContext.h | 59 + .../JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp | 78 + .../JavaScriptCore/b3/air/AirHandleCalleeSaves.h | 46 + Source/JavaScriptCore/b3/air/AirInsertionSet.cpp | 51 + 
Source/JavaScriptCore/b3/air/AirInsertionSet.h | 85 + Source/JavaScriptCore/b3/air/AirInst.cpp | 72 + Source/JavaScriptCore/b3/air/AirInst.h | 207 + Source/JavaScriptCore/b3/air/AirInstInlines.h | 282 + .../b3/air/AirIteratedRegisterCoalescing.cpp | 1656 ++ .../b3/air/AirIteratedRegisterCoalescing.h | 40 + Source/JavaScriptCore/b3/air/AirKind.cpp | 49 + Source/JavaScriptCore/b3/air/AirKind.h | 97 + Source/JavaScriptCore/b3/air/AirLiveness.h | 392 + .../b3/air/AirLogRegisterPressure.cpp | 103 + .../JavaScriptCore/b3/air/AirLogRegisterPressure.h | 39 + .../b3/air/AirLowerAfterRegAlloc.cpp | 250 + .../JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h | 41 + .../JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp | 114 + Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h | 41 + Source/JavaScriptCore/b3/air/AirLowerMacros.cpp | 108 + Source/JavaScriptCore/b3/air/AirLowerMacros.h | 41 + Source/JavaScriptCore/b3/air/AirOpcode.opcodes | 943 ++ .../b3/air/AirOptimizeBlockOrder.cpp | 194 + .../JavaScriptCore/b3/air/AirOptimizeBlockOrder.h | 49 + .../JavaScriptCore/b3/air/AirPadInterference.cpp | 88 + Source/JavaScriptCore/b3/air/AirPadInterference.h | 48 + Source/JavaScriptCore/b3/air/AirPhaseScope.cpp | 60 + Source/JavaScriptCore/b3/air/AirPhaseScope.h | 53 + .../b3/air/AirReportUsedRegisters.cpp | 96 + .../JavaScriptCore/b3/air/AirReportUsedRegisters.h | 41 + Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp | 171 + Source/JavaScriptCore/b3/air/AirSimplifyCFG.h | 40 + Source/JavaScriptCore/b3/air/AirSpecial.cpp | 89 + Source/JavaScriptCore/b3/air/AirSpecial.h | 140 + .../JavaScriptCore/b3/air/AirSpillEverything.cpp | 190 + Source/JavaScriptCore/b3/air/AirSpillEverything.h | 49 + Source/JavaScriptCore/b3/air/AirStackSlot.cpp | 74 + Source/JavaScriptCore/b3/air/AirStackSlot.h | 133 + Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp | 52 + Source/JavaScriptCore/b3/air/AirStackSlotKind.h | 63 + Source/JavaScriptCore/b3/air/AirTmp.cpp | 55 + Source/JavaScriptCore/b3/air/AirTmp.h | 298 + Source/JavaScriptCore/b3/air/AirTmpInlines.h | 97 + Source/JavaScriptCore/b3/air/AirTmpWidth.cpp | 183 + Source/JavaScriptCore/b3/air/AirTmpWidth.h | 114 + Source/JavaScriptCore/b3/air/AirUseCounts.h | 118 + Source/JavaScriptCore/b3/air/AirValidate.cpp | 159 + Source/JavaScriptCore/b3/air/AirValidate.h | 38 + Source/JavaScriptCore/b3/air/opcode_generator.rb | 1228 ++ Source/JavaScriptCore/b3/air/testair.cpp | 1964 +++ Source/JavaScriptCore/b3/testb3.cpp | 15923 ++++++++++++++++++ .../JavaScriptCore/bindings/ScriptFunctionCall.cpp | 49 +- .../JavaScriptCore/bindings/ScriptFunctionCall.h | 13 +- Source/JavaScriptCore/bindings/ScriptObject.cpp | 2 + Source/JavaScriptCore/bindings/ScriptObject.h | 13 +- Source/JavaScriptCore/bindings/ScriptValue.cpp | 143 +- Source/JavaScriptCore/bindings/ScriptValue.h | 16 +- Source/JavaScriptCore/builtins/ArrayConstructor.js | 114 + .../builtins/ArrayIteratorPrototype.js | 108 + Source/JavaScriptCore/builtins/ArrayPrototype.js | 782 + .../builtins/AsyncFunctionPrototype.js | 58 + .../builtins/BuiltinExecutableCreator.cpp | 38 + .../builtins/BuiltinExecutableCreator.h | 36 + .../JavaScriptCore/builtins/BuiltinExecutables.cpp | 127 + .../JavaScriptCore/builtins/BuiltinExecutables.h | 69 + Source/JavaScriptCore/builtins/BuiltinNames.h | 278 + Source/JavaScriptCore/builtins/BuiltinUtils.h | 51 + Source/JavaScriptCore/builtins/DatePrototype.js | 182 + .../JavaScriptCore/builtins/FunctionPrototype.js | 95 + .../JavaScriptCore/builtins/GeneratorPrototype.js | 86 + 
Source/JavaScriptCore/builtins/GlobalObject.js | 46 + Source/JavaScriptCore/builtins/GlobalOperations.js | 81 + .../builtins/InspectorInstrumentationObject.js | 40 + .../builtins/InternalPromiseConstructor.js | 86 + Source/JavaScriptCore/builtins/IteratorHelpers.js | 46 + .../JavaScriptCore/builtins/IteratorPrototype.js | 31 + Source/JavaScriptCore/builtins/MapPrototype.js | 46 + .../builtins/ModuleLoaderPrototype.js | 477 + .../JavaScriptCore/builtins/NumberConstructor.js | 45 + Source/JavaScriptCore/builtins/NumberPrototype.js | 45 + .../JavaScriptCore/builtins/ObjectConstructor.js | 93 + .../JavaScriptCore/builtins/PromiseConstructor.js | 131 + .../JavaScriptCore/builtins/PromiseOperations.js | 224 + Source/JavaScriptCore/builtins/PromisePrototype.js | 59 + Source/JavaScriptCore/builtins/ReflectObject.js | 61 + Source/JavaScriptCore/builtins/RegExpPrototype.js | 529 + Source/JavaScriptCore/builtins/SetPrototype.js | 46 + .../JavaScriptCore/builtins/StringConstructor.js | 60 + .../builtins/StringIteratorPrototype.js | 64 + Source/JavaScriptCore/builtins/StringPrototype.js | 300 + .../builtins/TypedArrayConstructor.js | 167 + .../JavaScriptCore/builtins/TypedArrayPrototype.js | 413 + Source/JavaScriptCore/bytecode/AccessCase.cpp | 1029 ++ Source/JavaScriptCore/bytecode/AccessCase.h | 233 + ...AdaptiveInferredPropertyValueWatchpointBase.cpp | 86 + .../AdaptiveInferredPropertyValueWatchpointBase.h | 72 + Source/JavaScriptCore/bytecode/ArithProfile.cpp | 143 + Source/JavaScriptCore/bytecode/ArithProfile.h | 241 + .../bytecode/ArrayAllocationProfile.cpp | 4 +- .../bytecode/ArrayAllocationProfile.h | 8 +- Source/JavaScriptCore/bytecode/ArrayProfile.cpp | 46 +- Source/JavaScriptCore/bytecode/ArrayProfile.h | 93 +- Source/JavaScriptCore/bytecode/ByValInfo.h | 90 +- .../JavaScriptCore/bytecode/BytecodeBasicBlock.cpp | 170 +- .../JavaScriptCore/bytecode/BytecodeBasicBlock.h | 69 +- .../JavaScriptCore/bytecode/BytecodeConventions.h | 10 +- .../bytecode/BytecodeGeneratorification.cpp | 268 + .../bytecode/BytecodeGeneratorification.h | 37 + Source/JavaScriptCore/bytecode/BytecodeGraph.h | 125 + .../bytecode/BytecodeIntrinsicRegistry.cpp | 93 + .../bytecode/BytecodeIntrinsicRegistry.h | 108 + Source/JavaScriptCore/bytecode/BytecodeKills.h | 177 + Source/JavaScriptCore/bytecode/BytecodeList.json | 200 + .../bytecode/BytecodeLivenessAnalysis.cpp | 350 +- .../bytecode/BytecodeLivenessAnalysis.h | 51 +- .../bytecode/BytecodeLivenessAnalysisInlines.h | 175 +- .../JavaScriptCore/bytecode/BytecodeRewriter.cpp | 116 + Source/JavaScriptCore/bytecode/BytecodeRewriter.h | 235 + Source/JavaScriptCore/bytecode/BytecodeUseDef.h | 334 +- Source/JavaScriptCore/bytecode/CallEdge.cpp | 37 + Source/JavaScriptCore/bytecode/CallEdge.h | 67 + Source/JavaScriptCore/bytecode/CallLinkInfo.cpp | 249 +- Source/JavaScriptCore/bytecode/CallLinkInfo.h | 353 +- Source/JavaScriptCore/bytecode/CallLinkStatus.cpp | 340 +- Source/JavaScriptCore/bytecode/CallLinkStatus.h | 124 +- Source/JavaScriptCore/bytecode/CallMode.cpp | 49 + Source/JavaScriptCore/bytecode/CallMode.h | 51 + .../bytecode/CallReturnOffsetToBytecodeOffset.h | 8 +- Source/JavaScriptCore/bytecode/CallVariant.cpp | 97 + Source/JavaScriptCore/bytecode/CallVariant.h | 214 + Source/JavaScriptCore/bytecode/CodeBlock.cpp | 3587 ++-- Source/JavaScriptCore/bytecode/CodeBlock.h | 974 +- Source/JavaScriptCore/bytecode/CodeBlockHash.h | 5 +- .../bytecode/CodeBlockJettisoningWatchpoint.cpp | 12 +- .../bytecode/CodeBlockJettisoningWatchpoint.h | 13 +- 
.../JavaScriptCore/bytecode/CodeBlockWithJITType.h | 6 +- Source/JavaScriptCore/bytecode/CodeOrigin.cpp | 133 +- Source/JavaScriptCore/bytecode/CodeOrigin.h | 102 +- Source/JavaScriptCore/bytecode/CodeType.cpp | 3 + Source/JavaScriptCore/bytecode/CodeType.h | 10 +- .../JavaScriptCore/bytecode/ComplexGetStatus.cpp | 78 + Source/JavaScriptCore/bytecode/ComplexGetStatus.h | 110 + Source/JavaScriptCore/bytecode/DFGExitProfile.cpp | 27 +- Source/JavaScriptCore/bytecode/DFGExitProfile.h | 65 +- .../bytecode/DOMJITAccessCasePatchpointParams.cpp | 125 + .../bytecode/DOMJITAccessCasePatchpointParams.h | 60 + Source/JavaScriptCore/bytecode/DataFormat.cpp | 39 + Source/JavaScriptCore/bytecode/DataFormat.h | 17 +- .../bytecode/DeferredCompilationCallback.cpp | 38 +- .../bytecode/DeferredCompilationCallback.h | 19 +- .../JavaScriptCore/bytecode/DeferredSourceDump.cpp | 66 + .../JavaScriptCore/bytecode/DeferredSourceDump.h | 49 + .../bytecode/DirectEvalCodeCache.cpp | 54 + .../JavaScriptCore/bytecode/DirectEvalCodeCache.h | 114 + Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp | 45 + Source/JavaScriptCore/bytecode/EvalCodeBlock.h | 84 + Source/JavaScriptCore/bytecode/EvalCodeCache.h | 83 - Source/JavaScriptCore/bytecode/ExecutableInfo.h | 85 + .../JavaScriptCore/bytecode/ExecutionCounter.cpp | 55 +- Source/JavaScriptCore/bytecode/ExecutionCounter.h | 63 +- Source/JavaScriptCore/bytecode/ExitKind.cpp | 47 +- Source/JavaScriptCore/bytecode/ExitKind.h | 41 +- Source/JavaScriptCore/bytecode/ExitingJITType.cpp | 52 + Source/JavaScriptCore/bytecode/ExitingJITType.h | 58 + .../JavaScriptCore/bytecode/ExpressionRangeInfo.h | 12 +- .../JavaScriptCore/bytecode/FullBytecodeLiveness.h | 29 +- .../JavaScriptCore/bytecode/FunctionCodeBlock.cpp | 45 + Source/JavaScriptCore/bytecode/FunctionCodeBlock.h | 79 + Source/JavaScriptCore/bytecode/GetByIdStatus.cpp | 563 +- Source/JavaScriptCore/bytecode/GetByIdStatus.h | 114 +- Source/JavaScriptCore/bytecode/GetByIdVariant.cpp | 153 + Source/JavaScriptCore/bytecode/GetByIdVariant.h | 91 + .../bytecode/GetterSetterAccessCase.cpp | 238 + .../bytecode/GetterSetterAccessCase.h | 84 + Source/JavaScriptCore/bytecode/GlobalCodeBlock.h | 54 + Source/JavaScriptCore/bytecode/HandlerInfo.h | 91 +- Source/JavaScriptCore/bytecode/InlineAccess.cpp | 299 + Source/JavaScriptCore/bytecode/InlineAccess.h | 123 + Source/JavaScriptCore/bytecode/InlineCallFrame.cpp | 123 + Source/JavaScriptCore/bytecode/InlineCallFrame.h | 265 + .../JavaScriptCore/bytecode/InlineCallFrameSet.cpp | 3 + .../JavaScriptCore/bytecode/InlineCallFrameSet.h | 13 +- Source/JavaScriptCore/bytecode/Instruction.h | 53 +- .../bytecode/InternalFunctionAllocationProfile.h | 64 + .../bytecode/IntrinsicGetterAccessCase.cpp | 59 + .../bytecode/IntrinsicGetterAccessCase.h | 59 + Source/JavaScriptCore/bytecode/JumpTable.cpp | 2 +- Source/JavaScriptCore/bytecode/JumpTable.h | 13 +- Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h | 8 +- ...IntPrototypeLoadAdaptiveStructureWatchpoint.cpp | 65 + ...LLIntPrototypeLoadAdaptiveStructureWatchpoint.h | 48 + .../bytecode/LazyOperandValueProfile.cpp | 12 +- .../bytecode/LazyOperandValueProfile.h | 20 +- .../bytecode/MethodOfGettingAValueProfile.cpp | 27 +- .../bytecode/MethodOfGettingAValueProfile.h | 38 +- .../bytecode/ModuleNamespaceAccessCase.cpp | 84 + .../bytecode/ModuleNamespaceAccessCase.h | 64 + .../bytecode/ModuleProgramCodeBlock.cpp | 45 + .../bytecode/ModuleProgramCodeBlock.h | 79 + .../bytecode/ObjectAllocationProfile.h | 58 +- .../bytecode/ObjectPropertyCondition.cpp | 174 + 
.../bytecode/ObjectPropertyCondition.h | 269 + .../bytecode/ObjectPropertyConditionSet.cpp | 431 + .../bytecode/ObjectPropertyConditionSet.h | 181 + Source/JavaScriptCore/bytecode/Opcode.cpp | 15 +- Source/JavaScriptCore/bytecode/Opcode.h | 245 +- Source/JavaScriptCore/bytecode/Operands.h | 73 +- Source/JavaScriptCore/bytecode/OperandsInlines.h | 32 +- .../JavaScriptCore/bytecode/PolymorphicAccess.cpp | 677 + Source/JavaScriptCore/bytecode/PolymorphicAccess.h | 277 + .../bytecode/PolymorphicAccessStructureList.h | 139 - .../bytecode/PolymorphicPutByIdList.cpp | 148 - .../bytecode/PolymorphicPutByIdList.h | 195 - .../JavaScriptCore/bytecode/PreciseJumpTargets.cpp | 121 +- .../JavaScriptCore/bytecode/PreciseJumpTargets.h | 16 +- .../bytecode/PreciseJumpTargetsInlines.h | 82 + .../ProfiledCodeBlockJettisoningWatchpoint.cpp | 62 - .../ProfiledCodeBlockJettisoningWatchpoint.h | 65 - .../JavaScriptCore/bytecode/ProgramCodeBlock.cpp | 45 + Source/JavaScriptCore/bytecode/ProgramCodeBlock.h | 79 + .../JavaScriptCore/bytecode/PropertyCondition.cpp | 364 + Source/JavaScriptCore/bytecode/PropertyCondition.h | 334 + .../bytecode/ProxyableAccessCase.cpp | 66 + .../JavaScriptCore/bytecode/ProxyableAccessCase.h | 59 + Source/JavaScriptCore/bytecode/PutByIdFlags.cpp | 50 + Source/JavaScriptCore/bytecode/PutByIdFlags.h | 101 + Source/JavaScriptCore/bytecode/PutByIdStatus.cpp | 461 +- Source/JavaScriptCore/bytecode/PutByIdStatus.h | 100 +- Source/JavaScriptCore/bytecode/PutByIdVariant.cpp | 249 + Source/JavaScriptCore/bytecode/PutByIdVariant.h | 147 + Source/JavaScriptCore/bytecode/PutKind.h | 6 +- Source/JavaScriptCore/bytecode/ReduceWhitespace.h | 5 +- Source/JavaScriptCore/bytecode/SamplingTool.cpp | 478 - Source/JavaScriptCore/bytecode/SamplingTool.h | 347 - Source/JavaScriptCore/bytecode/SpecialPointer.cpp | 1 + Source/JavaScriptCore/bytecode/SpecialPointer.h | 11 +- Source/JavaScriptCore/bytecode/SpeculatedType.cpp | 313 +- Source/JavaScriptCore/bytecode/SpeculatedType.h | 256 +- Source/JavaScriptCore/bytecode/StructureSet.cpp | 48 + Source/JavaScriptCore/bytecode/StructureSet.h | 157 +- .../bytecode/StructureStubClearingWatchpoint.cpp | 48 +- .../bytecode/StructureStubClearingWatchpoint.h | 50 +- .../JavaScriptCore/bytecode/StructureStubInfo.cpp | 308 +- Source/JavaScriptCore/bytecode/StructureStubInfo.h | 378 +- Source/JavaScriptCore/bytecode/SuperSampler.cpp | 92 + Source/JavaScriptCore/bytecode/SuperSampler.h | 58 + Source/JavaScriptCore/bytecode/ToThisStatus.cpp | 72 + Source/JavaScriptCore/bytecode/ToThisStatus.h | 46 + .../JavaScriptCore/bytecode/TrackedReferences.cpp | 81 + Source/JavaScriptCore/bytecode/TrackedReferences.h | 52 + Source/JavaScriptCore/bytecode/TypeLocation.h | 60 + .../JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp | 368 +- Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h | 536 +- .../bytecode/UnlinkedEvalCodeBlock.cpp | 40 + .../bytecode/UnlinkedEvalCodeBlock.h | 71 + .../bytecode/UnlinkedFunctionCodeBlock.cpp | 40 + .../bytecode/UnlinkedFunctionCodeBlock.h | 61 + .../bytecode/UnlinkedFunctionExecutable.cpp | 234 + .../bytecode/UnlinkedFunctionExecutable.h | 199 + .../bytecode/UnlinkedGlobalCodeBlock.h | 43 + .../bytecode/UnlinkedInstructionStream.cpp | 84 +- .../bytecode/UnlinkedInstructionStream.h | 85 +- .../bytecode/UnlinkedModuleProgramCodeBlock.cpp | 48 + .../bytecode/UnlinkedModuleProgramCodeBlock.h | 95 + .../bytecode/UnlinkedProgramCodeBlock.cpp | 48 + .../bytecode/UnlinkedProgramCodeBlock.h | 72 + Source/JavaScriptCore/bytecode/ValueProfile.h | 17 +- 
Source/JavaScriptCore/bytecode/ValueRecovery.cpp | 28 +- Source/JavaScriptCore/bytecode/ValueRecovery.h | 206 +- .../bytecode/VariableWatchpointSet.h | 109 - .../bytecode/VariableWriteFireDetail.cpp | 44 + .../bytecode/VariableWriteFireDetail.h | 52 + Source/JavaScriptCore/bytecode/VirtualRegister.cpp | 65 + Source/JavaScriptCore/bytecode/VirtualRegister.h | 60 +- Source/JavaScriptCore/bytecode/Watchpoint.cpp | 77 +- Source/JavaScriptCore/bytecode/Watchpoint.h | 215 +- .../bytecompiler/BytecodeGenerator.cpp | 4635 ++++-- .../bytecompiler/BytecodeGenerator.h | 1017 +- Source/JavaScriptCore/bytecompiler/Label.h | 12 +- Source/JavaScriptCore/bytecompiler/LabelScope.h | 26 +- .../JavaScriptCore/bytecompiler/NodesCodegen.cpp | 3014 +++- Source/JavaScriptCore/bytecompiler/RegisterID.h | 8 +- .../bytecompiler/StaticPropertyAnalysis.h | 10 +- .../bytecompiler/StaticPropertyAnalyzer.h | 9 +- Source/JavaScriptCore/config.h | 20 +- Source/JavaScriptCore/create_hash_table | 127 +- Source/JavaScriptCore/create_regex_tables | 16 +- Source/JavaScriptCore/debugger/Breakpoint.h | 66 +- Source/JavaScriptCore/debugger/Debugger.cpp | 611 +- Source/JavaScriptCore/debugger/Debugger.h | 123 +- .../JavaScriptCore/debugger/DebuggerActivation.cpp | 98 - .../JavaScriptCore/debugger/DebuggerActivation.h | 71 - .../JavaScriptCore/debugger/DebuggerCallFrame.cpp | 253 +- Source/JavaScriptCore/debugger/DebuggerCallFrame.h | 46 +- .../JavaScriptCore/debugger/DebuggerEvalEnabler.h | 60 + .../JavaScriptCore/debugger/DebuggerLocation.cpp | 46 + Source/JavaScriptCore/debugger/DebuggerLocation.h | 53 + .../JavaScriptCore/debugger/DebuggerParseData.cpp | 185 + Source/JavaScriptCore/debugger/DebuggerParseData.h | 81 + .../JavaScriptCore/debugger/DebuggerPrimitives.h | 7 +- Source/JavaScriptCore/debugger/DebuggerScope.cpp | 253 + Source/JavaScriptCore/debugger/DebuggerScope.h | 118 + .../JavaScriptCore/debugger/ScriptProfilingScope.h | 90 + Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp | 10 +- Source/JavaScriptCore/dfg/DFGAbstractHeap.h | 128 +- Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h | 92 +- .../dfg/DFGAbstractInterpreterInlines.h | 2659 ++- Source/JavaScriptCore/dfg/DFGAbstractValue.cpp | 422 +- Source/JavaScriptCore/dfg/DFGAbstractValue.h | 344 +- .../DFGAdaptiveInferredPropertyValueWatchpoint.cpp | 60 + .../DFGAdaptiveInferredPropertyValueWatchpoint.h | 47 + .../dfg/DFGAdaptiveStructureWatchpoint.cpp | 77 + .../dfg/DFGAdaptiveStructureWatchpoint.h | 53 + Source/JavaScriptCore/dfg/DFGAdjacencyList.h | 65 +- Source/JavaScriptCore/dfg/DFGAllocator.h | 32 +- Source/JavaScriptCore/dfg/DFGAnalysis.h | 75 - Source/JavaScriptCore/dfg/DFGArgumentPosition.h | 20 +- .../dfg/DFGArgumentsEliminationPhase.cpp | 1087 ++ .../dfg/DFGArgumentsEliminationPhase.h | 41 + .../dfg/DFGArgumentsSimplificationPhase.cpp | 798 - .../dfg/DFGArgumentsSimplificationPhase.h | 49 - .../JavaScriptCore/dfg/DFGArgumentsUtilities.cpp | 118 + Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h | 43 + Source/JavaScriptCore/dfg/DFGArithMode.cpp | 17 + Source/JavaScriptCore/dfg/DFGArithMode.h | 50 +- Source/JavaScriptCore/dfg/DFGArrayMode.cpp | 297 +- Source/JavaScriptCore/dfg/DFGArrayMode.h | 91 +- .../dfg/DFGArrayifySlowPathGenerator.h | 36 +- .../JavaScriptCore/dfg/DFGAtTailAbstractState.cpp | 27 +- Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h | 29 +- Source/JavaScriptCore/dfg/DFGAvailability.cpp | 1 + Source/JavaScriptCore/dfg/DFGAvailability.h | 33 +- Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp | 109 + 
Source/JavaScriptCore/dfg/DFGAvailabilityMap.h | 87 + Source/JavaScriptCore/dfg/DFGBackwardsCFG.h | 47 + Source/JavaScriptCore/dfg/DFGBackwardsDominators.h | 49 + .../dfg/DFGBackwardsPropagationPhase.cpp | 118 +- .../dfg/DFGBackwardsPropagationPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGBasicBlock.cpp | 36 +- Source/JavaScriptCore/dfg/DFGBasicBlock.h | 183 +- Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h | 50 +- Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp | 196 - Source/JavaScriptCore/dfg/DFGBinarySwitch.h | 142 - Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp | 29 +- Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h | 22 +- Source/JavaScriptCore/dfg/DFGBlockMap.h | 107 + Source/JavaScriptCore/dfg/DFGBlockMapInlines.h | 43 + Source/JavaScriptCore/dfg/DFGBlockSet.cpp | 43 + Source/JavaScriptCore/dfg/DFGBlockSet.h | 148 + Source/JavaScriptCore/dfg/DFGBlockSetInlines.h | 42 + Source/JavaScriptCore/dfg/DFGBlockWorklist.h | 57 + Source/JavaScriptCore/dfg/DFGBranchDirection.h | 23 +- Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp | 5492 +++++-- Source/JavaScriptCore/dfg/DFGByteCodeParser.h | 15 +- Source/JavaScriptCore/dfg/DFGCFAPhase.cpp | 88 +- Source/JavaScriptCore/dfg/DFGCFAPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGCFG.h | 76 + .../dfg/DFGCFGSimplificationPhase.cpp | 127 +- .../JavaScriptCore/dfg/DFGCFGSimplificationPhase.h | 8 +- .../JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp | 164 +- Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGCSEPhase.cpp | 2032 +-- Source/JavaScriptCore/dfg/DFGCSEPhase.h | 32 +- .../dfg/DFGCallArrayAllocatorSlowPathGenerator.h | 96 +- ...DFGCallCreateDirectArgumentsSlowPathGenerator.h | 80 + Source/JavaScriptCore/dfg/DFGCapabilities.cpp | 167 +- Source/JavaScriptCore/dfg/DFGCapabilities.h | 88 +- Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp | 91 + Source/JavaScriptCore/dfg/DFGCleanUpPhase.h | 40 + Source/JavaScriptCore/dfg/DFGClobberSet.cpp | 46 +- Source/JavaScriptCore/dfg/DFGClobberSet.h | 18 +- Source/JavaScriptCore/dfg/DFGClobberize.cpp | 39 +- Source/JavaScriptCore/dfg/DFGClobberize.h | 1342 +- Source/JavaScriptCore/dfg/DFGClobbersExitState.cpp | 108 + Source/JavaScriptCore/dfg/DFGClobbersExitState.h | 65 + Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp | 83 + Source/JavaScriptCore/dfg/DFGCombinedLiveness.h | 54 + Source/JavaScriptCore/dfg/DFGCommon.cpp | 58 +- Source/JavaScriptCore/dfg/DFGCommon.h | 173 +- Source/JavaScriptCore/dfg/DFGCommonData.cpp | 60 +- Source/JavaScriptCore/dfg/DFGCommonData.h | 42 +- Source/JavaScriptCore/dfg/DFGCompilationKey.cpp | 8 +- Source/JavaScriptCore/dfg/DFGCompilationKey.h | 9 +- Source/JavaScriptCore/dfg/DFGCompilationMode.cpp | 2 + Source/JavaScriptCore/dfg/DFGCompilationMode.h | 17 +- .../JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp | 896 +- .../JavaScriptCore/dfg/DFGConstantFoldingPhase.h | 8 +- .../dfg/DFGConstantHoistingPhase.cpp | 149 + .../JavaScriptCore/dfg/DFGConstantHoistingPhase.h | 40 + .../dfg/DFGControlEquivalenceAnalysis.h | 84 + .../dfg/DFGCriticalEdgeBreakingPhase.cpp | 8 +- .../dfg/DFGCriticalEdgeBreakingPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGDCEPhase.cpp | 220 +- Source/JavaScriptCore/dfg/DFGDCEPhase.h | 8 +- .../dfg/DFGDOMJITPatchpointParams.cpp | 53 + .../JavaScriptCore/dfg/DFGDOMJITPatchpointParams.h | 54 + .../JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp | 38 +- Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h | 21 +- Source/JavaScriptCore/dfg/DFGDesiredInferredType.h | 128 + .../dfg/DFGDesiredStructureChains.cpp | 48 - 
.../JavaScriptCore/dfg/DFGDesiredStructureChains.h | 58 - .../JavaScriptCore/dfg/DFGDesiredTransitions.cpp | 25 +- Source/JavaScriptCore/dfg/DFGDesiredTransitions.h | 17 +- .../JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp | 76 +- Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h | 233 +- .../dfg/DFGDesiredWeakReferences.cpp | 48 +- .../JavaScriptCore/dfg/DFGDesiredWeakReferences.h | 19 +- .../JavaScriptCore/dfg/DFGDesiredWriteBarriers.cpp | 87 - .../JavaScriptCore/dfg/DFGDesiredWriteBarriers.h | 102 - Source/JavaScriptCore/dfg/DFGDisassembler.cpp | 17 +- Source/JavaScriptCore/dfg/DFGDisassembler.h | 37 +- Source/JavaScriptCore/dfg/DFGDoesGC.cpp | 331 + Source/JavaScriptCore/dfg/DFGDoesGC.h | 39 + Source/JavaScriptCore/dfg/DFGDominators.cpp | 134 - Source/JavaScriptCore/dfg/DFGDominators.h | 47 +- Source/JavaScriptCore/dfg/DFGDoubleFormatState.h | 6 +- Source/JavaScriptCore/dfg/DFGDriver.cpp | 87 +- Source/JavaScriptCore/dfg/DFGDriver.h | 17 +- Source/JavaScriptCore/dfg/DFGEdge.cpp | 13 +- Source/JavaScriptCore/dfg/DFGEdge.h | 66 +- Source/JavaScriptCore/dfg/DFGEdgeDominates.h | 13 +- Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h | 8 +- Source/JavaScriptCore/dfg/DFGEpoch.cpp | 43 + Source/JavaScriptCore/dfg/DFGEpoch.h | 120 + Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp | 7 + Source/JavaScriptCore/dfg/DFGFailedFinalizer.h | 13 +- Source/JavaScriptCore/dfg/DFGFiltrationResult.h | 22 +- Source/JavaScriptCore/dfg/DFGFinalizer.cpp | 1 + Source/JavaScriptCore/dfg/DFGFinalizer.h | 9 +- Source/JavaScriptCore/dfg/DFGFixupPhase.cpp | 2689 ++- Source/JavaScriptCore/dfg/DFGFixupPhase.h | 7 +- Source/JavaScriptCore/dfg/DFGFlowIndexing.cpp | 73 + Source/JavaScriptCore/dfg/DFGFlowIndexing.h | 112 + Source/JavaScriptCore/dfg/DFGFlowMap.h | 139 + Source/JavaScriptCore/dfg/DFGFlushFormat.cpp | 7 +- Source/JavaScriptCore/dfg/DFGFlushFormat.h | 64 +- .../dfg/DFGFlushLivenessAnalysisPhase.cpp | 208 - .../dfg/DFGFlushLivenessAnalysisPhase.h | 48 - Source/JavaScriptCore/dfg/DFGFlushedAt.cpp | 6 +- Source/JavaScriptCore/dfg/DFGFlushedAt.h | 10 +- Source/JavaScriptCore/dfg/DFGForAllKills.h | 170 + Source/JavaScriptCore/dfg/DFGFrozenValue.cpp | 61 + Source/JavaScriptCore/dfg/DFGFrozenValue.h | 130 + Source/JavaScriptCore/dfg/DFGGenerationInfo.h | 46 +- Source/JavaScriptCore/dfg/DFGGraph.cpp | 1395 +- Source/JavaScriptCore/dfg/DFGGraph.h | 1090 +- Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp | 48 + Source/JavaScriptCore/dfg/DFGGraphSafepoint.h | 47 + Source/JavaScriptCore/dfg/DFGHeapLocation.cpp | 166 + Source/JavaScriptCore/dfg/DFGHeapLocation.h | 159 + .../JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp | 320 +- .../JavaScriptCore/dfg/DFGInPlaceAbstractState.h | 68 +- Source/JavaScriptCore/dfg/DFGInferredTypeCheck.cpp | 102 + Source/JavaScriptCore/dfg/DFGInferredTypeCheck.h | 41 + Source/JavaScriptCore/dfg/DFGInlineCacheWrapper.h | 7 +- .../dfg/DFGInlineCacheWrapperInlines.h | 6 +- Source/JavaScriptCore/dfg/DFGInsertionSet.cpp | 56 + Source/JavaScriptCore/dfg/DFGInsertionSet.h | 96 +- .../dfg/DFGIntegerCheckCombiningPhase.cpp | 414 + .../dfg/DFGIntegerCheckCombiningPhase.h | 40 + .../dfg/DFGIntegerRangeOptimizationPhase.cpp | 1846 +++ .../dfg/DFGIntegerRangeOptimizationPhase.h | 43 + .../dfg/DFGInvalidationPointInjectionPhase.cpp | 26 +- .../dfg/DFGInvalidationPointInjectionPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGJITCode.cpp | 86 +- Source/JavaScriptCore/dfg/DFGJITCode.h | 67 +- Source/JavaScriptCore/dfg/DFGJITCompiler.cpp | 431 +- Source/JavaScriptCore/dfg/DFGJITCompiler.h | 207 +- 
Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp | 40 +- Source/JavaScriptCore/dfg/DFGJITFinalizer.h | 19 +- Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp | 3 +- Source/JavaScriptCore/dfg/DFGJumpReplacement.h | 6 +- Source/JavaScriptCore/dfg/DFGLICMPhase.cpp | 166 +- Source/JavaScriptCore/dfg/DFGLICMPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp | 149 +- Source/JavaScriptCore/dfg/DFGLazyJSValue.h | 78 +- Source/JavaScriptCore/dfg/DFGLazyNode.cpp | 47 + Source/JavaScriptCore/dfg/DFGLazyNode.h | 185 + .../dfg/DFGLiveCatchVariablePreservationPhase.cpp | 173 + .../dfg/DFGLiveCatchVariablePreservationPhase.h | 49 + .../dfg/DFGLivenessAnalysisPhase.cpp | 197 +- .../JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGLongLivedState.cpp | 2 + Source/JavaScriptCore/dfg/DFGLongLivedState.h | 8 +- .../dfg/DFGLoopPreHeaderCreationPhase.cpp | 102 +- .../dfg/DFGLoopPreHeaderCreationPhase.h | 8 +- .../dfg/DFGMaximalFlushInsertionPhase.cpp | 157 + .../dfg/DFGMaximalFlushInsertionPhase.h | 54 + Source/JavaScriptCore/dfg/DFGMayExit.cpp | 206 + Source/JavaScriptCore/dfg/DFGMayExit.h | 92 + Source/JavaScriptCore/dfg/DFGMergeMode.h | 47 - Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp | 53 + Source/JavaScriptCore/dfg/DFGMinifiedGraph.h | 24 +- Source/JavaScriptCore/dfg/DFGMinifiedID.h | 17 +- Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp | 13 +- Source/JavaScriptCore/dfg/DFGMinifiedNode.h | 43 +- .../JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp | 145 + Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h | 41 + .../JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp | 99 + .../JavaScriptCore/dfg/DFGMultiGetByOffsetData.h | 154 + Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp | 22 +- Source/JavaScriptCore/dfg/DFGNaturalLoops.h | 34 +- Source/JavaScriptCore/dfg/DFGNode.cpp | 217 +- Source/JavaScriptCore/dfg/DFGNode.h | 1942 ++- .../dfg/DFGNodeAbstractValuePair.cpp | 41 + .../JavaScriptCore/dfg/DFGNodeAbstractValuePair.h | 53 + Source/JavaScriptCore/dfg/DFGNodeAllocator.h | 8 +- Source/JavaScriptCore/dfg/DFGNodeFlags.cpp | 47 +- Source/JavaScriptCore/dfg/DFGNodeFlags.h | 127 +- .../JavaScriptCore/dfg/DFGNodeFlowProjection.cpp | 49 + Source/JavaScriptCore/dfg/DFGNodeFlowProjection.h | 155 + Source/JavaScriptCore/dfg/DFGNodeOrigin.cpp | 41 + Source/JavaScriptCore/dfg/DFGNodeOrigin.h | 136 + Source/JavaScriptCore/dfg/DFGNodeType.h | 367 +- Source/JavaScriptCore/dfg/DFGNullAbstractState.h | 62 + .../dfg/DFGOSRAvailabilityAnalysisPhase.cpp | 250 +- .../dfg/DFGOSRAvailabilityAnalysisPhase.h | 31 +- Source/JavaScriptCore/dfg/DFGOSREntry.cpp | 181 +- Source/JavaScriptCore/dfg/DFGOSREntry.h | 16 +- .../dfg/DFGOSREntrypointCreationPhase.cpp | 65 +- .../dfg/DFGOSREntrypointCreationPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGOSRExit.cpp | 13 +- Source/JavaScriptCore/dfg/DFGOSRExit.h | 51 +- Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp | 19 +- Source/JavaScriptCore/dfg/DFGOSRExitBase.h | 43 +- .../JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h | 8 +- Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp | 118 +- Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h | 30 +- .../JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp | 287 +- Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp | 260 +- .../dfg/DFGOSRExitCompilerCommon.cpp | 276 +- .../JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h | 100 +- Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp | 50 + Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h | 48 + .../dfg/DFGOSRExitJumpPlaceholder.cpp | 1 + .../JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h | 8 +- 
.../JavaScriptCore/dfg/DFGOSRExitPreparation.cpp | 20 +- Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h | 7 +- .../dfg/DFGObjectAllocationSinkingPhase.cpp | 2286 +++ .../dfg/DFGObjectAllocationSinkingPhase.h | 43 + .../dfg/DFGObjectMaterializationData.cpp | 42 + .../dfg/DFGObjectMaterializationData.h | 47 + Source/JavaScriptCore/dfg/DFGOpInfo.h | 63 + Source/JavaScriptCore/dfg/DFGOperations.cpp | 2180 ++- Source/JavaScriptCore/dfg/DFGOperations.h | 179 +- .../dfg/DFGPhantomInsertionPhase.cpp | 207 + .../JavaScriptCore/dfg/DFGPhantomInsertionPhase.h | 40 + Source/JavaScriptCore/dfg/DFGPhase.cpp | 17 +- Source/JavaScriptCore/dfg/DFGPhase.h | 27 +- Source/JavaScriptCore/dfg/DFGPhiChildren.cpp | 64 + Source/JavaScriptCore/dfg/DFGPhiChildren.h | 89 + Source/JavaScriptCore/dfg/DFGPlan.cpp | 619 +- Source/JavaScriptCore/dfg/DFGPlan.h | 76 +- Source/JavaScriptCore/dfg/DFGPlanInlines.h | 53 + Source/JavaScriptCore/dfg/DFGPrePostNumbering.cpp | 88 + Source/JavaScriptCore/dfg/DFGPrePostNumbering.h | 105 + .../JavaScriptCore/dfg/DFGPreciseLocalClobberize.h | 267 + .../dfg/DFGPredictionInjectionPhase.cpp | 7 +- .../dfg/DFGPredictionInjectionPhase.h | 7 +- .../dfg/DFGPredictionPropagationPhase.cpp | 1335 +- .../dfg/DFGPredictionPropagationPhase.h | 9 +- .../JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp | 133 + .../JavaScriptCore/dfg/DFGPromotedHeapLocation.h | 234 + Source/JavaScriptCore/dfg/DFGPropertyTypeKey.h | 125 + Source/JavaScriptCore/dfg/DFGPureValue.cpp | 52 + Source/JavaScriptCore/dfg/DFGPureValue.h | 141 + .../JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp | 590 + .../JavaScriptCore/dfg/DFGPutStackSinkingPhase.h | 42 + Source/JavaScriptCore/dfg/DFGRegisterBank.h | 4 +- Source/JavaScriptCore/dfg/DFGRegisteredStructure.h | 78 + .../dfg/DFGRegisteredStructureSet.cpp | 98 + .../JavaScriptCore/dfg/DFGRegisteredStructureSet.h | 84 + .../dfg/DFGResurrectionForValidationPhase.cpp | 79 - .../dfg/DFGResurrectionForValidationPhase.h | 52 - Source/JavaScriptCore/dfg/DFGSSACalculator.cpp | 150 + Source/JavaScriptCore/dfg/DFGSSACalculator.h | 259 + .../JavaScriptCore/dfg/DFGSSAConversionPhase.cpp | 612 +- Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h | 23 +- Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp | 36 +- Source/JavaScriptCore/dfg/DFGSSALoweringPhase.h | 6 +- Source/JavaScriptCore/dfg/DFGSafeToExecute.h | 320 +- Source/JavaScriptCore/dfg/DFGSafepoint.cpp | 129 + Source/JavaScriptCore/dfg/DFGSafepoint.h | 86 + .../dfg/DFGSaneStringGetByValSlowPathGenerator.h | 11 +- Source/JavaScriptCore/dfg/DFGScannable.h | 46 + Source/JavaScriptCore/dfg/DFGScoreBoard.h | 21 +- .../JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h | 8 +- Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h | 335 +- Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp | 7529 +++++++-- Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h | 2086 ++- .../JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp | 4173 +++-- Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp | 4798 +++--- Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp | 167 +- Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h | 8 +- .../dfg/DFGStaticExecutionCountEstimationPhase.cpp | 108 + .../dfg/DFGStaticExecutionCountEstimationPhase.h | 45 + .../dfg/DFGStoreBarrierClusteringPhase.cpp | 173 + .../dfg/DFGStoreBarrierClusteringPhase.h | 91 + .../dfg/DFGStoreBarrierElisionPhase.cpp | 155 - .../dfg/DFGStoreBarrierElisionPhase.h | 37 - .../dfg/DFGStoreBarrierInsertionPhase.cpp | 512 + .../dfg/DFGStoreBarrierInsertionPhase.h | 46 + .../dfg/DFGStrengthReductionPhase.cpp | 810 +- 
.../JavaScriptCore/dfg/DFGStrengthReductionPhase.h | 6 +- .../dfg/DFGStructureAbstractValue.cpp | 388 + .../JavaScriptCore/dfg/DFGStructureAbstractValue.h | 400 +- .../JavaScriptCore/dfg/DFGStructureClobberState.h | 70 + Source/JavaScriptCore/dfg/DFGThreadData.cpp | 48 + Source/JavaScriptCore/dfg/DFGThreadData.h | 56 + Source/JavaScriptCore/dfg/DFGThunks.cpp | 55 +- Source/JavaScriptCore/dfg/DFGThunks.h | 10 +- .../dfg/DFGTierUpCheckInjectionPhase.cpp | 137 +- .../dfg/DFGTierUpCheckInjectionPhase.h | 8 +- .../dfg/DFGToFTLDeferredCompilationCallback.cpp | 35 +- .../dfg/DFGToFTLDeferredCompilationCallback.h | 21 +- ...ToFTLForOSREntryDeferredCompilationCallback.cpp | 55 +- ...FGToFTLForOSREntryDeferredCompilationCallback.h | 21 +- Source/JavaScriptCore/dfg/DFGTransition.cpp | 48 + Source/JavaScriptCore/dfg/DFGTransition.h | 61 + .../dfg/DFGTypeCheckHoistingPhase.cpp | 107 +- .../JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp | 14 +- Source/JavaScriptCore/dfg/DFGUnificationPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGUseKind.cpp | 112 +- Source/JavaScriptCore/dfg/DFGUseKind.h | 167 +- Source/JavaScriptCore/dfg/DFGValidate.cpp | 459 +- Source/JavaScriptCore/dfg/DFGValidate.h | 10 +- .../JavaScriptCore/dfg/DFGValueRecoveryOverride.h | 57 - Source/JavaScriptCore/dfg/DFGValueSource.cpp | 24 +- Source/JavaScriptCore/dfg/DFGValueSource.h | 25 +- Source/JavaScriptCore/dfg/DFGValueStrength.cpp | 51 + Source/JavaScriptCore/dfg/DFGValueStrength.h | 66 + .../dfg/DFGVarargsForwardingPhase.cpp | 334 + .../JavaScriptCore/dfg/DFGVarargsForwardingPhase.h | 41 + .../JavaScriptCore/dfg/DFGVariableAccessData.cpp | 222 + Source/JavaScriptCore/dfg/DFGVariableAccessData.h | 211 +- .../dfg/DFGVariableAccessDataDump.cpp | 7 +- .../JavaScriptCore/dfg/DFGVariableAccessDataDump.h | 10 +- Source/JavaScriptCore/dfg/DFGVariableEvent.cpp | 10 +- Source/JavaScriptCore/dfg/DFGVariableEvent.h | 29 +- .../JavaScriptCore/dfg/DFGVariableEventStream.cpp | 49 +- Source/JavaScriptCore/dfg/DFGVariableEventStream.h | 13 +- Source/JavaScriptCore/dfg/DFGVariadicFunction.h | 57 - .../dfg/DFGVirtualRegisterAllocationPhase.cpp | 13 +- .../dfg/DFGVirtualRegisterAllocationPhase.h | 8 +- .../dfg/DFGWatchpointCollectionPhase.cpp | 120 +- .../dfg/DFGWatchpointCollectionPhase.h | 8 +- Source/JavaScriptCore/dfg/DFGWorklist.cpp | 495 +- Source/JavaScriptCore/dfg/DFGWorklist.h | 99 +- Source/JavaScriptCore/dfg/DFGWorklistInlines.h | 66 + .../disassembler/ARM64/A64DOpcode.cpp | 330 +- .../JavaScriptCore/disassembler/ARM64/A64DOpcode.h | 94 +- .../disassembler/ARM64Disassembler.cpp | 4 +- .../disassembler/ARMLLVMDisassembler.cpp | 76 + .../disassembler/ARMv7/ARMv7DOpcode.cpp | 1733 ++ .../disassembler/ARMv7/ARMv7DOpcode.h | 1234 ++ .../disassembler/ARMv7Disassembler.cpp | 55 + .../JavaScriptCore/disassembler/Disassembler.cpp | 117 +- Source/JavaScriptCore/disassembler/Disassembler.h | 26 +- .../disassembler/LLVMDisassembler.cpp | 131 - .../JavaScriptCore/disassembler/LLVMDisassembler.h | 46 - .../disassembler/UDis86Disassembler.cpp | 63 + .../disassembler/UDis86Disassembler.h | 42 + .../disassembler/X86Disassembler.cpp | 40 +- .../disassembler/udis86/differences.txt | 9 + .../JavaScriptCore/disassembler/udis86/optable.xml | 10099 ++++++++++++ .../JavaScriptCore/disassembler/udis86/ud_itab.py | 379 + .../disassembler/udis86/ud_opcode.py | 622 + Source/JavaScriptCore/disassembler/udis86/udis86.c | 462 + Source/JavaScriptCore/disassembler/udis86/udis86.h | 33 + 
.../disassembler/udis86/udis86_decode.c | 1273 ++ .../disassembler/udis86/udis86_decode.h | 197 + .../disassembler/udis86/udis86_extern.h | 113 + .../disassembler/udis86/udis86_itab_holder.c | 33 + .../disassembler/udis86/udis86_syn-att.c | 235 + .../disassembler/udis86/udis86_syn-intel.c | 231 + .../disassembler/udis86/udis86_syn.c | 219 + .../disassembler/udis86/udis86_syn.h | 53 + .../disassembler/udis86/udis86_types.h | 260 + .../disassembler/udis86/udis86_udint.h | 98 + .../JavaScriptCore/domjit/DOMJITAbstractHeap.cpp | 84 + Source/JavaScriptCore/domjit/DOMJITAbstractHeap.h | 70 + .../domjit/DOMJITCallDOMGetterPatchpoint.h | 57 + Source/JavaScriptCore/domjit/DOMJITEffect.h | 89 + Source/JavaScriptCore/domjit/DOMJITGetterSetter.h | 67 + Source/JavaScriptCore/domjit/DOMJITHeapRange.cpp | 36 + Source/JavaScriptCore/domjit/DOMJITHeapRange.h | 130 + Source/JavaScriptCore/domjit/DOMJITPatchpoint.h | 74 + .../JavaScriptCore/domjit/DOMJITPatchpointParams.h | 75 + Source/JavaScriptCore/domjit/DOMJITReg.h | 93 + Source/JavaScriptCore/domjit/DOMJITSignature.h | 74 + Source/JavaScriptCore/domjit/DOMJITSlowPathCalls.h | 35 + Source/JavaScriptCore/domjit/DOMJITValue.h | 71 + Source/JavaScriptCore/dynbench.cpp | 242 + Source/JavaScriptCore/features.json | 404 + Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h | 32 +- Source/JavaScriptCore/ftl/FTLAbbreviations.h | 313 - Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp | 176 +- Source/JavaScriptCore/ftl/FTLAbstractHeap.h | 144 +- .../ftl/FTLAbstractHeapRepository.cpp | 119 +- .../JavaScriptCore/ftl/FTLAbstractHeapRepository.h | 179 +- Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp | 41 + Source/JavaScriptCore/ftl/FTLAvailableRecovery.h | 75 + Source/JavaScriptCore/ftl/FTLCapabilities.cpp | 348 +- Source/JavaScriptCore/ftl/FTLCapabilities.h | 8 +- Source/JavaScriptCore/ftl/FTLCommonValues.cpp | 62 +- Source/JavaScriptCore/ftl/FTLCommonValues.h | 77 +- Source/JavaScriptCore/ftl/FTLCompile.cpp | 458 +- Source/JavaScriptCore/ftl/FTLCompile.h | 13 +- .../ftl/FTLDOMJITPatchpointParams.cpp | 63 + .../JavaScriptCore/ftl/FTLDOMJITPatchpointParams.h | 61 + Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp | 76 + Source/JavaScriptCore/ftl/FTLExceptionTarget.h | 64 + Source/JavaScriptCore/ftl/FTLExitArgument.cpp | 2 +- Source/JavaScriptCore/ftl/FTLExitArgument.h | 24 +- .../ftl/FTLExitArgumentForOperand.cpp | 2 +- .../JavaScriptCore/ftl/FTLExitArgumentForOperand.h | 8 +- Source/JavaScriptCore/ftl/FTLExitArgumentList.h | 45 - Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp | 51 + Source/JavaScriptCore/ftl/FTLExitPropertyValue.h | 70 + .../JavaScriptCore/ftl/FTLExitThunkGenerator.cpp | 69 - Source/JavaScriptCore/ftl/FTLExitThunkGenerator.h | 60 - .../ftl/FTLExitTimeObjectMaterialization.cpp | 82 + .../ftl/FTLExitTimeObjectMaterialization.h | 69 + Source/JavaScriptCore/ftl/FTLExitValue.cpp | 75 +- Source/JavaScriptCore/ftl/FTLExitValue.h | 129 +- Source/JavaScriptCore/ftl/FTLFail.cpp | 6 +- Source/JavaScriptCore/ftl/FTLFail.h | 8 +- .../JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp | 6 +- Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h | 10 +- Source/JavaScriptCore/ftl/FTLFormattedValue.h | 37 +- Source/JavaScriptCore/ftl/FTLGeneratedFunction.h | 7 +- .../JavaScriptCore/ftl/FTLIntrinsicRepository.cpp | 65 - Source/JavaScriptCore/ftl/FTLIntrinsicRepository.h | 135 - Source/JavaScriptCore/ftl/FTLJITCode.cpp | 96 +- Source/JavaScriptCore/ftl/FTLJITCode.h | 72 +- Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp | 88 +- Source/JavaScriptCore/ftl/FTLJITFinalizer.h | 41 +- 
 Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp | 79 +
 Source/JavaScriptCore/ftl/FTLLazySlowPath.h | 96 +
 Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h | 52 +
 Source/JavaScriptCore/ftl/FTLLink.cpp | 229 +-
 Source/JavaScriptCore/ftl/FTLLink.h | 8 +-
 Source/JavaScriptCore/ftl/FTLLocation.cpp | 195 +
 Source/JavaScriptCore/ftl/FTLLocation.h | 213 +
 Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp | 13970 ++++++++++++++++
 Source/JavaScriptCore/ftl/FTLLowerDFGToB3.h | 39 +
 Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp | 4515 -----
 Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.h | 45 -
 Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h | 8 +-
 Source/JavaScriptCore/ftl/FTLOSREntry.cpp | 28 +-
 Source/JavaScriptCore/ftl/FTLOSREntry.h | 8 +-
 Source/JavaScriptCore/ftl/FTLOSRExit.cpp | 83 +-
 Source/JavaScriptCore/ftl/FTLOSRExit.h | 206 +-
 .../JavaScriptCore/ftl/FTLOSRExitCompilationInfo.h | 55 -
 Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp | 554 +-
 Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h | 8 +-
 Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp | 62 +
 Source/JavaScriptCore/ftl/FTLOSRExitHandle.h | 63 +
 Source/JavaScriptCore/ftl/FTLOperations.cpp | 535 +
 Source/JavaScriptCore/ftl/FTLOperations.h | 51 +
 Source/JavaScriptCore/ftl/FTLOutput.cpp | 800 +-
 Source/JavaScriptCore/ftl/FTLOutput.h | 688 +-
 .../ftl/FTLPatchpointExceptionHandle.cpp | 121 +
 .../ftl/FTLPatchpointExceptionHandle.h | 102 +
 Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp | 51 +
 Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h | 46 +
 Source/JavaScriptCore/ftl/FTLSaveRestore.cpp | 149 +
 Source/JavaScriptCore/ftl/FTLSaveRestore.h | 55 +
 Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp | 148 +
 Source/JavaScriptCore/ftl/FTLSlowPathCall.h | 124 +
 Source/JavaScriptCore/ftl/FTLSlowPathCallKey.cpp | 41 +
 Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h | 132 +
 .../JavaScriptCore/ftl/FTLStackmapArgumentList.h | 39 +
 Source/JavaScriptCore/ftl/FTLState.cpp | 32 +-
 Source/JavaScriptCore/ftl/FTLState.h | 55 +-
 Source/JavaScriptCore/ftl/FTLSwitchCase.h | 22 +-
 Source/JavaScriptCore/ftl/FTLThunks.cpp | 125 +-
 Source/JavaScriptCore/ftl/FTLThunks.h | 17 +-
 Source/JavaScriptCore/ftl/FTLTypedPointer.h | 18 +-
 Source/JavaScriptCore/ftl/FTLValueFormat.cpp | 123 -
 Source/JavaScriptCore/ftl/FTLValueFormat.h | 69 -
 Source/JavaScriptCore/ftl/FTLValueFromBlock.h | 10 +-
 Source/JavaScriptCore/ftl/FTLValueRange.cpp | 41 +
 Source/JavaScriptCore/ftl/FTLValueRange.h | 60 +
 Source/JavaScriptCore/ftl/FTLWeight.h | 71 +
 Source/JavaScriptCore/ftl/FTLWeightedTarget.h | 89 +
 Source/JavaScriptCore/generate-bytecode-files | 220 +
 .../JavaScriptCore/generateYarrCanonicalizeUnicode | 200 +
 Source/JavaScriptCore/heap/AllocatingScope.h | 52 +
 Source/JavaScriptCore/heap/AllocatorAttributes.cpp | 39 +
 Source/JavaScriptCore/heap/AllocatorAttributes.h | 51 +
 Source/JavaScriptCore/heap/BlockAllocator.cpp | 172 -
 Source/JavaScriptCore/heap/BlockAllocator.h | 300 -
 Source/JavaScriptCore/heap/CellContainer.cpp | 43 +
 Source/JavaScriptCore/heap/CellContainer.h | 102 +
 Source/JavaScriptCore/heap/CellContainerInlines.h | 104 +
 Source/JavaScriptCore/heap/CellState.h | 56 +
 Source/JavaScriptCore/heap/CodeBlockSet.cpp | 145 +-
 Source/JavaScriptCore/heap/CodeBlockSet.h | 67 +-
 Source/JavaScriptCore/heap/CodeBlockSetInlines.h | 89 +
 Source/JavaScriptCore/heap/CollectingScope.h | 52 +
 Source/JavaScriptCore/heap/CollectionScope.cpp | 55 +
 Source/JavaScriptCore/heap/CollectionScope.h | 43 +
 Source/JavaScriptCore/heap/CollectorPhase.cpp | 84 +
 Source/JavaScriptCore/heap/CollectorPhase.h | 75 +
 Source/JavaScriptCore/heap/ConservativeRoots.cpp | 87 +-
 Source/JavaScriptCore/heap/ConservativeRoots.h | 29 +-
 Source/JavaScriptCore/heap/ConstraintVolatility.h | 73 +
 Source/JavaScriptCore/heap/CopiedAllocator.h | 166 -
 Source/JavaScriptCore/heap/CopiedBlock.h | 288 -
 Source/JavaScriptCore/heap/CopiedBlockInlines.h | 86 -
 Source/JavaScriptCore/heap/CopiedSpace.cpp | 353 -
 Source/JavaScriptCore/heap/CopiedSpace.h | 147 -
 Source/JavaScriptCore/heap/CopiedSpaceInlines.h | 263 -
 Source/JavaScriptCore/heap/CopyToken.h | 40 -
 Source/JavaScriptCore/heap/CopyVisitor.cpp | 67 -
 Source/JavaScriptCore/heap/CopyVisitor.h | 62 -
 Source/JavaScriptCore/heap/CopyVisitorInlines.h | 106 -
 Source/JavaScriptCore/heap/CopyWorkList.h | 190 -
 Source/JavaScriptCore/heap/CopyWriteBarrier.h | 90 -
 Source/JavaScriptCore/heap/DeferGC.cpp | 2 +
 Source/JavaScriptCore/heap/DeferGC.h | 22 +-
 Source/JavaScriptCore/heap/DelayedReleaseScope.h | 100 -
 Source/JavaScriptCore/heap/DeleteAllCodeEffort.h | 36 +
 Source/JavaScriptCore/heap/DestructionMode.cpp | 50 +
 Source/JavaScriptCore/heap/DestructionMode.h | 43 +
 .../JavaScriptCore/heap/EdenGCActivityCallback.cpp | 100 +
 .../JavaScriptCore/heap/EdenGCActivityCallback.h | 49 +
 Source/JavaScriptCore/heap/FreeList.cpp | 37 +
 Source/JavaScriptCore/heap/FreeList.h | 91 +
 .../JavaScriptCore/heap/FullGCActivityCallback.cpp | 115 +
 .../JavaScriptCore/heap/FullGCActivityCallback.h | 54 +
 Source/JavaScriptCore/heap/GCActivityCallback.cpp | 164 +
 Source/JavaScriptCore/heap/GCActivityCallback.h | 106 +
 Source/JavaScriptCore/heap/GCAssertions.h | 30 +-
 Source/JavaScriptCore/heap/GCConductor.cpp | 66 +
 Source/JavaScriptCore/heap/GCConductor.h | 49 +
 Source/JavaScriptCore/heap/GCDeferralContext.h | 46 +
 .../JavaScriptCore/heap/GCDeferralContextInlines.h | 49 +
 Source/JavaScriptCore/heap/GCIncomingRefCounted.h | 6 +-
 .../heap/GCIncomingRefCountedInlines.h | 6 +-
 .../JavaScriptCore/heap/GCIncomingRefCountedSet.h | 11 +-
 .../heap/GCIncomingRefCountedSetInlines.h | 7 +-
 Source/JavaScriptCore/heap/GCLogging.cpp | 76 +
 Source/JavaScriptCore/heap/GCLogging.h | 56 +
 Source/JavaScriptCore/heap/GCSegmentedArray.h | 167 +
 .../JavaScriptCore/heap/GCSegmentedArrayInlines.h | 227 +
 Source/JavaScriptCore/heap/GCThread.cpp | 136 -
 Source/JavaScriptCore/heap/GCThread.h | 63 -
 Source/JavaScriptCore/heap/GCThreadSharedData.cpp | 210 -
 Source/JavaScriptCore/heap/GCThreadSharedData.h | 124 -
 Source/JavaScriptCore/heap/GCTypeMap.h | 64 +
 Source/JavaScriptCore/heap/Handle.h | 11 +-
 Source/JavaScriptCore/heap/HandleBlock.h | 18 +-
 Source/JavaScriptCore/heap/HandleBlockInlines.h | 24 +-
 Source/JavaScriptCore/heap/HandleSet.cpp | 18 +-
 Source/JavaScriptCore/heap/HandleSet.h | 28 +-
 Source/JavaScriptCore/heap/HandleStack.cpp | 11 +-
 Source/JavaScriptCore/heap/HandleStack.h | 15 +-
 Source/JavaScriptCore/heap/HandleTypes.h | 5 +-
 Source/JavaScriptCore/heap/Heap.cpp | 3060 +++-
 Source/JavaScriptCore/heap/Heap.h | 1091 +-
 Source/JavaScriptCore/heap/HeapBlock.h | 73 -
 Source/JavaScriptCore/heap/HeapCell.cpp | 59 +
 Source/JavaScriptCore/heap/HeapCell.h | 92 +
 Source/JavaScriptCore/heap/HeapCellInlines.h | 95 +
 Source/JavaScriptCore/heap/HeapHelperPool.cpp | 47 +
 Source/JavaScriptCore/heap/HeapHelperPool.h | 34 +
 Source/JavaScriptCore/heap/HeapInlines.h | 272 +
 Source/JavaScriptCore/heap/HeapIterationScope.h | 6 +-
 Source/JavaScriptCore/heap/HeapObserver.h | 39 +
 Source/JavaScriptCore/heap/HeapOperation.h | 35 -
 Source/JavaScriptCore/heap/HeapProfiler.cpp | 66 +
 Source/JavaScriptCore/heap/HeapProfiler.h | 57 +
 Source/JavaScriptCore/heap/HeapRootVisitor.h | 86 -
 Source/JavaScriptCore/heap/HeapSnapshot.cpp | 184 +
 Source/JavaScriptCore/heap/HeapSnapshot.h | 64 +
 Source/JavaScriptCore/heap/HeapSnapshotBuilder.cpp | 393 +
 Source/JavaScriptCore/heap/HeapSnapshotBuilder.h | 140 +
 Source/JavaScriptCore/heap/HeapStatistics.cpp | 257 -
 Source/JavaScriptCore/heap/HeapStatistics.h | 61 -
 Source/JavaScriptCore/heap/HeapTimer.cpp | 160 +-
 Source/JavaScriptCore/heap/HeapTimer.h | 50 +-
 Source/JavaScriptCore/heap/HeapUtil.h | 189 +
 Source/JavaScriptCore/heap/HeapVerifier.cpp | 217 +
 Source/JavaScriptCore/heap/HeapVerifier.h | 95 +
 Source/JavaScriptCore/heap/IncrementalSweeper.cpp | 100 +-
 Source/JavaScriptCore/heap/IncrementalSweeper.h | 33 +-
 Source/JavaScriptCore/heap/JITStubRoutineSet.cpp | 2 +-
 Source/JavaScriptCore/heap/JITStubRoutineSet.h | 8 +-
 Source/JavaScriptCore/heap/LargeAllocation.cpp | 128 +
 Source/JavaScriptCore/heap/LargeAllocation.h | 163 +
 Source/JavaScriptCore/heap/ListableHandler.h | 26 +-
 Source/JavaScriptCore/heap/LiveObjectData.h | 43 +
 Source/JavaScriptCore/heap/LiveObjectList.cpp | 40 +
 Source/JavaScriptCore/heap/LiveObjectList.h | 53 +
 Source/JavaScriptCore/heap/Local.h | 9 +-
 Source/JavaScriptCore/heap/LocalScope.h | 7 +-
 Source/JavaScriptCore/heap/LockDuringMarking.h | 47 +
 Source/JavaScriptCore/heap/MachineStackMarker.cpp | 993 +-
 Source/JavaScriptCore/heap/MachineStackMarker.h | 188 +-
 Source/JavaScriptCore/heap/MarkStack.cpp | 146 +-
 Source/JavaScriptCore/heap/MarkStack.h | 107 +-
 Source/JavaScriptCore/heap/MarkStackInlines.h | 119 -
 Source/JavaScriptCore/heap/MarkedAllocator.cpp | 544 +-
 Source/JavaScriptCore/heap/MarkedAllocator.h | 341 +-
 .../JavaScriptCore/heap/MarkedAllocatorInlines.h | 85 +
 Source/JavaScriptCore/heap/MarkedBlock.cpp | 565 +-
 Source/JavaScriptCore/heap/MarkedBlock.h | 948 +-
 Source/JavaScriptCore/heap/MarkedBlockInlines.h | 394 +
 Source/JavaScriptCore/heap/MarkedBlockSet.h | 7 +-
 Source/JavaScriptCore/heap/MarkedSpace.cpp | 677 +-
 Source/JavaScriptCore/heap/MarkedSpace.h | 374 +-
 Source/JavaScriptCore/heap/MarkedSpaceInlines.h | 66 +
 Source/JavaScriptCore/heap/MarkingConstraint.cpp | 77 +
 Source/JavaScriptCore/heap/MarkingConstraint.h | 96 +
 .../JavaScriptCore/heap/MarkingConstraintSet.cpp | 249 +
 Source/JavaScriptCore/heap/MarkingConstraintSet.h | 87 +
 Source/JavaScriptCore/heap/MutatorScheduler.cpp | 76 +
 Source/JavaScriptCore/heap/MutatorScheduler.h | 76 +
 Source/JavaScriptCore/heap/MutatorState.cpp | 55 +
 Source/JavaScriptCore/heap/MutatorState.h | 53 +
 Source/JavaScriptCore/heap/OpaqueRootSet.h | 91 +
 .../JavaScriptCore/heap/PreventCollectionScope.h | 50 +
 .../JavaScriptCore/heap/RecursiveAllocationScope.h | 65 -
 Source/JavaScriptCore/heap/Region.h | 319 -
 Source/JavaScriptCore/heap/RegisterState.h | 158 +
 .../JavaScriptCore/heap/ReleaseHeapAccessScope.h | 58 +
 Source/JavaScriptCore/heap/RunningScope.h | 52 +
 Source/JavaScriptCore/heap/SlotVisitor.cpp | 926 +-
 Source/JavaScriptCore/heap/SlotVisitor.h | 215 +-
 Source/JavaScriptCore/heap/SlotVisitorInlines.h | 239 +-
 .../heap/SpaceTimeMutatorScheduler.cpp | 221 +
 .../heap/SpaceTimeMutatorScheduler.h | 86 +
 .../heap/StochasticSpaceTimeMutatorScheduler.cpp | 233 +
 .../heap/StochasticSpaceTimeMutatorScheduler.h | 92 +
 .../JavaScriptCore/heap/StopIfNecessaryTimer.cpp | 55 +
 Source/JavaScriptCore/heap/StopIfNecessaryTimer.h | 44 +
 Source/JavaScriptCore/heap/Strong.h | 11 +-
 Source/JavaScriptCore/heap/StrongInlines.h | 5 +-
 Source/JavaScriptCore/heap/Subspace.cpp | 196 +
 Source/JavaScriptCore/heap/Subspace.h | 122 +
 Source/JavaScriptCore/heap/SubspaceInlines.h | 76 +
 Source/JavaScriptCore/heap/SuperRegion.cpp | 82 -
 Source/JavaScriptCore/heap/SuperRegion.h | 58 -
 Source/JavaScriptCore/heap/SweepingScope.h | 52 +
 .../SynchronousStopTheWorldMutatorScheduler.cpp | 67 +
 .../heap/SynchronousStopTheWorldMutatorScheduler.h | 57 +
 Source/JavaScriptCore/heap/TinyBloomFilter.h | 5 +-
 .../JavaScriptCore/heap/UnconditionalFinalizer.h | 10 +-
 Source/JavaScriptCore/heap/VisitRaceKey.cpp | 40 +
 Source/JavaScriptCore/heap/VisitRaceKey.h | 107 +
 Source/JavaScriptCore/heap/VisitingTimeout.h | 68 +
 Source/JavaScriptCore/heap/Weak.cpp | 1 +
 Source/JavaScriptCore/heap/Weak.h | 57 +-
 Source/JavaScriptCore/heap/WeakBlock.cpp | 75 +-
 Source/JavaScriptCore/heap/WeakBlock.h | 55 +-
 Source/JavaScriptCore/heap/WeakHandleOwner.cpp | 2 +
 Source/JavaScriptCore/heap/WeakHandleOwner.h | 5 +-
 Source/JavaScriptCore/heap/WeakImpl.h | 7 +-
 Source/JavaScriptCore/heap/WeakInlines.h | 44 +-
 .../JavaScriptCore/heap/WeakReferenceHarvester.h | 7 +-
 Source/JavaScriptCore/heap/WeakSet.cpp | 47 +-
 Source/JavaScriptCore/heap/WeakSet.h | 38 +-
 Source/JavaScriptCore/heap/WeakSetInlines.h | 10 +-
 Source/JavaScriptCore/heap/WriteBarrierBuffer.cpp | 69 -
 Source/JavaScriptCore/heap/WriteBarrierBuffer.h | 71 -
 Source/JavaScriptCore/heap/WriteBarrierSupport.cpp | 2 +
 Source/JavaScriptCore/heap/WriteBarrierSupport.h | 6 +-
 Source/JavaScriptCore/icu/unicode/localpointer.h | 304 +
 Source/JavaScriptCore/icu/unicode/platform.h | 880 +-
 Source/JavaScriptCore/icu/unicode/ptypes.h | 126 +
 Source/JavaScriptCore/icu/unicode/putil.h | 35 +-
 Source/JavaScriptCore/icu/unicode/ucal.h | 1565 ++
 Source/JavaScriptCore/icu/unicode/uchar.h | 329 +-
 Source/JavaScriptCore/icu/unicode/ucnv.h | 103 +-
 Source/JavaScriptCore/icu/unicode/ucol.h | 358 +-
 Source/JavaScriptCore/icu/unicode/uconfig.h | 191 +-
 Source/JavaScriptCore/icu/unicode/ucurr.h | 360 +
 Source/JavaScriptCore/icu/unicode/udat.h | 1433 ++
 Source/JavaScriptCore/icu/unicode/udatpg.h | 588 +
 .../JavaScriptCore/icu/unicode/udisplaycontext.h | 124 +
 Source/JavaScriptCore/icu/unicode/uenum.h | 40 +-
 Source/JavaScriptCore/icu/unicode/uformattable.h | 283 +
 Source/JavaScriptCore/icu/unicode/uiter.h | 6 +-
 Source/JavaScriptCore/icu/unicode/uloc.h | 43 +-
 Source/JavaScriptCore/icu/unicode/umachine.h | 136 +-
 Source/JavaScriptCore/icu/unicode/umisc.h | 60 +
 Source/JavaScriptCore/icu/unicode/unorm2.h | 528 +
 Source/JavaScriptCore/icu/unicode/unum.h | 1247 ++
 Source/JavaScriptCore/icu/unicode/unumsys.h | 174 +
 Source/JavaScriptCore/icu/unicode/urename.h | 686 +-
 Source/JavaScriptCore/icu/unicode/uscript.h | 627 +
 Source/JavaScriptCore/icu/unicode/uset.h | 22 +-
 Source/JavaScriptCore/icu/unicode/ustring.h | 43 +-
 Source/JavaScriptCore/icu/unicode/utf.h | 55 +-
 Source/JavaScriptCore/icu/unicode/utf16.h | 39 +-
 Source/JavaScriptCore/icu/unicode/utf8.h | 271 +-
 Source/JavaScriptCore/icu/unicode/utf_old.h | 1170 +-
 Source/JavaScriptCore/icu/unicode/utypes.h | 228 +-
 Source/JavaScriptCore/icu/unicode/uvernum.h | 167 +
 Source/JavaScriptCore/icu/unicode/uversion.h | 28 +-
 .../JavaScriptCore/inspector/AsyncStackTrace.cpp | 194 +
 Source/JavaScriptCore/inspector/AsyncStackTrace.h | 72 +
 Source/JavaScriptCore/inspector/ConsoleMessage.cpp | 284 +
 Source/JavaScriptCore/inspector/ConsoleMessage.h | 94 +
 .../inspector/ContentSearchUtilities.cpp | 107 +-
 .../inspector/ContentSearchUtilities.h | 17 +-
 Source/JavaScriptCore/inspector/EventLoop.cpp | 73 +
 Source/JavaScriptCore/inspector/EventLoop.h | 55 +
 .../inspector/IdentifiersFactory.cpp | 62 +
 .../JavaScriptCore/inspector/IdentifiersFactory.h | 44 +
 Source/JavaScriptCore/inspector/InjectedScript.cpp | 194 +-
 Source/JavaScriptCore/inspector/InjectedScript.h | 41 +-
 .../inspector/InjectedScriptBase.cpp | 80 +-
 .../JavaScriptCore/inspector/InjectedScriptBase.h | 16 +-
 .../inspector/InjectedScriptHost.cpp | 46 +-
 .../JavaScriptCore/inspector/InjectedScriptHost.h | 23 +-
 .../inspector/InjectedScriptManager.cpp | 93 +-
 .../inspector/InjectedScriptManager.h | 20 +-
 .../inspector/InjectedScriptModule.cpp | 18 +-
 .../inspector/InjectedScriptModule.h | 14 +-
 .../inspector/InjectedScriptSource.js | 1475 +-
 .../JavaScriptCore/inspector/InspectorAgentBase.h | 46 +-
 .../inspector/InspectorAgentRegistry.cpp | 47 +-
 .../inspector/InspectorAgentRegistry.h | 38 +-
 .../inspector/InspectorBackendDispatcher.cpp | 371 +-
 .../inspector/InspectorBackendDispatcher.h | 97 +-
 .../inspector/InspectorEnvironment.h | 22 +-
 .../inspector/InspectorFrontendChannel.h | 21 +-
 .../inspector/InspectorFrontendRouter.cpp | 101 +
 .../inspector/InspectorFrontendRouter.h | 56 +
 .../inspector/InspectorProtocolTypes.h | 180 +
 .../inspector/InspectorTypeBuilder.h | 337 -
 .../JavaScriptCore/inspector/InspectorValues.cpp | 491 +-
 Source/JavaScriptCore/inspector/InspectorValues.h | 322 +-
 .../inspector/JSGlobalObjectConsoleClient.cpp | 174 +
 .../inspector/JSGlobalObjectConsoleClient.h | 71 +
 .../JSGlobalObjectInspectorController.cpp | 315 +
 .../inspector/JSGlobalObjectInspectorController.h | 133 +
 .../inspector/JSGlobalObjectScriptDebugServer.cpp | 65 +
 .../inspector/JSGlobalObjectScriptDebugServer.h | 57 +
 .../inspector/JSInjectedScriptHost.cpp | 496 +-
 .../inspector/JSInjectedScriptHost.h | 36 +-
 .../inspector/JSInjectedScriptHostPrototype.cpp | 200 +-
 .../inspector/JSInjectedScriptHostPrototype.h | 13 +-
 .../inspector/JSJavaScriptCallFrame.cpp | 148 +-
 .../inspector/JSJavaScriptCallFrame.h | 34 +-
 .../inspector/JSJavaScriptCallFramePrototype.cpp | 199 +-
 .../inspector/JSJavaScriptCallFramePrototype.h | 13 +-
 .../inspector/JavaScriptCallFrame.cpp | 9 +-
 .../JavaScriptCore/inspector/JavaScriptCallFrame.h | 25 +-
 .../inspector/PerGlobalObjectWrapperWorld.cpp | 52 +
 .../inspector/PerGlobalObjectWrapperWorld.h | 45 +
 .../JavaScriptCore/inspector/ScriptArguments.cpp | 118 +
 Source/JavaScriptCore/inspector/ScriptArguments.h | 73 +
 Source/JavaScriptCore/inspector/ScriptBreakpoint.h | 24 +-
 .../JavaScriptCore/inspector/ScriptCallFrame.cpp | 78 +
 Source/JavaScriptCore/inspector/ScriptCallFrame.h | 65 +
 .../JavaScriptCore/inspector/ScriptCallStack.cpp | 116 +
 Source/JavaScriptCore/inspector/ScriptCallStack.h | 69 +
 .../inspector/ScriptCallStackFactory.cpp | 179 +
 .../inspector/ScriptCallStackFactory.h | 53 +
 .../JavaScriptCore/inspector/ScriptDebugListener.h | 46 +-
 .../JavaScriptCore/inspector/ScriptDebugServer.cpp | 259 +-
 .../JavaScriptCore/inspector/ScriptDebugServer.h | 65 +-
 .../inspector/agents/InspectorAgent.cpp | 95 +-
 .../inspector/agents/InspectorAgent.h | 55 +-
 .../inspector/agents/InspectorConsoleAgent.cpp | 238 +
 .../inspector/agents/InspectorConsoleAgent.h | 94 +
 .../inspector/agents/InspectorDebuggerAgent.cpp | 969 +-
 .../inspector/agents/InspectorDebuggerAgent.h | 180 +-
 .../inspector/agents/InspectorHeapAgent.cpp | 314 +
 .../inspector/agents/InspectorHeapAgent.h | 82 +
 .../inspector/agents/InspectorRuntimeAgent.cpp | 277 +-
 .../inspector/agents/InspectorRuntimeAgent.h | 65 +-
 .../agents/InspectorScriptProfilerAgent.cpp | 258 +
 .../agents/InspectorScriptProfilerAgent.h | 84 +
 .../agents/JSGlobalObjectConsoleAgent.cpp | 46 +
 .../inspector/agents/JSGlobalObjectConsoleAgent.h | 45 +
 .../agents/JSGlobalObjectDebuggerAgent.cpp | 63 +
 .../inspector/agents/JSGlobalObjectDebuggerAgent.h | 55 +
 .../agents/JSGlobalObjectRuntimeAgent.cpp | 61 +
 .../inspector/agents/JSGlobalObjectRuntimeAgent.h | 55 +
 .../augmentable/AlternateDispatchableAgent.h | 70 +
 .../augmentable/AugmentableInspectorController.h | 54 +
 .../AugmentableInspectorControllerClient.h | 42 +
 .../inspector/protocol/ApplicationCache.json | 87 +
 Source/JavaScriptCore/inspector/protocol/CSS.json | 457 +
 .../JavaScriptCore/inspector/protocol/Console.json | 105 +
 Source/JavaScriptCore/inspector/protocol/DOM.json | 581 +
 .../inspector/protocol/DOMDebugger.json | 73 +
 .../inspector/protocol/DOMStorage.json | 88 +
 .../inspector/protocol/Database.json | 71 +
 .../inspector/protocol/Debugger.json | 115 +-
 Source/JavaScriptCore/inspector/protocol/Heap.json | 100 +
 .../inspector/protocol/IndexedDB.json | 143 +
 .../inspector/protocol/Inspector.json | 51 +
 .../inspector/protocol/InspectorDomain.json | 41 -
 .../inspector/protocol/LayerTree.json | 114 +
 .../JavaScriptCore/inspector/protocol/Memory.json | 70 +
 .../JavaScriptCore/inspector/protocol/Network.json | 315 +
 .../inspector/protocol/OverlayTypes.json | 130 +
 Source/JavaScriptCore/inspector/protocol/Page.json | 305 +
 .../JavaScriptCore/inspector/protocol/Replay.json | 264 +
 .../JavaScriptCore/inspector/protocol/Runtime.json | 196 +-
 .../inspector/protocol/ScriptProfiler.json | 99 +
 .../inspector/protocol/Timeline.json | 118 +
 .../JavaScriptCore/inspector/protocol/Worker.json | 52 +
 .../inspector/remote/RemoteAutomationTarget.cpp | 47 +
 .../inspector/remote/RemoteAutomationTarget.h | 56 +
 .../inspector/remote/RemoteConnectionToTarget.h | 114 +
 .../inspector/remote/RemoteControllableTarget.cpp | 52 +
 .../inspector/remote/RemoteControllableTarget.h | 74 +
 .../inspector/remote/RemoteInspectionTarget.cpp | 72 +
 .../inspector/remote/RemoteInspectionTarget.h | 79 +
 .../inspector/remote/RemoteInspector.cpp | 224 +
 .../inspector/remote/RemoteInspector.h | 183 +
 .../inspector/remote/RemoteInspectorConstants.h | 102 +
 .../inspector/scripts/CodeGeneratorInspector.py | 2613 ---
 .../scripts/CodeGeneratorInspectorStrings.py | 342 -
 .../inspector/scripts/codegen/__init__.py | 25 +
 .../inspector/scripts/codegen/cpp_generator.py | 319 +
 .../scripts/codegen/cpp_generator_templates.py | 258 +
 ...rate_cpp_alternate_backend_dispatcher_header.py | 93 +
 .../generate_cpp_backend_dispatcher_header.py | 217 +
 ...nerate_cpp_backend_dispatcher_implementation.py | 324 +
 .../generate_cpp_frontend_dispatcher_header.py | 116 +
 ...erate_cpp_frontend_dispatcher_implementation.py | 125 +
 .../codegen/generate_cpp_protocol_types_header.py | 424 +
 .../generate_cpp_protocol_types_implementation.py | 260 +
 .../codegen/generate_js_backend_commands.py | 145 +
 .../generate_objc_backend_dispatcher_header.py | 106 +
 ...erate_objc_backend_dispatcher_implementation.py | 210 +
 .../codegen/generate_objc_configuration_header.py | 85 +
 .../generate_objc_configuration_implementation.py | 147 +
 ...rate_objc_frontend_dispatcher_implementation.py | 154 +
 .../scripts/codegen/generate_objc_header.py | 245 +
 .../codegen/generate_objc_internal_header.py | 74 +
 ...nerate_objc_protocol_type_conversions_header.py | 165 +
 ...bjc_protocol_type_conversions_implementation.py | 138 +
 .../generate_objc_protocol_types_implementation.py | 215 +
 .../inspector/scripts/codegen/generator.py | 274 +
 .../scripts/codegen/generator_templates.py | 61 +
 .../inspector/scripts/codegen/models.py | 680 +
 .../inspector/scripts/codegen/objc_generator.py | 554 +
 .../scripts/codegen/objc_generator_templates.py | 155 +
 Source/JavaScriptCore/inspector/scripts/cssmin.py | 44 -
 .../scripts/generate-combined-inspector-json.py | 68 -
 .../generate-inspector-protocol-bindings.py | 270 +
 .../inline-and-minify-stylesheets-and-scripts.py | 81 -
 Source/JavaScriptCore/inspector/scripts/jsmin.py | 238 -
 .../tests/all/definitions-with-mac-platform.json | 28 +
 .../definitions-with-mac-platform.json-result | 1203 ++
 .../generic/commands-with-async-attribute.json | 109 +
 ...mands-with-optional-call-return-parameters.json | 85 +
 .../generic/definitions-with-mac-platform.json | 28 +
 .../scripts/tests/generic/domain-availability.json | 11 +
 .../domains-with-varying-command-sizes.json | 54 +
 .../scripts/tests/generic/enum-values.json | 35 +
 .../generic/events-with-optional-parameters.json | 59 +
 .../commands-with-async-attribute.json-result | 1780 ++
 ...ith-optional-call-return-parameters.json-result | 1643 ++
 .../definitions-with-mac-platform.json-result | 866 +
 .../expected/domain-availability.json-result | 1120 ++
 .../domains-with-varying-command-sizes.json-result | 1399 ++
 .../tests/generic/expected/enum-values.json-result | 1208 ++
 .../events-with-optional-parameters.json-result | 1210 ++
 ...ail-on-command-with-invalid-platform.json-error | 1 +
 .../fail-on-domain-availability.json-error | 1 +
 ...plicate-command-call-parameter-names.json-error | 1 +
 ...icate-command-return-parameter-names.json-error | 1 +
 ...l-on-duplicate-event-parameter-names.json-error | 1 +
 .../fail-on-duplicate-type-declarations.json-error | 1 +
 .../fail-on-duplicate-type-member-names.json-error | 1 +
 .../fail-on-enum-with-no-values.json-error | 1 +
 ...number-typed-optional-parameter-flag.json-error | 1 +
 ...on-number-typed-optional-type-member.json-error | 1 +
 ...string-typed-optional-parameter-flag.json-error | 1 +
 ...on-string-typed-optional-type-member.json-error | 1 +
 ...ype-declaration-using-type-reference.json-error | 1 +
 ...-on-type-reference-as-primitive-type.json-error | 1 +
 .../fail-on-type-with-invalid-platform.json-error | 1 +
 .../fail-on-type-with-lowercase-name.json-error | 1 +
 ...n-type-reference-in-type-declaration.json-error | 1 +
 ...nknown-type-reference-in-type-member.json-error | 1 +
 ...enerate-domains-with-feature-guards.json-result | 1230 ++
 .../same-type-id-different-domain.json-result | 910 ++
 .../shadowed-optional-type-setters.json-result | 1119 ++
 ...-declaration-aliased-primitive-type.json-result | 887 +
 .../type-declaration-array-type.json-result | 1055 ++
 .../type-declaration-enum-type.json-result | 1064 ++
 .../type-declaration-object-type.json-result | 2012 +++
 .../type-requiring-runtime-casts.json-result | 1617 ++
 .../expected/worker-supported-domains.json-result | 1121 ++
 .../fail-on-command-with-invalid-platform.json | 16 +
 .../tests/generic/fail-on-domain-availability.json | 9 +
 ...-on-duplicate-command-call-parameter-names.json | 16 +
 ...n-duplicate-command-return-parameter-names.json | 16 +
 .../fail-on-duplicate-event-parameter-names.json | 12 +
 .../fail-on-duplicate-type-declarations.json | 15 +
 .../fail-on-duplicate-type-member-names.json | 15 +
 .../tests/generic/fail-on-enum-with-no-values.json | 10 +
 ...il-on-number-typed-optional-parameter-flag.json | 18 +
 .../fail-on-number-typed-optional-type-member.json | 16 +
 ...il-on-string-typed-optional-parameter-flag.json | 18 +
 .../fail-on-string-typed-optional-type-member.json | 16 +
 ...l-on-type-declaration-using-type-reference.json | 13 +
 .../fail-on-type-reference-as-primitive-type.json | 18 +
 .../fail-on-type-with-invalid-platform.json | 11 +
 .../generic/fail-on-type-with-lowercase-name.json | 10 +
 ...unknown-type-reference-in-type-declaration.json | 12 +
 ...l-on-unknown-type-reference-in-type-member.json | 15 +
 .../generate-domains-with-feature-guards.json | 36 +
 .../generic/same-type-id-different-domain.json | 22 +
 .../generic/shadowed-optional-type-setters.json | 31 +
 .../type-declaration-aliased-primitive-type.json | 10 +
 .../tests/generic/type-declaration-array-type.json | 50 +
 .../tests/generic/type-declaration-enum-type.json | 15 +
 .../generic/type-declaration-object-type.json | 83 +
 .../generic/type-requiring-runtime-casts.json | 51 +
 .../tests/generic/worker-supported-domains.json | 11 +
 Source/JavaScriptCore/inspector/scripts/xxd.pl | 45 -
 Source/JavaScriptCore/interpreter/AbstractPC.cpp | 4 +-
 Source/JavaScriptCore/interpreter/AbstractPC.h | 7 +-
 Source/JavaScriptCore/interpreter/CLoopStack.cpp | 164 +
 Source/JavaScriptCore/interpreter/CLoopStack.h | 114 +
 .../JavaScriptCore/interpreter/CLoopStackInlines.h | 81 +
 Source/JavaScriptCore/interpreter/CachedCall.h | 38 +-
 Source/JavaScriptCore/interpreter/CallFrame.cpp | 281 +-
 Source/JavaScriptCore/interpreter/CallFrame.h | 319 +-
 .../JavaScriptCore/interpreter/CallFrameClosure.h | 10 +-
 .../JavaScriptCore/interpreter/CallFrameInlines.h | 143 -
 Source/JavaScriptCore/interpreter/FrameTracers.h | 107 +
 Source/JavaScriptCore/interpreter/Interpreter.cpp | 1203 +-
 Source/JavaScriptCore/interpreter/Interpreter.h | 232 +-
 .../interpreter/InterpreterInlines.h | 45 +
 Source/JavaScriptCore/interpreter/JSStack.cpp | 160 -
 Source/JavaScriptCore/interpreter/JSStack.h | 164 -
 Source/JavaScriptCore/interpreter/JSStackInlines.h | 295 -
 .../JavaScriptCore/interpreter/ProtoCallFrame.cpp | 20 +-
 Source/JavaScriptCore/interpreter/ProtoCallFrame.h | 27 +-
 Source/JavaScriptCore/interpreter/Register.h | 36 +-
 .../JavaScriptCore/interpreter/ShadowChicken.cpp | 468 +
 Source/JavaScriptCore/interpreter/ShadowChicken.h | 225 +
 .../interpreter/ShadowChickenInlines.h | 47 +
 Source/JavaScriptCore/interpreter/StackVisitor.cpp | 433 +-
 Source/JavaScriptCore/interpreter/StackVisitor.h | 108 +-
 Source/JavaScriptCore/interpreter/VMEntryRecord.h | 75 +
 Source/JavaScriptCore/interpreter/VMInspector.cpp | 572 -
 Source/JavaScriptCore/interpreter/VMInspector.h | 89 -
 Source/JavaScriptCore/javascriptcoregtk.pc.in | 10 +-
 Source/JavaScriptCore/jit/AssemblyHelpers.cpp | 630 +-
 Source/JavaScriptCore/jit/AssemblyHelpers.h | 1342 +-
 Source/JavaScriptCore/jit/BinarySwitch.cpp | 391 +
 Source/JavaScriptCore/jit/BinarySwitch.h | 143 +
 Source/JavaScriptCore/jit/CCallHelpers.cpp | 73 +
 Source/JavaScriptCore/jit/CCallHelpers.h | 1185 +-
 Source/JavaScriptCore/jit/CachedRecovery.cpp | 71 +
 Source/JavaScriptCore/jit/CachedRecovery.h | 134 +
 Source/JavaScriptCore/jit/CallFrameShuffleData.cpp | 68 +
 Source/JavaScriptCore/jit/CallFrameShuffleData.h | 52 +
 Source/JavaScriptCore/jit/CallFrameShuffler.cpp | 776 +
 Source/JavaScriptCore/jit/CallFrameShuffler.h | 804 +
 .../JavaScriptCore/jit/CallFrameShuffler32_64.cpp | 305 +
 Source/JavaScriptCore/jit/CallFrameShuffler64.cpp | 369 +
 .../JavaScriptCore/jit/ClosureCallStubRoutine.cpp | 63 -
 Source/JavaScriptCore/jit/ClosureCallStubRoutine.h | 66 -
 Source/JavaScriptCore/jit/CompactJITCodeMap.h | 41 +-
 .../jit/ExecutableAllocationFuzz.cpp | 73 +
 .../JavaScriptCore/jit/ExecutableAllocationFuzz.h | 47 +
 Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 463 +-
 Source/JavaScriptCore/jit/ExecutableAllocator.h | 113 +-
 .../jit/ExecutableAllocatorFixedVMPool.cpp | 194 -
 Source/JavaScriptCore/jit/FPRInfo.h | 112 +-
 .../JavaScriptCore/jit/GCAwareJITStubRoutine.cpp | 85 +-
 Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h | 57 +-
 Source/JavaScriptCore/jit/GPRInfo.cpp | 51 +
 Source/JavaScriptCore/jit/GPRInfo.h | 392 +-
 Source/JavaScriptCore/jit/HostCallReturnValue.cpp | 1 +
 Source/JavaScriptCore/jit/HostCallReturnValue.h | 12 +-
 Source/JavaScriptCore/jit/ICStats.cpp | 128 +
 Source/JavaScriptCore/jit/ICStats.h | 194 +
 Source/JavaScriptCore/jit/IntrinsicEmitter.cpp | 136 +
 Source/JavaScriptCore/jit/JIT.cpp | 655 +-
 Source/JavaScriptCore/jit/JIT.h | 462 +-
 Source/JavaScriptCore/jit/JITAddGenerator.cpp | 187 +
 Source/JavaScriptCore/jit/JITAddGenerator.h | 79 +
 Source/JavaScriptCore/jit/JITArithmetic.cpp | 1065 +-
 Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 782 +-
 Source/JavaScriptCore/jit/JITBitAndGenerator.cpp | 85 +
 Source/JavaScriptCore/jit/JITBitAndGenerator.h | 46 +
 .../JavaScriptCore/jit/JITBitBinaryOpGenerator.h | 68 +
 Source/JavaScriptCore/jit/JITBitOrGenerator.cpp | 74 +
 Source/JavaScriptCore/jit/JITBitOrGenerator.h | 46 +
 Source/JavaScriptCore/jit/JITBitXorGenerator.cpp | 73 +
 Source/JavaScriptCore/jit/JITBitXorGenerator.h | 46 +
 Source/JavaScriptCore/jit/JITCall.cpp | 297 +-
 Source/JavaScriptCore/jit/JITCall32_64.cpp | 319 +-
 Source/JavaScriptCore/jit/JITCode.cpp | 172 +-
 Source/JavaScriptCore/jit/JITCode.h | 82 +-
 Source/JavaScriptCore/jit/JITCompilationEffort.h | 8 +-
 Source/JavaScriptCore/jit/JITDisassembler.cpp | 9 +-
 Source/JavaScriptCore/jit/JITDisassembler.h | 35 +-
 Source/JavaScriptCore/jit/JITDivGenerator.cpp | 139 +
 Source/JavaScriptCore/jit/JITDivGenerator.h | 82 +
 Source/JavaScriptCore/jit/JITExceptions.cpp | 51 +-
 Source/JavaScriptCore/jit/JITExceptions.h | 17 +-
 .../JavaScriptCore/jit/JITInlineCacheGenerator.cpp | 126 +-
 .../JavaScriptCore/jit/JITInlineCacheGenerator.h | 59 +-
 Source/JavaScriptCore/jit/JITInlines.h | 619 +-
 .../JavaScriptCore/jit/JITLeftShiftGenerator.cpp | 84 +
 Source/JavaScriptCore/jit/JITLeftShiftGenerator.h | 46 +
 Source/JavaScriptCore/jit/JITMathIC.h | 290 +
 Source/JavaScriptCore/jit/JITMathICForwards.h | 46 +
 Source/JavaScriptCore/jit/JITMathICInlineResult.h | 40 +
 Source/JavaScriptCore/jit/JITMulGenerator.cpp | 254 +
 Source/JavaScriptCore/jit/JITMulGenerator.h | 79 +
 Source/JavaScriptCore/jit/JITNegGenerator.cpp | 127 +
 Source/JavaScriptCore/jit/JITNegGenerator.h | 57 +
 Source/JavaScriptCore/jit/JITOpcodes.cpp | 1283 +-
 Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 946 +-
 Source/JavaScriptCore/jit/JITOperationWrappers.h | 413 -
 Source/JavaScriptCore/jit/JITOperations.cpp | 2496 ++-
 Source/JavaScriptCore/jit/JITOperations.h | 552 +-
 Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp | 46 +
 Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1297 +-
 .../JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 901 +-
 .../JavaScriptCore/jit/JITRightShiftGenerator.cpp | 140 +
 Source/JavaScriptCore/jit/JITRightShiftGenerator.h | 60 +
 Source/JavaScriptCore/jit/JITStubRoutine.cpp | 9 +-
 Source/JavaScriptCore/jit/JITStubRoutine.h | 48 +-
 Source/JavaScriptCore/jit/JITStubs.cpp | 54 -
 Source/JavaScriptCore/jit/JITStubs.h | 59 -
 Source/JavaScriptCore/jit/JITStubsARM.h | 302 -
 Source/JavaScriptCore/jit/JITStubsARMv7.h | 351 -
 Source/JavaScriptCore/jit/JITStubsMSVC64.asm | 44 +
 Source/JavaScriptCore/jit/JITStubsX86.h | 649 -
 Source/JavaScriptCore/jit/JITStubsX86Common.h | 148 -
 Source/JavaScriptCore/jit/JITStubsX86_64.h | 218 -
 Source/JavaScriptCore/jit/JITSubGenerator.cpp | 142 +
 Source/JavaScriptCore/jit/JITSubGenerator.h | 76 +
 Source/JavaScriptCore/jit/JITThunks.cpp | 72 +-
 Source/JavaScriptCore/jit/JITThunks.h | 72 +-
 .../jit/JITToDFGDeferredCompilationCallback.cpp | 16 +-
 .../jit/JITToDFGDeferredCompilationCallback.h | 15 +-
 Source/JavaScriptCore/jit/JITWorklist.cpp | 330 +
 Source/JavaScriptCore/jit/JITWorklist.h | 83 +
 Source/JavaScriptCore/jit/JITWriteBarrier.h | 147 -
 Source/JavaScriptCore/jit/JSInterfaceJIT.h | 56 +-
 Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp | 306 +
 Source/JavaScriptCore/jit/PCToCodeOriginMap.h | 101 +
 .../jit/PolymorphicCallStubRoutine.cpp | 137 +
 .../jit/PolymorphicCallStubRoutine.h | 111 +
 Source/JavaScriptCore/jit/Reg.cpp | 58 +
 Source/JavaScriptCore/jit/Reg.h | 248 +
 Source/JavaScriptCore/jit/RegisterAtOffset.cpp | 45 +
 Source/JavaScriptCore/jit/RegisterAtOffset.h | 77 +
 Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp | 72 +
 Source/JavaScriptCore/jit/RegisterAtOffsetList.h | 72 +
 Source/JavaScriptCore/jit/RegisterMap.h | 110 +
 Source/JavaScriptCore/jit/RegisterSet.cpp | 301 +-
 Source/JavaScriptCore/jit/RegisterSet.h | 135 +-
 Source/JavaScriptCore/jit/Repatch.cpp | 2095 +--
 Source/JavaScriptCore/jit/Repatch.h | 52 +-
 .../jit/ScratchRegisterAllocator.cpp | 302 +
 .../JavaScriptCore/jit/ScratchRegisterAllocator.h | 198 +-
 Source/JavaScriptCore/jit/SetupVarargsFrame.cpp | 141 +
 Source/JavaScriptCore/jit/SetupVarargsFrame.h | 43 +
 Source/JavaScriptCore/jit/SlowPathCall.h | 9 +-
 Source/JavaScriptCore/jit/SnippetOperand.h | 104 +
 Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 51 +-
 Source/JavaScriptCore/jit/SpillRegistersMode.h | 32 +
 Source/JavaScriptCore/jit/TagRegistersMode.cpp | 50 +
 Source/JavaScriptCore/jit/TagRegistersMode.h | 42 +
 Source/JavaScriptCore/jit/TempRegisterSet.cpp | 3 +
 Source/JavaScriptCore/jit/TempRegisterSet.h | 27 +-
 Source/JavaScriptCore/jit/ThunkGenerator.h | 8 +-
 Source/JavaScriptCore/jit/ThunkGenerators.cpp | 930 +-
 Source/JavaScriptCore/jit/ThunkGenerators.h | 33 +-
 Source/JavaScriptCore/jit/UnusedPointer.h | 5 +-
 Source/JavaScriptCore/jsc.cpp | 3785 ++++-
 Source/JavaScriptCore/llint/LLIntCLoop.cpp | 6 +-
 Source/JavaScriptCore/llint/LLIntCLoop.h | 14 +-
 Source/JavaScriptCore/llint/LLIntCommon.h | 11 +-
 Source/JavaScriptCore/llint/LLIntData.cpp | 382 +-
 Source/JavaScriptCore/llint/LLIntData.h | 63 +-
 Source/JavaScriptCore/llint/LLIntEntrypoint.cpp | 55 +-
 Source/JavaScriptCore/llint/LLIntEntrypoint.h | 11 +-
 Source/JavaScriptCore/llint/LLIntExceptions.cpp | 20 +-
 Source/JavaScriptCore/llint/LLIntExceptions.h | 11 +-
 .../JavaScriptCore/llint/LLIntOfflineAsmConfig.h | 63 +-
 .../JavaScriptCore/llint/LLIntOffsetsExtractor.cpp | 29 +-
 Source/JavaScriptCore/llint/LLIntOpcode.h | 48 +-
 Source/JavaScriptCore/llint/LLIntPCRanges.h | 51 +
 Source/JavaScriptCore/llint/LLIntSlowPaths.cpp | 1176 +-
 Source/JavaScriptCore/llint/LLIntSlowPaths.h | 58 +-
 Source/JavaScriptCore/llint/LLIntThunks.cpp | 85 +-
 Source/JavaScriptCore/llint/LLIntThunks.h | 24 +-
 .../JavaScriptCore/llint/LowLevelInterpreter.asm | 1518 +-
 .../JavaScriptCore/llint/LowLevelInterpreter.cpp | 226 +-
 Source/JavaScriptCore/llint/LowLevelInterpreter.h | 25 +-
 .../llint/LowLevelInterpreter32_64.asm | 2024 +--
 .../JavaScriptCore/llint/LowLevelInterpreter64.asm | 1925 ++-
 Source/JavaScriptCore/offlineasm/arm.rb | 67 +-
 Source/JavaScriptCore/offlineasm/arm64.rb | 194 +-
 Source/JavaScriptCore/offlineasm/asm.rb | 149 +-
 Source/JavaScriptCore/offlineasm/ast.rb | 179 +-
 Source/JavaScriptCore/offlineasm/backends.rb | 64 +-
 Source/JavaScriptCore/offlineasm/cloop.rb | 72 +-
 Source/JavaScriptCore/offlineasm/config.rb | 25 +-
 .../offlineasm/generate_offset_extractor.rb | 11 +-
 Source/JavaScriptCore/offlineasm/instructions.rb | 46 +-
 Source/JavaScriptCore/offlineasm/mips.rb | 243 +-
 Source/JavaScriptCore/offlineasm/parser.rb | 167 +-
 Source/JavaScriptCore/offlineasm/registers.rb | 26 +-
 Source/JavaScriptCore/offlineasm/risc.rb | 2 +-
 Source/JavaScriptCore/offlineasm/self_hash.rb | 15 +
 Source/JavaScriptCore/offlineasm/settings.rb | 59 +-
 Source/JavaScriptCore/offlineasm/sh4.rb | 1072 --
 Source/JavaScriptCore/offlineasm/transform.rb | 12 +
 Source/JavaScriptCore/offlineasm/x86.rb | 801 +-
 Source/JavaScriptCore/os-win32/stdbool.h | 45 -
 Source/JavaScriptCore/parser/ASTBuilder.h | 991 +-
 Source/JavaScriptCore/parser/Keywords.table | 16 +-
 Source/JavaScriptCore/parser/Lexer.cpp | 1179 +-
 Source/JavaScriptCore/parser/Lexer.h | 168 +-
 Source/JavaScriptCore/parser/ModuleAnalyzer.cpp | 149 +
 Source/JavaScriptCore/parser/ModuleAnalyzer.h | 53 +
 Source/JavaScriptCore/parser/ModuleScopeData.h | 66 +
 Source/JavaScriptCore/parser/NodeConstructors.h | 413 +-
 Source/JavaScriptCore/parser/NodeInfo.h | 62 -
 Source/JavaScriptCore/parser/Nodes.cpp | 184 +-
 Source/JavaScriptCore/parser/Nodes.h | 1290 +-
 .../JavaScriptCore/parser/NodesAnalyzeModule.cpp | 92 +
 Source/JavaScriptCore/parser/Parser.cpp | 3867 ++++-
 Source/JavaScriptCore/parser/Parser.h | 1571 +-
 Source/JavaScriptCore/parser/ParserArena.cpp | 48 +-
 Source/JavaScriptCore/parser/ParserArena.h | 40 +-
 Source/JavaScriptCore/parser/ParserError.h | 112 +-
 Source/JavaScriptCore/parser/ParserFunctionInfo.h | 52 +
 Source/JavaScriptCore/parser/ParserModes.h | 235 +-
 Source/JavaScriptCore/parser/ParserTokens.h | 84 +-
 Source/JavaScriptCore/parser/ResultType.h | 57 +-
 Source/JavaScriptCore/parser/SourceCode.cpp | 42 -
 Source/JavaScriptCore/parser/SourceCode.h | 93 +-
 Source/JavaScriptCore/parser/SourceCodeKey.h | 133 +
 Source/JavaScriptCore/parser/SourceProvider.cpp | 15 +-
 Source/JavaScriptCore/parser/SourceProvider.h | 99 +-
 .../JavaScriptCore/parser/SourceProviderCache.cpp | 4 +-
 Source/JavaScriptCore/parser/SourceProviderCache.h | 11 +-
 .../parser/SourceProviderCacheItem.h | 101 +-
 Source/JavaScriptCore/parser/SyntaxChecker.h | 301 +-
 .../JavaScriptCore/parser/UnlinkedSourceCode.cpp | 43 +
 Source/JavaScriptCore/parser/UnlinkedSourceCode.h | 108 +
 .../JavaScriptCore/parser/VariableEnvironment.cpp | 99 +
 Source/JavaScriptCore/parser/VariableEnvironment.h | 117 +
 Source/JavaScriptCore/postprocess-headers.sh | 18 +
 Source/JavaScriptCore/profiler/CallIdentifier.h | 101 -
 Source/JavaScriptCore/profiler/LegacyProfiler.cpp | 184 -
 Source/JavaScriptCore/profiler/LegacyProfiler.h | 73 -
 Source/JavaScriptCore/profiler/Profile.cpp | 102 -
 Source/JavaScriptCore/profiler/Profile.h | 68 -
 .../JavaScriptCore/profiler/ProfileGenerator.cpp | 219 -
 Source/JavaScriptCore/profiler/ProfileGenerator.h | 79 -
 Source/JavaScriptCore/profiler/ProfileNode.cpp | 247 -
 Source/JavaScriptCore/profiler/ProfileNode.h | 140 -
 .../JavaScriptCore/profiler/ProfilerBytecode.cpp | 3 +-
 Source/JavaScriptCore/profiler/ProfilerBytecode.h | 13 +-
 .../profiler/ProfilerBytecodeSequence.cpp | 30 +-
 .../profiler/ProfilerBytecodeSequence.h | 6 +-
 .../JavaScriptCore/profiler/ProfilerBytecodes.cpp | 2 +-
 Source/JavaScriptCore/profiler/ProfilerBytecodes.h | 6 +-
 .../profiler/ProfilerCompilation.cpp | 99 +-
 .../JavaScriptCore/profiler/ProfilerCompilation.h | 25 +-
 .../profiler/ProfilerCompilationKind.cpp | 8 +-
 .../profiler/ProfilerCompilationKind.h | 12 +-
 .../profiler/ProfilerCompiledBytecode.cpp | 2 +-
 .../profiler/ProfilerCompiledBytecode.h | 6 +-
 .../JavaScriptCore/profiler/ProfilerDatabase.cpp | 94 +-
 Source/JavaScriptCore/profiler/ProfilerDatabase.h | 34 +-
 Source/JavaScriptCore/profiler/ProfilerEvent.cpp | 63 +
 Source/JavaScriptCore/profiler/ProfilerEvent.h | 74 +
 .../profiler/ProfilerExecutionCounter.h | 6 +-
 .../profiler/ProfilerJettisonReason.cpp | 73 +
 .../profiler/ProfilerJettisonReason.h | 50 +
 Source/JavaScriptCore/profiler/ProfilerOSRExit.cpp | 2 +-
 Source/JavaScriptCore/profiler/ProfilerOSRExit.h | 6 +-
 .../profiler/ProfilerOSRExitSite.cpp | 11 +-
 .../JavaScriptCore/profiler/ProfilerOSRExitSite.h | 6 +-
 Source/JavaScriptCore/profiler/ProfilerOrigin.cpp | 2 +-
 Source/JavaScriptCore/profiler/ProfilerOrigin.h | 6 +-
 .../profiler/ProfilerOriginStack.cpp | 14 +-
 .../JavaScriptCore/profiler/ProfilerOriginStack.h | 6 +-
 .../profiler/ProfilerProfiledBytecodes.cpp | 2 +-
 .../profiler/ProfilerProfiledBytecodes.h | 6 +-
 Source/JavaScriptCore/profiler/ProfilerUID.cpp | 56 +
 Source/JavaScriptCore/profiler/ProfilerUID.h | 110 +
 Source/JavaScriptCore/replay/EmptyInputCursor.h | 76 +
 Source/JavaScriptCore/replay/EncodedValue.cpp | 201 +
 Source/JavaScriptCore/replay/EncodedValue.h | 222 +
 Source/JavaScriptCore/replay/InputCursor.h | 85 +
 Source/JavaScriptCore/replay/JSInputs.json | 49 +
 .../JavaScriptCore/replay/NondeterministicInput.h | 86 +
 .../replay/scripts/CodeGeneratorReplayInputs.py | 1070 ++
 .../scripts/CodeGeneratorReplayInputsTemplates.py | 265 +
 .../fail-on-c-style-enum-no-storage.json-error | 1 +
 .../fail-on-duplicate-enum-type.json-error | 1 +
 .../fail-on-duplicate-input-names.json-error | 1 +
 .../fail-on-duplicate-type-names.json-error | 1 +
 .../fail-on-enum-type-missing-values.json-error | 1 +
 .../fail-on-missing-input-member-name.json-error | 1 +
 .../expected/fail-on-missing-input-name.json-error | 1 +
 .../fail-on-missing-input-queue.json-error | 1 +
 .../expected/fail-on-missing-type-mode.json-error | 1 +
 .../expected/fail-on-missing-type-name.json-error | 1 +
 .../fail-on-unknown-input-queue.json-error | 1 +
 .../fail-on-unknown-member-type.json-error | 1 +
 .../expected/fail-on-unknown-type-mode.json-error | 1 +
 ...s-with-guarded-values.json-TestReplayInputs.cpp | 74 +
 ...ers-with-guarded-values.json-TestReplayInputs.h | 77 +
 ...enum-encoding-helpers.json-TestReplayInputs.cpp | 228 +
 ...e-enum-encoding-helpers.json-TestReplayInputs.h | 105 +
 ...erate-enum-with-guard.json-TestReplayInputs.cpp | 106 +
 ...enerate-enum-with-guard.json-TestReplayInputs.h | 92 +
 ...s-with-same-base-name.json-TestReplayInputs.cpp | 144 +
 ...ums-with-same-base-name.json-TestReplayInputs.h | 95 +
 .../generate-event-loop-shape-types.json-error | 1 +
 ...rate-input-with-guard.json-TestReplayInputs.cpp | 108 +
 ...nerate-input-with-guard.json-TestReplayInputs.h | 106 +
 ...t-with-vector-members.json-TestReplayInputs.cpp | 118 +
 ...put-with-vector-members.json-TestReplayInputs.h | 106 +
 ...ate-inputs-with-flags.json-TestReplayInputs.cpp | 104 +
 ...erate-inputs-with-flags.json-TestReplayInputs.h | 98 +
 ...e-memoized-type-modes.json-TestReplayInputs.cpp | 104 +
 ...ate-memoized-type-modes.json-TestReplayInputs.h | 98 +
 .../tests/fail-on-c-style-enum-no-storage.json | 25 +
 .../scripts/tests/fail-on-duplicate-enum-type.json | 35 +
 .../tests/fail-on-duplicate-input-names.json | 29 +
 .../tests/fail-on-duplicate-type-names.json | 24 +
 .../tests/fail-on-enum-type-missing-values.json | 23 +
 .../tests/fail-on-missing-input-member-name.json | 22 +
 .../scripts/tests/fail-on-missing-input-name.json | 22 +
 .../scripts/tests/fail-on-missing-input-queue.json | 22 +
 .../scripts/tests/fail-on-missing-type-mode.json | 23 +
 .../scripts/tests/fail-on-missing-type-name.json | 23 +
 .../scripts/tests/fail-on-unknown-input-queue.json | 23 +
 .../scripts/tests/fail-on-unknown-member-type.json | 23 +
 .../scripts/tests/fail-on-unknown-type-mode.json | 23 +
 ...-enum-encoding-helpers-with-guarded-values.json | 29 +
 .../tests/generate-enum-encoding-helpers.json | 45 +
 .../scripts/tests/generate-enum-with-guard.json | 36 +
 .../tests/generate-enums-with-same-base-name.json | 52 +
 .../tests/generate-event-loop-shape-types.json | 38 +
 .../scripts/tests/generate-input-with-guard.json | 30 +
 .../tests/generate-input-with-vector-members.json | 42 +
 .../scripts/tests/generate-inputs-with-flags.json | 29 +
 .../tests/generate-memoized-type-modes.json | 29 +
 .../runtime/AbstractModuleRecord.cpp | 769 +
 .../JavaScriptCore/runtime/AbstractModuleRecord.h | 180 +
 Source/JavaScriptCore/runtime/ArgList.cpp | 73 +-
 Source/JavaScriptCore/runtime/ArgList.h | 57 +-
 Source/JavaScriptCore/runtime/Arguments.cpp | 387 -
 Source/JavaScriptCore/runtime/Arguments.h | 298 -
 .../runtime/ArgumentsIteratorConstructor.cpp | 45 -
 .../runtime/ArgumentsIteratorConstructor.h | 63 -
 .../runtime/ArgumentsIteratorPrototype.cpp | 61 -
 .../runtime/ArgumentsIteratorPrototype.h | 61 -
 Source/JavaScriptCore/runtime/ArgumentsMode.h | 35 +
 Source/JavaScriptCore/runtime/ArityCheckMode.h | 35 +
 Source/JavaScriptCore/runtime/ArrayBuffer.cpp | 289 +-
 Source/JavaScriptCore/runtime/ArrayBuffer.h | 232 +-
 .../runtime/ArrayBufferNeuteringWatchpoint.cpp | 11 +-
 .../runtime/ArrayBufferNeuteringWatchpoint.h | 19 +-
 .../runtime/ArrayBufferSharingMode.h | 59 +
 Source/JavaScriptCore/runtime/ArrayBufferView.cpp | 10 +-
 Source/JavaScriptCore/runtime/ArrayBufferView.h | 62 +-
 Source/JavaScriptCore/runtime/ArrayConstructor.cpp | 102 +-
 Source/JavaScriptCore/runtime/ArrayConstructor.h | 41 +-
 Source/JavaScriptCore/runtime/ArrayConventions.cpp | 68 +
 Source/JavaScriptCore/runtime/ArrayConventions.h | 64 +-
 .../runtime/ArrayIteratorAdaptiveWatchpoint.cpp | 45 +
 .../runtime/ArrayIteratorAdaptiveWatchpoint.h | 45 +
 .../runtime/ArrayIteratorConstructor.cpp | 42 -
 .../runtime/ArrayIteratorConstructor.h | 63 -
 .../runtime/ArrayIteratorPrototype.cpp | 29 +-
 .../runtime/ArrayIteratorPrototype.h | 10 +-
 Source/JavaScriptCore/runtime/ArrayPrototype.cpp | 2051 +--
 Source/JavaScriptCore/runtime/ArrayPrototype.h | 38 +-
 Source/JavaScriptCore/runtime/ArrayStorage.h | 81 +-
 .../runtime/AsyncFunctionConstructor.cpp | 77 +
 .../runtime/AsyncFunctionConstructor.h | 59 +
 .../runtime/AsyncFunctionPrototype.cpp | 57 +
 .../runtime/AsyncFunctionPrototype.h | 57 +
 Source/JavaScriptCore/runtime/AtomicsObject.cpp | 396 +
 Source/JavaScriptCore/runtime/AtomicsObject.h | 50 +
 Source/JavaScriptCore/runtime/AuxiliaryBarrier.h | 62 +
 .../runtime/AuxiliaryBarrierInlines.h | 51 +
 .../JavaScriptCore/runtime/BasicBlockLocation.cpp | 100 +
 Source/JavaScriptCore/runtime/BasicBlockLocation.h | 70 +
 .../runtime/BatchedTransitionOptimizer.h | 23 +-
 Source/JavaScriptCore/runtime/BigInteger.h | 8 +-
 .../JavaScriptCore/runtime/BooleanConstructor.cpp | 27 +-
 Source/JavaScriptCore/runtime/BooleanConstructor.h | 11 +-
 Source/JavaScriptCore/runtime/BooleanObject.cpp | 6 +-
 Source/JavaScriptCore/runtime/BooleanObject.h | 7 +-
 Source/JavaScriptCore/runtime/BooleanPrototype.cpp | 38 +-
 Source/JavaScriptCore/runtime/BooleanPrototype.h | 8 +-
 Source/JavaScriptCore/runtime/BundlePath.h | 35 +
 Source/JavaScriptCore/runtime/Butterfly.h | 21 +-
 Source/JavaScriptCore/runtime/ButterflyInlines.h | 125 +-
 Source/JavaScriptCore/runtime/CallData.cpp | 34 +-
 Source/JavaScriptCore/runtime/CallData.h | 45 +-
 Source/JavaScriptCore/runtime/CatchScope.cpp | 47 +
 Source/JavaScriptCore/runtime/CatchScope.h | 71 +
 Source/JavaScriptCore/runtime/ClassInfo.h | 101 +-
 Source/JavaScriptCore/runtime/ClonedArguments.cpp | 280 +
 Source/JavaScriptCore/runtime/ClonedArguments.h | 80 +
 Source/JavaScriptCore/runtime/CodeCache.cpp | 184 +-
 Source/JavaScriptCore/runtime/CodeCache.h | 195 +-
 .../runtime/CodeSpecializationKind.h | 6 +-
 .../JavaScriptCore/runtime/CommonIdentifiers.cpp | 57 +-
 Source/JavaScriptCore/runtime/CommonIdentifiers.h | 219 +-
 Source/JavaScriptCore/runtime/CommonSlowPaths.cpp | 789 +-
 Source/JavaScriptCore/runtime/CommonSlowPaths.h | 235 +-
 .../runtime/CommonSlowPathsExceptions.cpp | 14 +-
 .../runtime/CommonSlowPathsExceptions.h | 10 +-
 Source/JavaScriptCore/runtime/CompilationResult.h | 6 +-
 Source/JavaScriptCore/runtime/Completion.cpp | 206 +-
 Source/JavaScriptCore/runtime/Completion.h | 56 +-
 Source/JavaScriptCore/runtime/ConcurrentJITLock.h | 123 -
 Source/JavaScriptCore/runtime/ConcurrentJSLock.h | 134 +
 Source/JavaScriptCore/runtime/ConsoleClient.cpp | 251 +
 Source/JavaScriptCore/runtime/ConsoleClient.h | 71 +
 Source/JavaScriptCore/runtime/ConsoleObject.cpp | 370 +
 Source/JavaScriptCore/runtime/ConsoleObject.h | 57 +
 Source/JavaScriptCore/runtime/ConsoleTypes.h | 72 +
 Source/JavaScriptCore/runtime/ConstantMode.cpp | 46 +
 Source/JavaScriptCore/runtime/ConstantMode.h | 17 +-
 Source/JavaScriptCore/runtime/ConstructAbility.h | 35 +
 Source/JavaScriptCore/runtime/ConstructData.cpp | 33 +-
 Source/JavaScriptCore/runtime/ConstructData.h | 47 +-
 .../JavaScriptCore/runtime/ControlFlowProfiler.cpp | 137 +
 .../JavaScriptCore/runtime/ControlFlowProfiler.h | 112 +
 .../JavaScriptCore/runtime/CustomGetterSetter.cpp | 63 +
 Source/JavaScriptCore/runtime/CustomGetterSetter.h | 81 +
 Source/JavaScriptCore/runtime/DataView.cpp | 21 +-
 Source/JavaScriptCore/runtime/DataView.h | 19 +-
 Source/JavaScriptCore/runtime/DateConstructor.cpp | 180 +-
 Source/JavaScriptCore/runtime/DateConstructor.h | 58 +-
 Source/JavaScriptCore/runtime/DateConversion.cpp | 3 +-
 Source/JavaScriptCore/runtime/DateConversion.h | 7 +-
 Source/JavaScriptCore/runtime/DateInstance.cpp | 8 +-
 Source/JavaScriptCore/runtime/DateInstance.h | 125 +-
 Source/JavaScriptCore/runtime/DateInstanceCache.h | 94 +-
 Source/JavaScriptCore/runtime/DatePrototype.cpp | 476 +-
 Source/JavaScriptCore/runtime/DatePrototype.h | 50 +-
 .../runtime/DefinePropertyAttributes.h | 164 +
 Source/JavaScriptCore/runtime/DirectArguments.cpp | 166 +
 Source/JavaScriptCore/runtime/DirectArguments.h | 170 +
 .../runtime/DirectArgumentsOffset.cpp | 42 +
 .../JavaScriptCore/runtime/DirectArgumentsOffset.h | 49 +
 .../runtime/DirectEvalExecutable.cpp | 78 +
 .../JavaScriptCore/runtime/DirectEvalExecutable.h | 39 +
 Source/JavaScriptCore/runtime/DumpContext.cpp | 4 +-
 Source/JavaScriptCore/runtime/DumpContext.h | 9 +-
 .../runtime/ECMAScriptSpecInternalFunctions.cpp | 42 +
 .../runtime/ECMAScriptSpecInternalFunctions.h | 34 +
 Source/JavaScriptCore/runtime/EnumerationMode.h | 77 +
 Source/JavaScriptCore/runtime/Error.cpp | 275 +-
 Source/JavaScriptCore/runtime/Error.h | 235 +-
 Source/JavaScriptCore/runtime/ErrorConstructor.cpp | 36 +-
 Source/JavaScriptCore/runtime/ErrorConstructor.h | 50 +-
 .../JavaScriptCore/runtime/ErrorHandlingScope.cpp | 48 +
 Source/JavaScriptCore/runtime/ErrorHandlingScope.h | 41 +
 Source/JavaScriptCore/runtime/ErrorInstance.cpp | 198 +-
 Source/JavaScriptCore/runtime/ErrorInstance.h | 72 +-
 Source/JavaScriptCore/runtime/ErrorPrototype.cpp | 47 +-
 Source/JavaScriptCore/runtime/ErrorPrototype.h | 53 +-
 Source/JavaScriptCore/runtime/EvalExecutable.cpp | 57 +
 Source/JavaScriptCore/runtime/EvalExecutable.h | 76 +
 Source/JavaScriptCore/runtime/Exception.cpp | 84 +
 Source/JavaScriptCore/runtime/Exception.h | 78 +
 .../runtime/ExceptionEventLocation.cpp | 39 +
 .../runtime/ExceptionEventLocation.h | 51 +
 Source/JavaScriptCore/runtime/ExceptionFuzz.cpp | 67 +
 Source/JavaScriptCore/runtime/ExceptionFuzz.h | 46 +
 Source/JavaScriptCore/runtime/ExceptionHelpers.cpp | 270 +-
 Source/JavaScriptCore/runtime/ExceptionHelpers.h | 53 +-
 Source/JavaScriptCore/runtime/ExceptionScope.cpp | 52 +
 Source/JavaScriptCore/runtime/ExceptionScope.h | 78 +
 Source/JavaScriptCore/runtime/Executable.cpp | 635 -
 Source/JavaScriptCore/runtime/Executable.h | 673 -
 Source/JavaScriptCore/runtime/ExecutableBase.cpp | 155 +
 Source/JavaScriptCore/runtime/ExecutableBase.h | 240 +
 Source/JavaScriptCore/runtime/Float32Array.h | 6 +-
 Source/JavaScriptCore/runtime/Float64Array.h | 6 +-
 .../JavaScriptCore/runtime/FunctionConstructor.cpp | 120 +-
 .../JavaScriptCore/runtime/FunctionConstructor.h | 62 +-
 .../JavaScriptCore/runtime/FunctionExecutable.cpp | 109 +
 Source/JavaScriptCore/runtime/FunctionExecutable.h | 189 +
 .../runtime/FunctionExecutableDump.cpp | 8 +-
 .../runtime/FunctionExecutableDump.h | 9 +-
 .../runtime/FunctionHasExecutedCache.cpp | 100 +
 .../runtime/FunctionHasExecutedCache.h | 62 +
 .../JavaScriptCore/runtime/FunctionPrototype.cpp | 206 +-
 Source/JavaScriptCore/runtime/FunctionPrototype.h | 61 +-
 Source/JavaScriptCore/runtime/FunctionRareData.cpp | 94 +
 Source/JavaScriptCore/runtime/FunctionRareData.h | 129 +
 .../JavaScriptCore/runtime/GCActivityCallback.cpp | 191 -
 Source/JavaScriptCore/runtime/GCActivityCallback.h | 112 -
 .../runtime/GeneratorFunctionConstructor.cpp | 77 +
 .../runtime/GeneratorFunctionConstructor.h | 65 +
 .../runtime/GeneratorFunctionPrototype.cpp | 59 +
 .../runtime/GeneratorFunctionPrototype.h | 59 +
 .../JavaScriptCore/runtime/GeneratorPrototype.cpp | 55 +
 Source/JavaScriptCore/runtime/GeneratorPrototype.h | 61 +
 Source/JavaScriptCore/runtime/GenericArguments.h | 66 +
 .../runtime/GenericArgumentsInlines.h | 316 +
 Source/JavaScriptCore/runtime/GenericOffset.h | 109 +
 .../JavaScriptCore/runtime/GenericTypedArrayView.h | 26 +-
 .../runtime/GenericTypedArrayViewInlines.h | 54 +-
 Source/JavaScriptCore/runtime/GetPutInfo.h | 234 +
 Source/JavaScriptCore/runtime/GetterSetter.cpp | 69 +-
 Source/JavaScriptCore/runtime/GetterSetter.h | 172 +-
 .../JavaScriptCore/runtime/HasOwnPropertyCache.h | 153 +
 Source/JavaScriptCore/runtime/HashMapImpl.cpp | 94 +
 Source/JavaScriptCore/runtime/HashMapImpl.h | 638 +
 Source/JavaScriptCore/runtime/Identifier.cpp | 127 +-
 Source/JavaScriptCore/runtime/Identifier.h | 495 +-
 Source/JavaScriptCore/runtime/IdentifierInlines.h | 151 +
 Source/JavaScriptCore/runtime/IndexingHeader.h | 18 +-
 .../JavaScriptCore/runtime/IndexingHeaderInlines.h | 8 +-
 Source/JavaScriptCore/runtime/IndexingType.h | 78 +-
 .../runtime/IndirectEvalExecutable.cpp | 77 +
 .../runtime/IndirectEvalExecutable.h | 39 +
 Source/JavaScriptCore/runtime/InferredType.cpp | 613 +
 Source/JavaScriptCore/runtime/InferredType.h | 289 +
 .../JavaScriptCore/runtime/InferredTypeTable.cpp | 184 +
 Source/JavaScriptCore/runtime/InferredTypeTable.h | 109 +
 Source/JavaScriptCore/runtime/InferredValue.cpp | 139 +
 Source/JavaScriptCore/runtime/InferredValue.h | 134 +
 .../JavaScriptCore/runtime/InitializeThreading.cpp | 22 +-
 .../JavaScriptCore/runtime/InitializeThreading.h | 15 +-
 .../runtime/InspectorInstrumentationObject.cpp | 95 +
 .../runtime/InspectorInstrumentationObject.h | 62 +
 Source/JavaScriptCore/runtime/Int16Array.h | 6 +-
 Source/JavaScriptCore/runtime/Int32Array.h | 6 +-
 Source/JavaScriptCore/runtime/Int8Array.h | 6 +-
 .../runtime/IntegralTypedArrayBase.h | 63 -
 .../runtime/IntendedStructureChain.cpp | 141 -
 .../runtime/IntendedStructureChain.h | 70 -
 Source/JavaScriptCore/runtime/InternalFunction.cpp | 80 +-
 Source/JavaScriptCore/runtime/InternalFunction.h | 59 +-
 Source/JavaScriptCore/runtime/IntlCollator.cpp | 454 +
 Source/JavaScriptCore/runtime/IntlCollator.h | 86 +
 .../runtime/IntlCollatorConstructor.cpp | 168 +
 .../runtime/IntlCollatorConstructor.h | 63 +
 .../runtime/IntlCollatorPrototype.cpp | 154 +
 .../JavaScriptCore/runtime/IntlCollatorPrototype.h | 54 +
 .../JavaScriptCore/runtime/IntlDateTimeFormat.cpp | 896 +
 Source/JavaScriptCore/runtime/IntlDateTimeFormat.h | 108 +
 .../runtime/IntlDateTimeFormatConstructor.cpp | 169 +
 .../runtime/IntlDateTimeFormatConstructor.h | 63 +
 .../runtime/IntlDateTimeFormatPrototype.cpp | 175 +
 .../runtime/IntlDateTimeFormatPrototype.h | 54 +
 Source/JavaScriptCore/runtime/IntlNumberFormat.cpp | 519 +
 Source/JavaScriptCore/runtime/IntlNumberFormat.h | 90 +
 .../runtime/IntlNumberFormatConstructor.cpp | 169 +
 .../runtime/IntlNumberFormatConstructor.h | 63 +
 .../runtime/IntlNumberFormatPrototype.cpp | 163 +
 .../runtime/IntlNumberFormatPrototype.h | 54 +
 Source/JavaScriptCore/runtime/IntlObject.cpp | 1057 ++
 Source/JavaScriptCore/runtime/IntlObject.h | 73 +
 Source/JavaScriptCore/runtime/IntlObjectInlines.h | 66 +
 Source/JavaScriptCore/runtime/Intrinsic.h | 57 +-
 Source/JavaScriptCore/runtime/IterationKind.h | 37 +
 Source/JavaScriptCore/runtime/IterationStatus.h | 35 +
 .../JavaScriptCore/runtime/IteratorOperations.cpp | 206 +
 Source/JavaScriptCore/runtime/IteratorOperations.h | 73 +
 .../JavaScriptCore/runtime/IteratorPrototype.cpp | 48 +
 Source/JavaScriptCore/runtime/IteratorPrototype.h | 59 +
 .../JavaScriptCore/runtime/JSAPIValueWrapper.cpp | 2 +-
 Source/JavaScriptCore/runtime/JSAPIValueWrapper.h | 76 +-
 Source/JavaScriptCore/runtime/JSActivation.cpp | 234 -
 Source/JavaScriptCore/runtime/JSActivation.h | 208 -
 .../JavaScriptCore/runtime/JSArgumentsIterator.cpp | 41 -
 .../JavaScriptCore/runtime/JSArgumentsIterator.h | 79 -
 Source/JavaScriptCore/runtime/JSArray.cpp | 1337 +-
 Source/JavaScriptCore/runtime/JSArray.h | 233 +-
 Source/JavaScriptCore/runtime/JSArrayBuffer.cpp | 79 +-
 Source/JavaScriptCore/runtime/JSArrayBuffer.h | 54 +-
 .../runtime/JSArrayBufferConstructor.cpp | 68 +-
 .../runtime/JSArrayBufferConstructor.h | 21 +-
 .../runtime/JSArrayBufferPrototype.cpp | 83 +-
 .../runtime/JSArrayBufferPrototype.h | 16 +-
 .../JavaScriptCore/runtime/JSArrayBufferView.cpp | 168 +-
 Source/JavaScriptCore/runtime/JSArrayBufferView.h | 55 +-
 .../runtime/JSArrayBufferViewInlines.h | 48 +-
 Source/JavaScriptCore/runtime/JSArrayInlines.h | 101 +
 Source/JavaScriptCore/runtime/JSArrayIterator.cpp | 169 -
 Source/JavaScriptCore/runtime/JSArrayIterator.h | 93 -
 Source/JavaScriptCore/runtime/JSAsyncFunction.cpp | 74 +
 Source/JavaScriptCore/runtime/JSAsyncFunction.h | 64 +
 Source/JavaScriptCore/runtime/JSBoundFunction.cpp | 158 +-
 Source/JavaScriptCore/runtime/JSBoundFunction.h | 31 +-
 Source/JavaScriptCore/runtime/JSCInlines.h | 54 +
 Source/JavaScriptCore/runtime/JSCJSValue.cpp | 282 +-
 Source/JavaScriptCore/runtime/JSCJSValue.h | 194 +-
 Source/JavaScriptCore/runtime/JSCJSValueInlines.h | 380 +-
 Source/JavaScriptCore/runtime/JSCallee.cpp | 66 +
 Source/JavaScriptCore/runtime/JSCallee.h | 105 +
 Source/JavaScriptCore/runtime/JSCell.cpp | 131 +-
 Source/JavaScriptCore/runtime/JSCell.h | 211 +-
 Source/JavaScriptCore/runtime/JSCellInlines.h | 265 +-
 .../runtime/JSCustomGetterSetterFunction.cpp | 96 +
 .../runtime/JSCustomGetterSetterFunction.h | 71 +
 Source/JavaScriptCore/runtime/JSDataView.cpp | 116 +-
 Source/JavaScriptCore/runtime/JSDataView.h | 32 +-
 .../JavaScriptCore/runtime/JSDataViewPrototype.cpp | 264 +-
 .../JavaScriptCore/runtime/JSDataViewPrototype.h | 14 +-
 Source/JavaScriptCore/runtime/JSDateMath.cpp | 5 +-
 Source/JavaScriptCore/runtime/JSDateMath.h | 15 +-
 .../JavaScriptCore/runtime/JSDestructibleObject.h | 47 +-
 .../runtime/JSDestructibleObjectSubspace.cpp | 66 +
 .../runtime/JSDestructibleObjectSubspace.h | 42 +
 .../JavaScriptCore/runtime/JSEnvironmentRecord.cpp | 67 +
 .../JavaScriptCore/runtime/JSEnvironmentRecord.h | 114 +
 Source/JavaScriptCore/runtime/JSExportMacros.h | 12 +-
 Source/JavaScriptCore/runtime/JSFixedArray.cpp | 43 +
 Source/JavaScriptCore/runtime/JSFixedArray.h | 140 +
 Source/JavaScriptCore/runtime/JSFloat32Array.h | 6 +-
 Source/JavaScriptCore/runtime/JSFloat64Array.h | 6 +-
 Source/JavaScriptCore/runtime/JSFunction.cpp | 636 +-
 Source/JavaScriptCore/runtime/JSFunction.h | 326 +-
 Source/JavaScriptCore/runtime/JSFunctionInlines.h | 65 +-
 .../JavaScriptCore/runtime/JSGeneratorFunction.cpp | 72 +
 .../JavaScriptCore/runtime/JSGeneratorFunction.h | 95 +
 .../runtime/JSGenericTypedArrayView.h | 188 +-
 .../runtime/JSGenericTypedArrayViewConstructor.h | 24 +-
 .../JSGenericTypedArrayViewConstructorInlines.h | 253 +-
 .../runtime/JSGenericTypedArrayViewInlines.h | 399 +-
 .../runtime/JSGenericTypedArrayViewPrototype.h | 20 +-
 .../JSGenericTypedArrayViewPrototypeFunctions.h | 579 +
 .../JSGenericTypedArrayViewPrototypeInlines.h | 110 +-
 .../runtime/JSGlobalLexicalEnvironment.cpp | 71 +
 .../runtime/JSGlobalLexicalEnvironment.h | 74 +
 Source/JavaScriptCore/runtime/JSGlobalObject.cpp | 1443 +-
 Source/JavaScriptCore/runtime/JSGlobalObject.h | 718 +-
 .../runtime/JSGlobalObjectDebuggable.cpp | 88 +
 .../runtime/JSGlobalObjectDebuggable.h | 71 +
 .../runtime/JSGlobalObjectFunctions.cpp | 766 +-
 .../runtime/JSGlobalObjectFunctions.h | 20 +-
 .../JavaScriptCore/runtime/JSGlobalObjectInlines.h | 69 +
 Source/JavaScriptCore/runtime/JSInt16Array.h | 6 +-
 Source/JavaScriptCore/runtime/JSInt32Array.h | 6 +-
 Source/JavaScriptCore/runtime/JSInt8Array.h | 6 +-
 .../JavaScriptCore/runtime/JSInternalPromise.cpp | 67 +
 Source/JavaScriptCore/runtime/JSInternalPromise.h | 57 +
 .../runtime/JSInternalPromiseConstructor.cpp | 86 +
 .../runtime/JSInternalPromiseConstructor.h | 51 +
 .../runtime/JSInternalPromiseDeferred.cpp | 78 +
 .../runtime/JSInternalPromiseDeferred.h | 56 +
 .../runtime/JSInternalPromisePrototype.cpp | 59 +
 .../runtime/JSInternalPromisePrototype.h | 46 +
 Source/JavaScriptCore/runtime/JSJob.cpp | 79 +
 Source/JavaScriptCore/runtime/JSJob.h | 38 +
 .../runtime/JSLexicalEnvironment.cpp | 116 +
 .../JavaScriptCore/runtime/JSLexicalEnvironment.h | 88 +
Source/JavaScriptCore/runtime/JSLock.cpp | 336 +-
Source/JavaScriptCore/runtime/JSLock.h | 203 +-
Source/JavaScriptCore/runtime/JSMap.cpp | 21 +-
Source/JavaScriptCore/runtime/JSMap.h | 40 +-
Source/JavaScriptCore/runtime/JSMapIterator.cpp | 27 +-
Source/JavaScriptCore/runtime/JSMapIterator.h | 89 +-
.../JavaScriptCore/runtime/JSModuleEnvironment.cpp | 139 +
.../JavaScriptCore/runtime/JSModuleEnvironment.h | 100 +
Source/JavaScriptCore/runtime/JSModuleLoader.cpp | 256 +
Source/JavaScriptCore/runtime/JSModuleLoader.h | 88 +
.../runtime/JSModuleNamespaceObject.cpp | 221 +
.../runtime/JSModuleNamespaceObject.h | 113 +
Source/JavaScriptCore/runtime/JSModuleRecord.cpp | 213 +
Source/JavaScriptCore/runtime/JSModuleRecord.h | 71 +
Source/JavaScriptCore/runtime/JSNameScope.cpp | 83 -
Source/JavaScriptCore/runtime/JSNameScope.h | 90 -
.../JavaScriptCore/runtime/JSNativeStdFunction.cpp | 77 +
.../JavaScriptCore/runtime/JSNativeStdFunction.h | 66 +
Source/JavaScriptCore/runtime/JSNotAnObject.cpp | 88 -
Source/JavaScriptCore/runtime/JSNotAnObject.h | 85 -
Source/JavaScriptCore/runtime/JSONObject.cpp | 464 +-
Source/JavaScriptCore/runtime/JSONObject.h | 55 +-
Source/JavaScriptCore/runtime/JSObject.cpp | 2780 ++--
Source/JavaScriptCore/runtime/JSObject.h | 1196 +-
Source/JavaScriptCore/runtime/JSObjectInlines.h | 363 +
Source/JavaScriptCore/runtime/JSPromise.cpp | 137 +-
Source/JavaScriptCore/runtime/JSPromise.h | 68 +-
.../runtime/JSPromiseConstructor.cpp | 518 +-
.../JavaScriptCore/runtime/JSPromiseConstructor.h | 25 +-
.../JavaScriptCore/runtime/JSPromiseDeferred.cpp | 225 +-
Source/JavaScriptCore/runtime/JSPromiseDeferred.h | 36 +-
.../JavaScriptCore/runtime/JSPromiseFunctions.cpp | 274 -
Source/JavaScriptCore/runtime/JSPromiseFunctions.h | 47 -
.../JavaScriptCore/runtime/JSPromisePrototype.cpp | 161 +-
Source/JavaScriptCore/runtime/JSPromisePrototype.h | 17 +-
.../JavaScriptCore/runtime/JSPromiseReaction.cpp | 158 -
Source/JavaScriptCore/runtime/JSPromiseReaction.h | 68 -
.../runtime/JSPropertyNameEnumerator.cpp | 98 +
.../runtime/JSPropertyNameEnumerator.h | 145 +
.../runtime/JSPropertyNameIterator.cpp | 197 +-
.../runtime/JSPropertyNameIterator.h | 152 +-
Source/JavaScriptCore/runtime/JSProxy.cpp | 89 +-
Source/JavaScriptCore/runtime/JSProxy.h | 36 +-
Source/JavaScriptCore/runtime/JSScope.cpp | 294 +-
Source/JavaScriptCore/runtime/JSScope.h | 165 +-
Source/JavaScriptCore/runtime/JSScriptFetcher.cpp | 40 +
Source/JavaScriptCore/runtime/JSScriptFetcher.h | 77 +
.../runtime/JSSegmentedVariableObject.cpp | 89 +-
.../runtime/JSSegmentedVariableObject.h | 76 +-
.../runtime/JSSegmentedVariableObjectSubspace.cpp | 66 +
.../runtime/JSSegmentedVariableObjectSubspace.h | 42 +
Source/JavaScriptCore/runtime/JSSet.cpp | 20 +-
Source/JavaScriptCore/runtime/JSSet.h | 39 +-
Source/JavaScriptCore/runtime/JSSetIterator.cpp | 27 +-
Source/JavaScriptCore/runtime/JSSetIterator.h | 84 +-
Source/JavaScriptCore/runtime/JSSourceCode.cpp | 40 +
Source/JavaScriptCore/runtime/JSSourceCode.h | 77 +
Source/JavaScriptCore/runtime/JSString.cpp | 379 +-
Source/JavaScriptCore/runtime/JSString.h | 1075 +-
Source/JavaScriptCore/runtime/JSStringBuilder.h | 158 +-
Source/JavaScriptCore/runtime/JSStringInlines.h | 39 +
Source/JavaScriptCore/runtime/JSStringIterator.cpp | 61 +
Source/JavaScriptCore/runtime/JSStringIterator.h | 63 +
Source/JavaScriptCore/runtime/JSStringJoiner.cpp | 148 +-
Source/JavaScriptCore/runtime/JSStringJoiner.h | 131 +-
Source/JavaScriptCore/runtime/JSStringSubspace.cpp | 66 +
Source/JavaScriptCore/runtime/JSStringSubspace.h | 42 +
.../JavaScriptCore/runtime/JSSymbolTableObject.cpp | 37 +-
.../JavaScriptCore/runtime/JSSymbolTableObject.h | 172 +-
.../runtime/JSTemplateRegistryKey.cpp | 56 +
.../JavaScriptCore/runtime/JSTemplateRegistryKey.h | 58 +
Source/JavaScriptCore/runtime/JSType.h | 72 +-
Source/JavaScriptCore/runtime/JSTypeInfo.h | 172 +-
.../runtime/JSTypedArrayConstructors.cpp | 4 +-
.../runtime/JSTypedArrayConstructors.h | 5 +-
.../runtime/JSTypedArrayPrototypes.cpp | 7 +-
.../runtime/JSTypedArrayPrototypes.h | 6 +-
.../runtime/JSTypedArrayViewConstructor.cpp | 85 +
.../runtime/JSTypedArrayViewConstructor.h | 59 +
.../runtime/JSTypedArrayViewPrototype.cpp | 356 +
.../runtime/JSTypedArrayViewPrototype.h | 54 +
Source/JavaScriptCore/runtime/JSTypedArrays.cpp | 11 +-
Source/JavaScriptCore/runtime/JSTypedArrays.h | 8 +-
Source/JavaScriptCore/runtime/JSUint16Array.h | 6 +-
Source/JavaScriptCore/runtime/JSUint32Array.h | 6 +-
Source/JavaScriptCore/runtime/JSUint8Array.h | 6 +-
.../JavaScriptCore/runtime/JSUint8ClampedArray.h | 6 +-
Source/JavaScriptCore/runtime/JSVariableObject.cpp | 38 -
Source/JavaScriptCore/runtime/JSVariableObject.h | 76 -
Source/JavaScriptCore/runtime/JSWeakMap.cpp | 14 +-
Source/JavaScriptCore/runtime/JSWeakMap.h | 10 +-
Source/JavaScriptCore/runtime/JSWeakSet.cpp | 54 +
Source/JavaScriptCore/runtime/JSWeakSet.h | 79 +
Source/JavaScriptCore/runtime/JSWithScope.cpp | 31 +-
Source/JavaScriptCore/runtime/JSWithScope.h | 49 +-
Source/JavaScriptCore/runtime/JSWrapperObject.cpp | 7 +-
Source/JavaScriptCore/runtime/JSWrapperObject.h | 92 +-
.../JavaScriptCore/runtime/LazyClassStructure.cpp | 102 +
Source/JavaScriptCore/runtime/LazyClassStructure.h | 125 +
.../runtime/LazyClassStructureInlines.h | 46 +
Source/JavaScriptCore/runtime/LazyProperty.h | 118 +
.../JavaScriptCore/runtime/LazyPropertyInlines.h | 104 +
Source/JavaScriptCore/runtime/LiteralParser.cpp | 217 +-
Source/JavaScriptCore/runtime/LiteralParser.h | 64 +-
Source/JavaScriptCore/runtime/Lookup.cpp | 83 +-
Source/JavaScriptCore/runtime/Lookup.h | 555 +-
Source/JavaScriptCore/runtime/MapBase.cpp | 50 +
Source/JavaScriptCore/runtime/MapBase.h | 94 +
Source/JavaScriptCore/runtime/MapConstructor.cpp | 82 +-
Source/JavaScriptCore/runtime/MapConstructor.h | 14 +-
Source/JavaScriptCore/runtime/MapData.cpp | 256 -
Source/JavaScriptCore/runtime/MapData.h | 236 -
.../runtime/MapIteratorConstructor.cpp | 45 -
.../runtime/MapIteratorConstructor.h | 63 -
.../runtime/MapIteratorPrototype.cpp | 40 +-
.../JavaScriptCore/runtime/MapIteratorPrototype.h | 7 +-
Source/JavaScriptCore/runtime/MapPrototype.cpp | 204 +-
Source/JavaScriptCore/runtime/MapPrototype.h | 10 +-
Source/JavaScriptCore/runtime/MatchResult.cpp | 40 +
Source/JavaScriptCore/runtime/MatchResult.h | 24 +-
Source/JavaScriptCore/runtime/MathCommon.cpp | 524 +
Source/JavaScriptCore/runtime/MathCommon.h | 212 +
Source/JavaScriptCore/runtime/MathObject.cpp | 633 +-
Source/JavaScriptCore/runtime/MathObject.h | 48 +-
Source/JavaScriptCore/runtime/MemoryStatistics.cpp | 48 +
Source/JavaScriptCore/runtime/MemoryStatistics.h | 10 +-
Source/JavaScriptCore/runtime/Microtask.h | 5 +-
.../runtime/ModuleLoaderPrototype.cpp | 248 +
.../JavaScriptCore/runtime/ModuleLoaderPrototype.h | 57 +
.../runtime/ModuleProgramExecutable.cpp | 100 +
.../runtime/ModuleProgramExecutable.h | 78 +
Source/JavaScriptCore/runtime/NameConstructor.cpp | 69 -
Source/JavaScriptCore/runtime/NameConstructor.h | 65 -
Source/JavaScriptCore/runtime/NameInstance.cpp | 47 -
Source/JavaScriptCore/runtime/NameInstance.h | 77 -
Source/JavaScriptCore/runtime/NamePrototype.cpp | 81 -
Source/JavaScriptCore/runtime/NamePrototype.h | 64 -
.../runtime/NativeErrorConstructor.cpp | 50 +-
.../runtime/NativeErrorConstructor.h | 72 +-
.../runtime/NativeErrorPrototype.cpp | 8 +-
.../JavaScriptCore/runtime/NativeErrorPrototype.h | 46 +-
Source/JavaScriptCore/runtime/NativeExecutable.cpp | 88 +
Source/JavaScriptCore/runtime/NativeExecutable.h | 96 +
.../runtime/NativeStdFunctionCell.cpp | 55 +
.../JavaScriptCore/runtime/NativeStdFunctionCell.h | 58 +
.../JavaScriptCore/runtime/NullGetterFunction.cpp | 52 +
Source/JavaScriptCore/runtime/NullGetterFunction.h | 59 +
.../JavaScriptCore/runtime/NullSetterFunction.cpp | 94 +
Source/JavaScriptCore/runtime/NullSetterFunction.h | 59 +
.../JavaScriptCore/runtime/NumberConstructor.cpp | 110 +-
Source/JavaScriptCore/runtime/NumberConstructor.h | 57 +-
Source/JavaScriptCore/runtime/NumberObject.cpp | 6 +-
Source/JavaScriptCore/runtime/NumberObject.h | 43 +-
Source/JavaScriptCore/runtime/NumberPrototype.cpp | 237 +-
Source/JavaScriptCore/runtime/NumberPrototype.h | 61 +-
Source/JavaScriptCore/runtime/NumericStrings.h | 109 +-
.../JavaScriptCore/runtime/ObjectConstructor.cpp | 743 +-
Source/JavaScriptCore/runtime/ObjectConstructor.h | 138 +-
Source/JavaScriptCore/runtime/ObjectPrototype.cpp | 255 +-
Source/JavaScriptCore/runtime/ObjectPrototype.h | 36 +-
Source/JavaScriptCore/runtime/Operations.cpp | 79 +-
Source/JavaScriptCore/runtime/Operations.h | 200 +-
Source/JavaScriptCore/runtime/Options.cpp | 755 +-
Source/JavaScriptCore/runtime/Options.h | 678 +-
Source/JavaScriptCore/runtime/ParseInt.h | 227 +
Source/JavaScriptCore/runtime/PrivateName.h | 39 +-
.../JavaScriptCore/runtime/ProgramExecutable.cpp | 214 +
Source/JavaScriptCore/runtime/ProgramExecutable.h | 82 +
.../JavaScriptCore/runtime/PropertyDescriptor.cpp | 55 +-
Source/JavaScriptCore/runtime/PropertyDescriptor.h | 150 +-
.../JavaScriptCore/runtime/PropertyMapHashTable.h | 165 +-
Source/JavaScriptCore/runtime/PropertyName.h | 104 +-
.../JavaScriptCore/runtime/PropertyNameArray.cpp | 55 -
Source/JavaScriptCore/runtime/PropertyNameArray.h | 205 +-
Source/JavaScriptCore/runtime/PropertyOffset.h | 6 +-
Source/JavaScriptCore/runtime/PropertySlot.cpp | 33 +
Source/JavaScriptCore/runtime/PropertySlot.h | 229 +-
Source/JavaScriptCore/runtime/PropertyStorage.h | 6 +-
Source/JavaScriptCore/runtime/PropertyTable.cpp | 50 +-
Source/JavaScriptCore/runtime/Protect.h | 72 +-
Source/JavaScriptCore/runtime/PrototypeMap.cpp | 41 +-
Source/JavaScriptCore/runtime/PrototypeMap.h | 43 +-
.../JavaScriptCore/runtime/PrototypeMapInlines.h | 46 +
Source/JavaScriptCore/runtime/ProxyConstructor.cpp | 124 +
Source/JavaScriptCore/runtime/ProxyConstructor.h | 56 +
Source/JavaScriptCore/runtime/ProxyObject.cpp | 1164 ++
Source/JavaScriptCore/runtime/ProxyObject.h | 116 +
Source/JavaScriptCore/runtime/ProxyRevoke.cpp | 87 +
Source/JavaScriptCore/runtime/ProxyRevoke.h | 60 +
Source/JavaScriptCore/runtime/PureNaN.h | 95 +
Source/JavaScriptCore/runtime/PutDirectIndexMode.h | 6 +-
Source/JavaScriptCore/runtime/PutPropertySlot.h | 185 +-
Source/JavaScriptCore/runtime/ReflectObject.cpp | 314 +
Source/JavaScriptCore/runtime/ReflectObject.h | 58 +
Source/JavaScriptCore/runtime/RegExp.cpp | 297 +-
Source/JavaScriptCore/runtime/RegExp.h | 153 +-
Source/JavaScriptCore/runtime/RegExpCache.cpp | 7 +-
Source/JavaScriptCore/runtime/RegExpCache.h | 18 +-
.../JavaScriptCore/runtime/RegExpCachedResult.cpp | 52 +-
Source/JavaScriptCore/runtime/RegExpCachedResult.h | 102 +-
.../JavaScriptCore/runtime/RegExpConstructor.cpp | 284 +-
Source/JavaScriptCore/runtime/RegExpConstructor.h | 178 +-
Source/JavaScriptCore/runtime/RegExpInlines.h | 234 +
Source/JavaScriptCore/runtime/RegExpKey.h | 13 +-
.../JavaScriptCore/runtime/RegExpMatchesArray.cpp | 127 +-
Source/JavaScriptCore/runtime/RegExpMatchesArray.h | 207 +-
Source/JavaScriptCore/runtime/RegExpObject.cpp | 345 +-
Source/JavaScriptCore/runtime/RegExpObject.h | 150 +-
.../JavaScriptCore/runtime/RegExpObjectInlines.h | 143 +
Source/JavaScriptCore/runtime/RegExpPrototype.cpp | 693 +-
Source/JavaScriptCore/runtime/RegExpPrototype.h | 62 +-
Source/JavaScriptCore/runtime/Reject.h | 44 -
Source/JavaScriptCore/runtime/RuntimeFlags.h | 97 +
Source/JavaScriptCore/runtime/RuntimeType.cpp | 87 +
Source/JavaScriptCore/runtime/RuntimeType.h | 59 +
Source/JavaScriptCore/runtime/SamplingCounter.cpp | 2 +-
Source/JavaScriptCore/runtime/SamplingCounter.h | 9 +-
Source/JavaScriptCore/runtime/SamplingProfiler.cpp | 1096 ++
Source/JavaScriptCore/runtime/SamplingProfiler.h | 216 +
Source/JavaScriptCore/runtime/ScopeOffset.cpp | 42 +
Source/JavaScriptCore/runtime/ScopeOffset.h | 47 +
Source/JavaScriptCore/runtime/ScopedArguments.cpp | 156 +
Source/JavaScriptCore/runtime/ScopedArguments.h | 168 +
.../runtime/ScopedArgumentsTable.cpp | 109 +
.../JavaScriptCore/runtime/ScopedArgumentsTable.h | 92 +
Source/JavaScriptCore/runtime/ScriptExecutable.cpp | 334 +
Source/JavaScriptCore/runtime/ScriptExecutable.h | 143 +
Source/JavaScriptCore/runtime/ScriptFetcher.h | 37 +
Source/JavaScriptCore/runtime/SetConstructor.cpp | 75 +-
Source/JavaScriptCore/runtime/SetConstructor.h | 14 +-
.../runtime/SetIteratorConstructor.cpp | 45 -
.../runtime/SetIteratorConstructor.h | 63 -
.../runtime/SetIteratorPrototype.cpp | 39 +-
.../JavaScriptCore/runtime/SetIteratorPrototype.h | 7 +-
Source/JavaScriptCore/runtime/SetPrototype.cpp | 177 +-
Source/JavaScriptCore/runtime/SetPrototype.h | 10 +-
.../runtime/SimpleTypedArrayController.cpp | 29 +-
.../runtime/SimpleTypedArrayController.h | 24 +-
Source/JavaScriptCore/runtime/SlowPathReturnType.h | 83 +
Source/JavaScriptCore/runtime/SmallStrings.cpp | 46 +-
Source/JavaScriptCore/runtime/SmallStrings.h | 124 +-
Source/JavaScriptCore/runtime/SourceOrigin.h | 58 +
.../JavaScriptCore/runtime/SparseArrayValueMap.cpp | 104 +-
.../JavaScriptCore/runtime/SparseArrayValueMap.h | 24 +-
Source/JavaScriptCore/runtime/StackAlignment.h | 26 +-
Source/JavaScriptCore/runtime/StackFrame.cpp | 126 +
Source/JavaScriptCore/runtime/StackFrame.h | 72 +
.../runtime/StrictEvalActivation.cpp | 8 +-
.../JavaScriptCore/runtime/StrictEvalActivation.h | 21 +-
.../JavaScriptCore/runtime/StringConstructor.cpp | 81 +-
Source/JavaScriptCore/runtime/StringConstructor.h | 57 +-
.../runtime/StringIteratorPrototype.cpp | 55 +
.../runtime/StringIteratorPrototype.h | 60 +
Source/JavaScriptCore/runtime/StringObject.cpp | 121 +-
Source/JavaScriptCore/runtime/StringObject.h | 138 +-
Source/JavaScriptCore/runtime/StringPrototype.cpp | 1775 +-
Source/JavaScriptCore/runtime/StringPrototype.h | 60 +-
.../runtime/StringRecursionChecker.cpp | 8 +-
.../runtime/StringRecursionChecker.h | 31 +-
Source/JavaScriptCore/runtime/Structure.cpp | 1321 +-
Source/JavaScriptCore/runtime/Structure.h | 617 +-
Source/JavaScriptCore/runtime/StructureChain.cpp | 18 +-
Source/JavaScriptCore/runtime/StructureChain.h | 103 +-
Source/JavaScriptCore/runtime/StructureIDBlob.h | 92 +
Source/JavaScriptCore/runtime/StructureIDTable.cpp | 121 +
Source/JavaScriptCore/runtime/StructureIDTable.h | 135 +
Source/JavaScriptCore/runtime/StructureInlines.h | 302 +-
.../JavaScriptCore/runtime/StructureRareData.cpp | 183 +-
Source/JavaScriptCore/runtime/StructureRareData.h | 51 +-
.../runtime/StructureRareDataInlines.h | 13 +-
.../runtime/StructureTransitionTable.h | 122 +-
Source/JavaScriptCore/runtime/Symbol.cpp | 132 +
Source/JavaScriptCore/runtime/Symbol.h | 87 +
.../JavaScriptCore/runtime/SymbolConstructor.cpp | 129 +
Source/JavaScriptCore/runtime/SymbolConstructor.h | 65 +
Source/JavaScriptCore/runtime/SymbolObject.cpp | 61 +
Source/JavaScriptCore/runtime/SymbolObject.h | 66 +
Source/JavaScriptCore/runtime/SymbolPrototype.cpp | 113 +
Source/JavaScriptCore/runtime/SymbolPrototype.h | 60 +
Source/JavaScriptCore/runtime/SymbolTable.cpp | 229 +-
Source/JavaScriptCore/runtime/SymbolTable.h | 465 +-
Source/JavaScriptCore/runtime/TemplateRegistry.cpp | 90 +
Source/JavaScriptCore/runtime/TemplateRegistry.h | 47 +
.../JavaScriptCore/runtime/TemplateRegistryKey.cpp | 39 +
.../JavaScriptCore/runtime/TemplateRegistryKey.h | 124 +
.../runtime/TemplateRegistryKeyTable.cpp | 64 +
.../runtime/TemplateRegistryKeyTable.h | 59 +
Source/JavaScriptCore/runtime/TestRunnerUtils.cpp | 106 +-
Source/JavaScriptCore/runtime/TestRunnerUtils.h | 26 +-
Source/JavaScriptCore/runtime/ThrowScope.cpp | 113 +
Source/JavaScriptCore/runtime/ThrowScope.h | 107 +
Source/JavaScriptCore/runtime/ToNativeFromValue.h | 16 +-
Source/JavaScriptCore/runtime/Tracing.h | 50 -
Source/JavaScriptCore/runtime/TypeError.h | 40 +
.../JavaScriptCore/runtime/TypeLocationCache.cpp | 61 +
Source/JavaScriptCore/runtime/TypeLocationCache.h | 65 +
Source/JavaScriptCore/runtime/TypeProfiler.cpp | 167 +
Source/JavaScriptCore/runtime/TypeProfiler.h | 141 +
Source/JavaScriptCore/runtime/TypeProfilerLog.cpp | 115 +
Source/JavaScriptCore/runtime/TypeProfilerLog.h | 83 +
Source/JavaScriptCore/runtime/TypeSet.cpp | 584 +
Source/JavaScriptCore/runtime/TypeSet.h | 110 +
Source/JavaScriptCore/runtime/TypedArrayAdaptors.h | 106 +-
Source/JavaScriptCore/runtime/TypedArrayBase.h | 153 -
.../JavaScriptCore/runtime/TypedArrayController.h | 8 +-
Source/JavaScriptCore/runtime/TypedArrayInlines.h | 7 +-
Source/JavaScriptCore/runtime/TypedArrayType.cpp | 35 +-
Source/JavaScriptCore/runtime/TypedArrayType.h | 78 +-
Source/JavaScriptCore/runtime/TypedArrays.h | 6 +-
Source/JavaScriptCore/runtime/TypeofType.cpp | 63 +
Source/JavaScriptCore/runtime/TypeofType.h | 48 +
Source/JavaScriptCore/runtime/Uint16Array.h | 6 +-
Source/JavaScriptCore/runtime/Uint16WithFraction.h | 8 +-
Source/JavaScriptCore/runtime/Uint32Array.h | 6 +-
Source/JavaScriptCore/runtime/Uint8Array.h | 6 +-
Source/JavaScriptCore/runtime/Uint8ClampedArray.h | 6 +-
Source/JavaScriptCore/runtime/VM.cpp | 894 +-
Source/JavaScriptCore/runtime/VM.h | 1041 +-
Source/JavaScriptCore/runtime/VMEntryScope.cpp | 71 +-
Source/JavaScriptCore/runtime/VMEntryScope.h | 23 +-
Source/JavaScriptCore/runtime/VMInlines.h | 70 +
Source/JavaScriptCore/runtime/VarOffset.cpp | 76 +
Source/JavaScriptCore/runtime/VarOffset.h | 243 +
Source/JavaScriptCore/runtime/Watchdog.cpp | 226 +-
Source/JavaScriptCore/runtime/Watchdog.h | 93 +-
Source/JavaScriptCore/runtime/WatchdogNone.cpp | 50 -
Source/JavaScriptCore/runtime/WeakGCMap.h | 85 +-
Source/JavaScriptCore/runtime/WeakGCMapInlines.h | 79 +
.../JavaScriptCore/runtime/WeakMapConstructor.cpp | 72 +-
Source/JavaScriptCore/runtime/WeakMapConstructor.h | 10 +-
Source/JavaScriptCore/runtime/WeakMapData.cpp | 41 +-
Source/JavaScriptCore/runtime/WeakMapData.h | 27 +-
Source/JavaScriptCore/runtime/WeakMapPrototype.cpp | 60 +-
Source/JavaScriptCore/runtime/WeakMapPrototype.h | 7 +-
Source/JavaScriptCore/runtime/WeakRandom.h | 94 -
.../JavaScriptCore/runtime/WeakSetConstructor.cpp | 98 +
Source/JavaScriptCore/runtime/WeakSetConstructor.h | 63 +
Source/JavaScriptCore/runtime/WeakSetPrototype.cpp | 105 +
Source/JavaScriptCore/runtime/WeakSetPrototype.h | 58 +
Source/JavaScriptCore/runtime/WriteBarrier.h | 60 +-
.../JavaScriptCore/runtime/WriteBarrierInlines.h | 64 +
Source/JavaScriptCore/shell/CMakeLists.txt | 58 +
Source/JavaScriptCore/shell/DLLLauncherMain.cpp | 222 +
Source/JavaScriptCore/shell/PlatformGTK.cmake | 3 +
Source/JavaScriptCore/testRegExp.cpp | 535 +
Source/JavaScriptCore/tested-symbols.symlst | 87 +
Source/JavaScriptCore/tools/CodeProfile.cpp | 5 +-
Source/JavaScriptCore/tools/CodeProfile.h | 20 +-
Source/JavaScriptCore/tools/CodeProfiling.cpp | 12 +-
Source/JavaScriptCore/tools/CodeProfiling.h | 8 +-
Source/JavaScriptCore/tools/FunctionOverrides.cpp | 266 +
Source/JavaScriptCore/tools/FunctionOverrides.h | 64 +
Source/JavaScriptCore/tools/FunctionWhitelist.cpp | 99 +
Source/JavaScriptCore/tools/FunctionWhitelist.h | 46 +
Source/JavaScriptCore/tools/JSDollarVM.cpp | 35 +
Source/JavaScriptCore/tools/JSDollarVM.h | 57 +
.../JavaScriptCore/tools/JSDollarVMPrototype.cpp | 477 +
Source/JavaScriptCore/tools/JSDollarVMPrototype.h | 78 +
Source/JavaScriptCore/tools/ProfileTreeNode.h | 12 +-
.../JavaScriptCore/tools/SigillCrashAnalyzer.cpp | 396 +
Source/JavaScriptCore/tools/SigillCrashAnalyzer.h | 33 +
Source/JavaScriptCore/tools/TieredMMapArray.h | 8 +-
Source/JavaScriptCore/tools/VMInspector.cpp | 195 +
Source/JavaScriptCore/tools/VMInspector.h | 72 +
Source/JavaScriptCore/ucd/CaseFolding.txt | 1414 ++
Source/JavaScriptCore/wasm/JSWebAssembly.cpp | 111 +
Source/JavaScriptCore/wasm/JSWebAssembly.h | 78 +
Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp | 1614 ++
Source/JavaScriptCore/wasm/WasmB3IRGenerator.h | 54 +
Source/JavaScriptCore/wasm/WasmBinding.cpp | 468 +
Source/JavaScriptCore/wasm/WasmBinding.h | 45 +
.../JavaScriptCore/wasm/WasmCallingConvention.cpp | 67 +
Source/JavaScriptCore/wasm/WasmCallingConvention.h | 205 +
Source/JavaScriptCore/wasm/WasmExceptionType.h | 65 +
Source/JavaScriptCore/wasm/WasmFormat.cpp | 62 +
Source/JavaScriptCore/wasm/WasmFormat.h | 310 +
Source/JavaScriptCore/wasm/WasmFunctionParser.h | 636 +
Source/JavaScriptCore/wasm/WasmMemory.cpp | 171 +
Source/JavaScriptCore/wasm/WasmMemory.h | 91 +
.../JavaScriptCore/wasm/WasmMemoryInformation.cpp | 86 +
Source/JavaScriptCore/wasm/WasmMemoryInformation.h | 71 +
Source/JavaScriptCore/wasm/WasmModuleParser.cpp | 629 +
Source/JavaScriptCore/wasm/WasmModuleParser.h | 76 +
Source/JavaScriptCore/wasm/WasmPageCount.cpp | 45 +
Source/JavaScriptCore/wasm/WasmPageCount.h | 101 +
Source/JavaScriptCore/wasm/WasmParser.h | 276 +
Source/JavaScriptCore/wasm/WasmPlan.cpp | 273 +
Source/JavaScriptCore/wasm/WasmPlan.h | 116 +
Source/JavaScriptCore/wasm/WasmSections.h | 85 +
Source/JavaScriptCore/wasm/WasmSignature.cpp | 157 +
Source/JavaScriptCore/wasm/WasmSignature.h | 169 +
Source/JavaScriptCore/wasm/WasmValidate.cpp | 396 +
Source/JavaScriptCore/wasm/WasmValidate.h | 43 +
Source/JavaScriptCore/wasm/generateWasm.py | 96 +
.../wasm/generateWasmB3IRGeneratorInlinesHeader.py | 218 +
.../JavaScriptCore/wasm/generateWasmOpsHeader.py | 285 +
.../wasm/generateWasmValidateInlinesHeader.py | 170 +
.../JavaScriptCore/wasm/js/JSWebAssemblyCallee.cpp | 56 +
.../JavaScriptCore/wasm/js/JSWebAssemblyCallee.h | 71 +
.../wasm/js/JSWebAssemblyCompileError.cpp | 61 +
.../wasm/js/JSWebAssemblyCompileError.h | 54 +
.../JavaScriptCore/wasm/js/JSWebAssemblyHelpers.h | 76 +
.../wasm/js/JSWebAssemblyInstance.cpp | 99 +
.../JavaScriptCore/wasm/js/JSWebAssemblyInstance.h | 117 +
.../wasm/js/JSWebAssemblyLinkError.cpp | 61 +
.../wasm/js/JSWebAssemblyLinkError.h | 54 +
.../JavaScriptCore/wasm/js/JSWebAssemblyMemory.cpp | 148 +
.../JavaScriptCore/wasm/js/JSWebAssemblyMemory.h | 69 +
.../JavaScriptCore/wasm/js/JSWebAssemblyModule.cpp | 98 +
.../JavaScriptCore/wasm/js/JSWebAssemblyModule.h | 123 +
.../wasm/js/JSWebAssemblyRuntimeError.cpp | 54 +
.../wasm/js/JSWebAssemblyRuntimeError.h | 52 +
.../JavaScriptCore/wasm/js/JSWebAssemblyTable.cpp | 136 +
Source/JavaScriptCore/wasm/js/JSWebAssemblyTable.h | 84 +
.../wasm/js/WebAssemblyCompileErrorConstructor.cpp | 102 +
.../wasm/js/WebAssemblyCompileErrorConstructor.h | 58 +
.../wasm/js/WebAssemblyCompileErrorPrototype.cpp | 69 +
.../wasm/js/WebAssemblyCompileErrorPrototype.h | 54 +
.../JavaScriptCore/wasm/js/WebAssemblyFunction.cpp | 187 +
.../JavaScriptCore/wasm/js/WebAssemblyFunction.h | 80 +
.../wasm/js/WebAssemblyFunctionCell.cpp | 0
.../wasm/js/WebAssemblyFunctionCell.h | 0
.../wasm/js/WebAssemblyInstanceConstructor.cpp | 345 +
.../wasm/js/WebAssemblyInstanceConstructor.h | 59 +
.../wasm/js/WebAssemblyInstancePrototype.cpp | 69 +
.../wasm/js/WebAssemblyInstancePrototype.h | 54 +
.../wasm/js/WebAssemblyLinkErrorConstructor.cpp | 102 +
.../wasm/js/WebAssemblyLinkErrorConstructor.h | 58 +
.../wasm/js/WebAssemblyLinkErrorPrototype.cpp | 69 +
.../wasm/js/WebAssemblyLinkErrorPrototype.h | 54 +
.../wasm/js/WebAssemblyMemoryConstructor.cpp | 160 +
.../wasm/js/WebAssemblyMemoryConstructor.h | 59 +
.../wasm/js/WebAssemblyMemoryPrototype.cpp | 123 +
.../wasm/js/WebAssemblyMemoryPrototype.h | 54 +
.../wasm/js/WebAssemblyModuleConstructor.cpp | 156 +
.../wasm/js/WebAssemblyModuleConstructor.h | 61 +
.../wasm/js/WebAssemblyModulePrototype.cpp | 112 +
.../wasm/js/WebAssemblyModulePrototype.h | 54 +
.../wasm/js/WebAssemblyModuleRecord.cpp | 291 +
.../wasm/js/WebAssemblyModuleRecord.h | 67 +
.../wasm/js/WebAssemblyPrototype.cpp | 89 +
.../JavaScriptCore/wasm/js/WebAssemblyPrototype.h | 54 +
.../wasm/js/WebAssemblyRuntimeErrorConstructor.cpp | 102 +
.../wasm/js/WebAssemblyRuntimeErrorConstructor.h | 58 +
.../wasm/js/WebAssemblyRuntimeErrorPrototype.cpp | 69 +
.../wasm/js/WebAssemblyRuntimeErrorPrototype.h | 54 +
.../wasm/js/WebAssemblyTableConstructor.cpp | 150 +
.../wasm/js/WebAssemblyTableConstructor.h | 59 +
.../wasm/js/WebAssemblyTablePrototype.cpp | 178 +
.../wasm/js/WebAssemblyTablePrototype.h | 54 +
.../wasm/js/WebAssemblyToJSCallee.cpp | 67 +
.../JavaScriptCore/wasm/js/WebAssemblyToJSCallee.h | 53 +
Source/JavaScriptCore/wasm/wasm.json | 217 +
Source/JavaScriptCore/yarr/RegularExpression.cpp | 25 +-
Source/JavaScriptCore/yarr/RegularExpression.h | 9 +-
Source/JavaScriptCore/yarr/Yarr.h | 15 +-
Source/JavaScriptCore/yarr/YarrCanonicalize.h | 143 +
.../JavaScriptCore/yarr/YarrCanonicalizeUCS2.cpp | 823 +-
Source/JavaScriptCore/yarr/YarrCanonicalizeUCS2.h | 138 -
Source/JavaScriptCore/yarr/YarrCanonicalizeUCS2.js | 193 +
Source/JavaScriptCore/yarr/YarrInterpreter.cpp | 411 +-
Source/JavaScriptCore/yarr/YarrInterpreter.h | 97 +-
Source/JavaScriptCore/yarr/YarrJIT.cpp | 534 +-
Source/JavaScriptCore/yarr/YarrJIT.h | 42 +-
Source/JavaScriptCore/yarr/YarrParser.h | 225 +-
Source/JavaScriptCore/yarr/YarrPattern.cpp | 309 +-
Source/JavaScriptCore/yarr/YarrPattern.h | 189 +-
Source/JavaScriptCore/yarr/YarrSyntaxChecker.cpp | 10 +-
Source/JavaScriptCore/yarr/YarrSyntaxChecker.h | 10 +-
2741 files changed, 416228 insertions(+), 130137 deletions(-)
delete mode 100644 Source/JavaScriptCore/API/APIShims.h
create mode 100644 Source/JavaScriptCore/API/APIUtils.h
create mode 100644 Source/JavaScriptCore/API/JSContext.h
create mode 100644 Source/JavaScriptCore/API/JSContextInternal.h
create mode 100644 Source/JavaScriptCore/API/JSContextPrivate.h
create mode 100644 Source/JavaScriptCore/API/JSContextRefInspectorSupport.h
create mode 100644 Source/JavaScriptCore/API/JSContextRefInternal.h
create mode 100644 Source/JavaScriptCore/API/JSExport.h
create mode 100644 Source/JavaScriptCore/API/JSManagedValue.h
create mode 100644 Source/JavaScriptCore/API/JSManagedValueInternal.h
create mode 100644 Source/JavaScriptCore/API/JSRemoteInspector.cpp
create mode 100644 Source/JavaScriptCore/API/JSRemoteInspector.h
create mode 100644 Source/JavaScriptCore/API/JSStringRefBSTR.cpp
create mode 100644 Source/JavaScriptCore/API/JSStringRefBSTR.h
create mode 100644 Source/JavaScriptCore/API/JSStringRefCF.cpp
create mode 100644 Source/JavaScriptCore/API/JSStringRefCF.h
create mode 100644 Source/JavaScriptCore/API/JSTypedArray.cpp
create mode 100644 Source/JavaScriptCore/API/JSTypedArray.h
create mode 100644 Source/JavaScriptCore/API/JSValue.h
create mode 100644 Source/JavaScriptCore/API/JSValueInternal.h
create mode 100644 Source/JavaScriptCore/API/JSVirtualMachine.h
create mode 100644 Source/JavaScriptCore/API/JSVirtualMachineInternal.h
create mode 100644 Source/JavaScriptCore/API/JSVirtualMachinePrivate.h
create mode 100644 Source/JavaScriptCore/API/JSWeakObjectMapRefPrivate.cpp
create mode 100644 Source/JavaScriptCore/API/JSWeakObjectMapRefPrivate.h
create mode 100644 Source/JavaScriptCore/API/JSWrapperMap.h
create mode 100644 Source/JavaScriptCore/API/JavaScriptCore.h
create mode 100644 Source/JavaScriptCore/API/ObjcRuntimeExtras.h
create mode 100644 Source/JavaScriptCore/API/tests/CompareAndSwapTest.cpp
create mode 100644 Source/JavaScriptCore/API/tests/CompareAndSwapTest.h
create mode 100644 Source/JavaScriptCore/API/tests/CurrentThisInsideBlockGetterTest.h
create mode 100644 Source/JavaScriptCore/API/tests/CustomGlobalObjectClassTest.c
create mode 100644 Source/JavaScriptCore/API/tests/CustomGlobalObjectClassTest.h
create mode 100644 Source/JavaScriptCore/API/tests/DateTests.h
create mode 100644 Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp
create mode 100644 Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.h
create mode 100644 Source/JavaScriptCore/API/tests/FunctionOverridesTest.cpp
create mode 100644 Source/JavaScriptCore/API/tests/FunctionOverridesTest.h
create mode 100644 Source/JavaScriptCore/API/tests/GlobalContextWithFinalizerTest.cpp
create mode 100644 Source/JavaScriptCore/API/tests/GlobalContextWithFinalizerTest.h
create mode 100644 Source/JavaScriptCore/API/tests/JSExportTests.h
create mode 100644 Source/JavaScriptCore/API/tests/JSONParseTest.cpp
create mode 100644 Source/JavaScriptCore/API/tests/JSONParseTest.h
create mode 100644 Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.cpp
create mode 100644 Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.h
create mode 100644 Source/JavaScriptCore/API/tests/Regress141275.h
create mode 100644 Source/JavaScriptCore/API/tests/Regress141809.h
create mode 100644 Source/JavaScriptCore/API/tests/TypedArrayCTest.cpp
create mode 100644 Source/JavaScriptCore/API/tests/TypedArrayCTest.h
create mode 100644 Source/JavaScriptCore/API/tests/minidom.html
create mode 100644 Source/JavaScriptCore/API/tests/minidom.js
create mode 100644 Source/JavaScriptCore/API/tests/testapi-function-overrides.js
create mode 100644 Source/JavaScriptCore/API/tests/testapi.c
create mode 100644 Source/JavaScriptCore/API/tests/testapi.js
create mode 100644 Source/JavaScriptCore/CMakeLists.txt
delete mode 100644 Source/JavaScriptCore/ChangeLog
create mode 100644 Source/JavaScriptCore/DerivedSources.make
delete mode 100644 Source/JavaScriptCore/ForwardingHeaders/JavaScriptCore/APIShims.h
create mode 100644 Source/JavaScriptCore/ForwardingHeaders/JavaScriptCore/JSObjectRefPrivate.h
create mode 100644 Source/JavaScriptCore/ForwardingHeaders/JavaScriptCore/JSStringRefCF.h
create mode 100644 Source/JavaScriptCore/ForwardingHeaders/JavaScriptCore/JSTypedArray.h
create mode 100644 Source/JavaScriptCore/ForwardingHeaders/JavaScriptCore/JavaScriptCore.h
delete mode 100644 Source/JavaScriptCore/GNUmakefile.am
delete mode 100644 Source/JavaScriptCore/GNUmakefile.list.am
create mode 100644 Source/JavaScriptCore/PlatformGTK.cmake
create mode 100644 Source/JavaScriptCore/PlatformJSCOnly.cmake
create mode 100644 Source/JavaScriptCore/Scripts/UpdateContents.py
create mode 100644 Source/JavaScriptCore/Scripts/builtins/__init__.py
create mode 100644 Source/JavaScriptCore/Scripts/builtins/builtins.py
create mode 100755 Source/JavaScriptCore/Scripts/builtins/builtins_generate_combined_header.py
create mode 100644 Source/JavaScriptCore/Scripts/builtins/builtins_generate_combined_implementation.py
create mode 100755 Source/JavaScriptCore/Scripts/builtins/builtins_generate_internals_wrapper_header.py
create mode 100755 Source/JavaScriptCore/Scripts/builtins/builtins_generate_internals_wrapper_implementation.py
create mode 100755 Source/JavaScriptCore/Scripts/builtins/builtins_generate_separate_header.py
create mode 100644 Source/JavaScriptCore/Scripts/builtins/builtins_generate_separate_implementation.py
create mode 100755 Source/JavaScriptCore/Scripts/builtins/builtins_generate_wrapper_header.py
create mode 100755 Source/JavaScriptCore/Scripts/builtins/builtins_generate_wrapper_implementation.py
create mode 100644 Source/JavaScriptCore/Scripts/builtins/builtins_generator.py
create mode 100755 Source/JavaScriptCore/Scripts/builtins/builtins_model.py
create mode 100644 Source/JavaScriptCore/Scripts/builtins/builtins_templates.py
create mode 100644 Source/JavaScriptCore/Scripts/cssmin.py
create mode 100755 Source/JavaScriptCore/Scripts/generate-combined-inspector-json.py
create mode 100644 Source/JavaScriptCore/Scripts/generate-js-builtins.py
create mode 100755 Source/JavaScriptCore/Scripts/inline-and-minify-stylesheets-and-scripts.py
create mode 100644 Source/JavaScriptCore/Scripts/jsmin.py
create mode 100644 Source/JavaScriptCore/Scripts/lazywriter.py
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Combined.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Combined.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Combined.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-InternalClashingNames-Combined.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/WebCore-AnotherGuardedInternalBuiltin-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/WebCore-ArbitraryConditionalGuard-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/WebCore-DuplicateFlagAnnotation-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/WebCore-DuplicateKeyValueAnnotation-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/WebCore-GuardedBuiltin-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/WebCore-GuardedInternalBuiltin-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/WebCore-UnguardedBuiltin-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/WebCore-xmlCasingTest-Separate.js
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-error
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-DuplicateFlagAnnotation-Separate.js-error
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-DuplicateKeyValueAnnotation-Separate.js-error
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result
create mode 100644 Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCoreJSBuiltins.h-result
create mode 100644 Source/JavaScriptCore/Scripts/xxd.pl
delete mode 100644 Source/JavaScriptCore/assembler/ARMv7Assembler.cpp
create mode 100644 Source/JavaScriptCore/assembler/AbortReason.h
create mode 100644 Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h
create mode 100644 Source/JavaScriptCore/assembler/AssemblerCommon.h
create mode 100644 Source/JavaScriptCore/assembler/CPU.h
create mode 100644 Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h
create mode 100644 Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp
create mode 100644 Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp
create mode 100644 Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp
create mode 100644 Source/JavaScriptCore/assembler/MacroAssemblerHelpers.h
create mode 100644 Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp
create mode 100644 Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h
delete mode 100644 Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
create mode 100644 Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h
delete mode 100644 Source/JavaScriptCore/assembler/RepatchBuffer.h
delete mode 100644 Source/JavaScriptCore/assembler/SH4Assembler.h
create mode 100644 Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3ArgumentRegValue.h
create mode 100644 Source/JavaScriptCore/b3/B3BasicBlock.cpp
create mode 100644 Source/JavaScriptCore/b3/B3BasicBlock.h
create mode 100644 Source/JavaScriptCore/b3/B3BasicBlockInlines.h
create mode 100644 Source/JavaScriptCore/b3/B3BasicBlockUtils.h
create mode 100644 Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp
create mode 100644 Source/JavaScriptCore/b3/B3BlockInsertionSet.h
create mode 100644 Source/JavaScriptCore/b3/B3BlockWorklist.h
create mode 100644 Source/JavaScriptCore/b3/B3BottomProvider.h
create mode 100644 Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp
create mode 100644 Source/JavaScriptCore/b3/B3BreakCriticalEdges.h
create mode 100644 Source/JavaScriptCore/b3/B3CCallValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3CCallValue.h
create mode 100644 Source/JavaScriptCore/b3/B3CFG.h
create mode 100644 Source/JavaScriptCore/b3/B3CaseCollection.cpp
create mode 100644 Source/JavaScriptCore/b3/B3CaseCollection.h
create mode 100644 Source/JavaScriptCore/b3/B3CaseCollectionInlines.h
create mode 100644 Source/JavaScriptCore/b3/B3CheckSpecial.cpp
create mode 100644 Source/JavaScriptCore/b3/B3CheckSpecial.h
create mode 100644 Source/JavaScriptCore/b3/B3CheckValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3CheckValue.h
create mode 100644 Source/JavaScriptCore/b3/B3Common.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Common.h
create mode 100644 Source/JavaScriptCore/b3/B3Commutativity.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Commutativity.h
create mode 100644 Source/JavaScriptCore/b3/B3Compilation.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Compilation.h
create mode 100644 Source/JavaScriptCore/b3/B3Compile.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Compile.h
create mode 100644 Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h
create mode 100644 Source/JavaScriptCore/b3/B3Const32Value.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Const32Value.h
create mode 100644 Source/JavaScriptCore/b3/B3Const64Value.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Const64Value.h
create mode 100644 Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3ConstDoubleValue.h
create mode 100644 Source/JavaScriptCore/b3/B3ConstFloatValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3ConstFloatValue.h
create mode 100644 Source/JavaScriptCore/b3/B3ConstPtrValue.h
create mode 100644 Source/JavaScriptCore/b3/B3ConstrainedValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3ConstrainedValue.h
create mode 100644 Source/JavaScriptCore/b3/B3DataSection.cpp
create mode 100644 Source/JavaScriptCore/b3/B3DataSection.h
create mode 100644 Source/JavaScriptCore/b3/B3Dominators.h
create mode 100644 Source/JavaScriptCore/b3/B3DuplicateTails.cpp
create mode 100644 Source/JavaScriptCore/b3/B3DuplicateTails.h
create mode 100644 Source/JavaScriptCore/b3/B3Effects.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Effects.h
create mode 100644 Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.cpp
create mode 100644 Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.h
create mode 100644 Source/JavaScriptCore/b3/B3FenceValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3FenceValue.h
create mode 100644 Source/JavaScriptCore/b3/B3FixSSA.cpp
create mode 100644 Source/JavaScriptCore/b3/B3FixSSA.h
create mode 100644 Source/JavaScriptCore/b3/B3FoldPathConstants.cpp
create mode 100644 Source/JavaScriptCore/b3/B3FoldPathConstants.h
create mode 100644 Source/JavaScriptCore/b3/B3FrequencyClass.cpp
create mode 100644 Source/JavaScriptCore/b3/B3FrequencyClass.h
create mode 100644 Source/JavaScriptCore/b3/B3FrequentedBlock.h
create mode 100644 Source/JavaScriptCore/b3/B3Generate.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Generate.h
create mode 100644 Source/JavaScriptCore/b3/B3GenericFrequentedBlock.h
create mode 100644 Source/JavaScriptCore/b3/B3HeapRange.cpp
create mode 100644 Source/JavaScriptCore/b3/B3HeapRange.h
create mode 100644 Source/JavaScriptCore/b3/B3InferSwitches.cpp
create mode 100644 Source/JavaScriptCore/b3/B3InferSwitches.h
create mode 100644 Source/JavaScriptCore/b3/B3InsertionSet.cpp
create mode 100644 Source/JavaScriptCore/b3/B3InsertionSet.h
create mode 100644 Source/JavaScriptCore/b3/B3InsertionSetInlines.h
create mode 100644 Source/JavaScriptCore/b3/B3Kind.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Kind.h
create mode 100644 Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp
create mode 100644 Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h
create mode 100644 Source/JavaScriptCore/b3/B3LowerMacros.cpp
create mode 100644 Source/JavaScriptCore/b3/B3LowerMacros.h
create mode 100644 Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.cpp
create mode 100644 Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.h
create mode 100644 Source/JavaScriptCore/b3/B3LowerToAir.cpp
create mode 100644 Source/JavaScriptCore/b3/B3LowerToAir.h
create mode 100644 Source/JavaScriptCore/b3/B3MathExtras.cpp
create mode 100644 Source/JavaScriptCore/b3/B3MathExtras.h
create mode 100644 Source/JavaScriptCore/b3/B3MemoryValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3MemoryValue.h
create mode 100644 Source/JavaScriptCore/b3/B3MoveConstants.cpp
create mode 100644 Source/JavaScriptCore/b3/B3MoveConstants.h
create mode 100644 Source/JavaScriptCore/b3/B3OpaqueByproduct.h
create mode 100644 Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp
create mode 100644 Source/JavaScriptCore/b3/B3OpaqueByproducts.h
create mode 100644 Source/JavaScriptCore/b3/B3Opcode.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Opcode.h
create mode 100644 Source/JavaScriptCore/b3/B3Origin.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Origin.h
create mode 100644 Source/JavaScriptCore/b3/B3OriginDump.cpp
create mode 100644 Source/JavaScriptCore/b3/B3OriginDump.h
create mode 100644 Source/JavaScriptCore/b3/B3PCToOriginMap.h
create mode 100644 Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp
create mode 100644 Source/JavaScriptCore/b3/B3PatchpointSpecial.h
create mode 100644 Source/JavaScriptCore/b3/B3PatchpointValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3PatchpointValue.h
create mode 100644 Source/JavaScriptCore/b3/B3PhaseScope.cpp
create mode 100644 Source/JavaScriptCore/b3/B3PhaseScope.h
create mode 100644 Source/JavaScriptCore/b3/B3PhiChildren.cpp
create mode 100644 Source/JavaScriptCore/b3/B3PhiChildren.h
create mode 100644 Source/JavaScriptCore/b3/B3Procedure.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Procedure.h
create mode 100644 Source/JavaScriptCore/b3/B3ProcedureInlines.h
create mode 100644 Source/JavaScriptCore/b3/B3PureCSE.cpp
create mode 100644 Source/JavaScriptCore/b3/B3PureCSE.h
create mode 100644 Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp
create mode 100644 Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h
create mode 100644 Source/JavaScriptCore/b3/B3ReduceStrength.cpp
create mode 100644 Source/JavaScriptCore/b3/B3ReduceStrength.h
create mode 100644 Source/JavaScriptCore/b3/B3SSACalculator.cpp
create mode 100644 Source/JavaScriptCore/b3/B3SSACalculator.h
create mode 100644 Source/JavaScriptCore/b3/B3SlotBaseValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3SlotBaseValue.h
create mode 100644 Source/JavaScriptCore/b3/B3SparseCollection.h
create mode 100644 Source/JavaScriptCore/b3/B3StackSlot.cpp
create mode 100644 Source/JavaScriptCore/b3/B3StackSlot.h
create mode 100644 Source/JavaScriptCore/b3/B3StackmapGenerationParams.cpp
create mode 100644 Source/JavaScriptCore/b3/B3StackmapGenerationParams.h
create mode 100644 Source/JavaScriptCore/b3/B3StackmapSpecial.cpp
create mode 100644 Source/JavaScriptCore/b3/B3StackmapSpecial.h
create mode 100644 Source/JavaScriptCore/b3/B3StackmapValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3StackmapValue.h
create mode 100644 Source/JavaScriptCore/b3/B3SuccessorCollection.h
create mode 100644 Source/JavaScriptCore/b3/B3SwitchCase.cpp
create mode 100644 Source/JavaScriptCore/b3/B3SwitchCase.h
create mode 100644 Source/JavaScriptCore/b3/B3SwitchValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3SwitchValue.h
create mode 100644 Source/JavaScriptCore/b3/B3TimingScope.cpp
create mode 100644 Source/JavaScriptCore/b3/B3TimingScope.h
create mode 100644 Source/JavaScriptCore/b3/B3Type.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Type.h
create mode 100644 Source/JavaScriptCore/b3/B3TypeMap.h
create mode 100644 Source/JavaScriptCore/b3/B3UpsilonValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3UpsilonValue.h
create mode 100644 Source/JavaScriptCore/b3/B3UseCounts.cpp
create mode 100644 Source/JavaScriptCore/b3/B3UseCounts.h
create mode 100644 Source/JavaScriptCore/b3/B3Validate.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Validate.h
create mode 100644 Source/JavaScriptCore/b3/B3Value.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Value.h
create mode 100644 Source/JavaScriptCore/b3/B3ValueInlines.h
create mode 100644 Source/JavaScriptCore/b3/B3ValueKey.cpp
create mode 100644 Source/JavaScriptCore/b3/B3ValueKey.h
create mode 100644 Source/JavaScriptCore/b3/B3ValueKeyInlines.h
create mode 100644 Source/JavaScriptCore/b3/B3ValueRep.cpp
create mode 100644 Source/JavaScriptCore/b3/B3ValueRep.h
create mode 100644 Source/JavaScriptCore/b3/B3Variable.cpp
create mode 100644 Source/JavaScriptCore/b3/B3Variable.h
create mode 100644 Source/JavaScriptCore/b3/B3VariableValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3VariableValue.h
create mode 100644 Source/JavaScriptCore/b3/B3WasmAddressValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3WasmAddressValue.h
create mode 100644 Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp
create mode 100644 Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h
create mode 100644 Source/JavaScriptCore/b3/air/AirAllocateStack.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirAllocateStack.h
create mode 100644 Source/JavaScriptCore/b3/air/AirArg.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirArg.h
create mode 100644 Source/JavaScriptCore/b3/air/AirArgInlines.h
create mode 100644 Source/JavaScriptCore/b3/air/AirBasicBlock.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirBasicBlock.h
create mode 100644 Source/JavaScriptCore/b3/air/AirBlockWorklist.h
create mode 100644 Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirCCallSpecial.h
create mode 100644 Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirCCallingConvention.h
create mode 100644 Source/JavaScriptCore/b3/air/AirCode.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirCode.h
create mode 100644 Source/JavaScriptCore/b3/air/AirCustom.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirCustom.h
create mode 100644 Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirDumpAsJS.h
create mode 100644 Source/JavaScriptCore/b3/air/AirEliminateDeadCode.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirEliminateDeadCode.h
create mode 100644 Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirEmitShuffle.h
create mode 100644 Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirFixObviousSpills.h
create mode 100644 Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.h
create mode 100644 Source/JavaScriptCore/b3/air/AirFrequentedBlock.h
create mode 100644 Source/JavaScriptCore/b3/air/AirGenerate.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirGenerate.h
create mode 100644 Source/JavaScriptCore/b3/air/AirGenerated.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirGenerationContext.h
create mode 100644 Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h
create mode 100644 Source/JavaScriptCore/b3/air/AirInsertionSet.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirInsertionSet.h
create mode 100644 Source/JavaScriptCore/b3/air/AirInst.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirInst.h
create mode 100644 Source/JavaScriptCore/b3/air/AirInstInlines.h
create mode 100644 Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h
create mode 100644 Source/JavaScriptCore/b3/air/AirKind.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirKind.h
create mode 100644 Source/JavaScriptCore/b3/air/AirLiveness.h
create mode 100644 Source/JavaScriptCore/b3/air/AirLogRegisterPressure.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirLogRegisterPressure.h
create mode 100644 Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h
create mode 100644 Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h
create mode 100644 Source/JavaScriptCore/b3/air/AirLowerMacros.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirLowerMacros.h
create mode 100644 Source/JavaScriptCore/b3/air/AirOpcode.opcodes
create mode 100644 Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.h
create mode 100644 Source/JavaScriptCore/b3/air/AirPadInterference.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirPadInterference.h
create mode 100644 Source/JavaScriptCore/b3/air/AirPhaseScope.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirPhaseScope.h
create mode 100644 Source/JavaScriptCore/b3/air/AirReportUsedRegisters.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirReportUsedRegisters.h
create mode 100644 Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirSimplifyCFG.h
create mode 100644 Source/JavaScriptCore/b3/air/AirSpecial.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirSpecial.h
create mode 100644 Source/JavaScriptCore/b3/air/AirSpillEverything.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirSpillEverything.h
create mode 100644 Source/JavaScriptCore/b3/air/AirStackSlot.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirStackSlot.h
create mode 100644 Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirStackSlotKind.h
create mode 100644 Source/JavaScriptCore/b3/air/AirTmp.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirTmp.h
create mode 100644 Source/JavaScriptCore/b3/air/AirTmpInlines.h
create mode 100644 Source/JavaScriptCore/b3/air/AirTmpWidth.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirTmpWidth.h
create mode 100644 Source/JavaScriptCore/b3/air/AirUseCounts.h
create mode 100644 Source/JavaScriptCore/b3/air/AirValidate.cpp
create mode 100644 Source/JavaScriptCore/b3/air/AirValidate.h
create mode 100644 Source/JavaScriptCore/b3/air/opcode_generator.rb
create mode 100644 Source/JavaScriptCore/b3/air/testair.cpp
create mode 100644 Source/JavaScriptCore/b3/testb3.cpp
create mode 100644 Source/JavaScriptCore/builtins/ArrayConstructor.js
create mode 100644 Source/JavaScriptCore/builtins/ArrayIteratorPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/ArrayPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/AsyncFunctionPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/BuiltinExecutableCreator.cpp
create mode 100644 Source/JavaScriptCore/builtins/BuiltinExecutableCreator.h
create mode 100644 Source/JavaScriptCore/builtins/BuiltinExecutables.cpp
create mode 100644 Source/JavaScriptCore/builtins/BuiltinExecutables.h
create mode 100644 Source/JavaScriptCore/builtins/BuiltinNames.h
create mode 100644 Source/JavaScriptCore/builtins/BuiltinUtils.h
create mode 100644 Source/JavaScriptCore/builtins/DatePrototype.js
create mode 100644 Source/JavaScriptCore/builtins/FunctionPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/GeneratorPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/GlobalObject.js
create mode 100644 Source/JavaScriptCore/builtins/GlobalOperations.js
create mode 100644 Source/JavaScriptCore/builtins/InspectorInstrumentationObject.js
create mode 100644 Source/JavaScriptCore/builtins/InternalPromiseConstructor.js
create mode 100644 Source/JavaScriptCore/builtins/IteratorHelpers.js
create mode 100644 Source/JavaScriptCore/builtins/IteratorPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/MapPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/ModuleLoaderPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/NumberConstructor.js
create mode 100644 Source/JavaScriptCore/builtins/NumberPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/ObjectConstructor.js
create mode 100644 Source/JavaScriptCore/builtins/PromiseConstructor.js
create mode 100644 Source/JavaScriptCore/builtins/PromiseOperations.js
create mode 100644 Source/JavaScriptCore/builtins/PromisePrototype.js
create mode 100644 Source/JavaScriptCore/builtins/ReflectObject.js
create mode 100644 Source/JavaScriptCore/builtins/RegExpPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/SetPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/StringConstructor.js
create mode 100644 Source/JavaScriptCore/builtins/StringIteratorPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/StringPrototype.js
create mode 100644 Source/JavaScriptCore/builtins/TypedArrayConstructor.js
create mode 100644 Source/JavaScriptCore/builtins/TypedArrayPrototype.js
create mode 100644 Source/JavaScriptCore/bytecode/AccessCase.cpp
create mode 100644 Source/JavaScriptCore/bytecode/AccessCase.h
create mode 100644 Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
create mode 100644 Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
create mode 100644 Source/JavaScriptCore/bytecode/ArithProfile.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ArithProfile.h
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeGraph.h
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeKills.h
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeList.json
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp
create mode 100644 Source/JavaScriptCore/bytecode/BytecodeRewriter.h
create mode 100644 Source/JavaScriptCore/bytecode/CallEdge.cpp
create mode 100644 Source/JavaScriptCore/bytecode/CallEdge.h
create mode 100644 Source/JavaScriptCore/bytecode/CallMode.cpp
create mode 100644 Source/JavaScriptCore/bytecode/CallMode.h
create mode 100644 Source/JavaScriptCore/bytecode/CallVariant.cpp
create mode 100644 Source/JavaScriptCore/bytecode/CallVariant.h
create mode 100644 Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ComplexGetStatus.h
create mode 100644 Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp
create mode 100644 Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.h
create mode 100644 Source/JavaScriptCore/bytecode/DataFormat.cpp
create mode 100644 Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
create mode 100644 Source/JavaScriptCore/bytecode/DeferredSourceDump.h
create mode 100644 Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp
create mode 100644 Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h
create mode 100644 Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp
create mode 100644 Source/JavaScriptCore/bytecode/EvalCodeBlock.h
delete mode 100644 Source/JavaScriptCore/bytecode/EvalCodeCache.h
create mode 100644 Source/JavaScriptCore/bytecode/ExecutableInfo.h
create mode 100644 Source/JavaScriptCore/bytecode/ExitingJITType.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ExitingJITType.h
create mode 100644 Source/JavaScriptCore/bytecode/FunctionCodeBlock.cpp
create mode 100644 Source/JavaScriptCore/bytecode/FunctionCodeBlock.h
create mode 100644 Source/JavaScriptCore/bytecode/GetByIdVariant.cpp
create mode 100644 Source/JavaScriptCore/bytecode/GetByIdVariant.h
create mode 100644 Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp
create mode 100644 Source/JavaScriptCore/bytecode/GetterSetterAccessCase.h
create mode 100644 Source/JavaScriptCore/bytecode/GlobalCodeBlock.h
create mode 100644 Source/JavaScriptCore/bytecode/InlineAccess.cpp
create mode 100644 Source/JavaScriptCore/bytecode/InlineAccess.h
create mode 100644 Source/JavaScriptCore/bytecode/InlineCallFrame.cpp
create mode 100644 Source/JavaScriptCore/bytecode/InlineCallFrame.h
create mode 100644 Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h
create mode 100644 Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.cpp
create mode 100644 Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.h
create mode 100644 Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
create mode 100644 Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h
create mode 100644 Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.h
create mode 100644 Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.h
create mode 100644 Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h
create mode 100644 Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h
create mode 100644 Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
create mode 100644 Source/JavaScriptCore/bytecode/PolymorphicAccess.h
delete mode 100644 Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
delete mode 100644 Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
delete mode 100644 Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
create mode 100644 Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h
delete mode 100644 Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp
delete mode 100644 Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h
create mode 100644 Source/JavaScriptCore/bytecode/ProgramCodeBlock.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ProgramCodeBlock.h
create mode 100644 Source/JavaScriptCore/bytecode/PropertyCondition.cpp
create mode 100644 Source/JavaScriptCore/bytecode/PropertyCondition.h
create mode 100644 Source/JavaScriptCore/bytecode/ProxyableAccessCase.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ProxyableAccessCase.h
create mode 100644 Source/JavaScriptCore/bytecode/PutByIdFlags.cpp
create mode 100644 Source/JavaScriptCore/bytecode/PutByIdFlags.h
create mode 100644 Source/JavaScriptCore/bytecode/PutByIdVariant.cpp
create mode 100644 Source/JavaScriptCore/bytecode/PutByIdVariant.h
delete mode 100644 Source/JavaScriptCore/bytecode/SamplingTool.cpp
delete mode 100644 Source/JavaScriptCore/bytecode/SamplingTool.h
create mode 100644 Source/JavaScriptCore/bytecode/StructureSet.cpp
create mode 100644 Source/JavaScriptCore/bytecode/SuperSampler.cpp
create mode 100644 Source/JavaScriptCore/bytecode/SuperSampler.h
create mode 100644 Source/JavaScriptCore/bytecode/ToThisStatus.cpp
create mode 100644 Source/JavaScriptCore/bytecode/ToThisStatus.h
 create mode 100644 Source/JavaScriptCore/bytecode/TrackedReferences.cpp
 create mode 100644 Source/JavaScriptCore/bytecode/TrackedReferences.h
 create mode 100644 Source/JavaScriptCore/bytecode/TypeLocation.h
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.cpp
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.h
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.cpp
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.h
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedGlobalCodeBlock.h
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.cpp
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.h
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.cpp
 create mode 100644 Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.h
 delete mode 100644 Source/JavaScriptCore/bytecode/VariableWatchpointSet.h
 create mode 100644 Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp
 create mode 100644 Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h
 create mode 100644 Source/JavaScriptCore/bytecode/VirtualRegister.cpp
 delete mode 100644 Source/JavaScriptCore/debugger/DebuggerActivation.cpp
 delete mode 100644 Source/JavaScriptCore/debugger/DebuggerActivation.h
 create mode 100644 Source/JavaScriptCore/debugger/DebuggerEvalEnabler.h
 create mode 100644 Source/JavaScriptCore/debugger/DebuggerLocation.cpp
 create mode 100644 Source/JavaScriptCore/debugger/DebuggerLocation.h
 create mode 100644 Source/JavaScriptCore/debugger/DebuggerParseData.cpp
 create mode 100644 Source/JavaScriptCore/debugger/DebuggerParseData.h
 create mode 100644 Source/JavaScriptCore/debugger/DebuggerScope.cpp
 create mode 100644 Source/JavaScriptCore/debugger/DebuggerScope.h
 create mode 100644 Source/JavaScriptCore/debugger/ScriptProfilingScope.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGAnalysis.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp
 delete mode 100644 Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGAvailabilityMap.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGBackwardsCFG.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGBackwardsDominators.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp
 delete mode 100644 Source/JavaScriptCore/dfg/DFGBinarySwitch.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGBlockMap.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGBlockMapInlines.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGBlockSet.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGBlockSet.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGBlockSetInlines.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGBlockWorklist.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGCFG.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGCleanUpPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGClobbersExitState.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGClobbersExitState.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGCombinedLiveness.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGControlEquivalenceAnalysis.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGDOMJITPatchpointParams.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGDOMJITPatchpointParams.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGDesiredInferredType.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGDesiredStructureChains.cpp
 delete mode 100644 Source/JavaScriptCore/dfg/DFGDesiredStructureChains.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.cpp
 delete mode 100644 Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGDoesGC.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGDoesGC.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGDominators.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGEpoch.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGEpoch.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGFlowIndexing.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGFlowIndexing.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGFlowMap.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.cpp
 delete mode 100644 Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGForAllKills.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGFrozenValue.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGFrozenValue.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGGraphSafepoint.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGHeapLocation.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGHeapLocation.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGInferredTypeCheck.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGInferredTypeCheck.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGInsertionSet.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGLazyNode.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGLazyNode.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGMaximalFlushInsertionPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGMaximalFlushInsertionPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGMayExit.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGMayExit.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGMergeMode.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGNodeAbstractValuePair.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGNodeAbstractValuePair.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGNodeFlowProjection.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGNodeFlowProjection.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGNodeOrigin.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGNodeOrigin.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGNullAbstractState.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGObjectMaterializationData.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGObjectMaterializationData.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGOpInfo.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPhiChildren.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGPhiChildren.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPlanInlines.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPrePostNumbering.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGPrePostNumbering.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPropertyTypeKey.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPureValue.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGPureValue.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGRegisteredStructure.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGRegisteredStructureSet.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGRegisteredStructureSet.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.cpp
 delete mode 100644 Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGSSACalculator.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGSSACalculator.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGSafepoint.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGSafepoint.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGScannable.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGStoreBarrierClusteringPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGStoreBarrierClusteringPhase.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.cpp
 delete mode 100644 Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGStructureAbstractValue.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGStructureClobberState.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGThreadData.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGThreadData.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGTransition.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGTransition.h
 delete mode 100644 Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGValueStrength.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGValueStrength.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp
 create mode 100644 Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp
 delete mode 100644 Source/JavaScriptCore/dfg/DFGVariadicFunction.h
 create mode 100644 Source/JavaScriptCore/dfg/DFGWorklistInlines.h
 create mode 100644 Source/JavaScriptCore/disassembler/ARMLLVMDisassembler.cpp
 create mode 100644 Source/JavaScriptCore/disassembler/ARMv7/ARMv7DOpcode.cpp
 create mode 100644 Source/JavaScriptCore/disassembler/ARMv7/ARMv7DOpcode.h
 create mode 100644 Source/JavaScriptCore/disassembler/ARMv7Disassembler.cpp
 delete mode 100644 Source/JavaScriptCore/disassembler/LLVMDisassembler.cpp
 delete mode 100644 Source/JavaScriptCore/disassembler/LLVMDisassembler.h
 create mode 100644 Source/JavaScriptCore/disassembler/UDis86Disassembler.cpp
 create mode 100644 Source/JavaScriptCore/disassembler/UDis86Disassembler.h
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/differences.txt
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/optable.xml
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/ud_itab.py
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/ud_opcode.py
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86.c
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86.h
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_decode.c
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_decode.h
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_extern.h
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_itab_holder.c
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_syn-att.c
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_syn-intel.c
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_syn.c
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_syn.h
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_types.h
 create mode 100644 Source/JavaScriptCore/disassembler/udis86/udis86_udint.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITAbstractHeap.cpp
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITAbstractHeap.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITCallDOMGetterPatchpoint.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITEffect.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITGetterSetter.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITHeapRange.cpp
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITHeapRange.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITPatchpoint.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITPatchpointParams.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITReg.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITSignature.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITSlowPathCalls.h
 create mode 100644 Source/JavaScriptCore/domjit/DOMJITValue.h
 create mode 100644 Source/JavaScriptCore/dynbench.cpp
 create mode 100644 Source/JavaScriptCore/features.json
 delete mode 100644 Source/JavaScriptCore/ftl/FTLAbbreviations.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLAvailableRecovery.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLExceptionTarget.h
 delete mode 100644 Source/JavaScriptCore/ftl/FTLExitArgumentList.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLExitPropertyValue.h
 delete mode 100644 Source/JavaScriptCore/ftl/FTLExitThunkGenerator.cpp
 delete mode 100644 Source/JavaScriptCore/ftl/FTLExitThunkGenerator.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.h
 delete mode 100644 Source/JavaScriptCore/ftl/FTLIntrinsicRepository.cpp
 delete mode 100644 Source/JavaScriptCore/ftl/FTLIntrinsicRepository.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLLazySlowPath.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLLocation.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLLocation.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLLowerDFGToB3.h
 delete mode 100644 Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
 delete mode 100644 Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.h
 delete mode 100644 Source/JavaScriptCore/ftl/FTLOSRExitCompilationInfo.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLOSRExitHandle.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLOperations.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLOperations.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLSaveRestore.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLSaveRestore.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLSlowPathCall.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLSlowPathCallKey.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLStackmapArgumentList.h
 delete mode 100644 Source/JavaScriptCore/ftl/FTLValueFormat.cpp
 delete mode 100644 Source/JavaScriptCore/ftl/FTLValueFormat.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLValueRange.cpp
 create mode 100644 Source/JavaScriptCore/ftl/FTLValueRange.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLWeight.h
 create mode 100644 Source/JavaScriptCore/ftl/FTLWeightedTarget.h
 create mode 100644 Source/JavaScriptCore/generate-bytecode-files
 create mode 100644 Source/JavaScriptCore/generateYarrCanonicalizeUnicode
 create mode 100644 Source/JavaScriptCore/heap/AllocatingScope.h
 create mode 100644 Source/JavaScriptCore/heap/AllocatorAttributes.cpp
 create mode 100644 Source/JavaScriptCore/heap/AllocatorAttributes.h
 delete mode 100644 Source/JavaScriptCore/heap/BlockAllocator.cpp
 delete mode 100644 Source/JavaScriptCore/heap/BlockAllocator.h
 create mode 100644 Source/JavaScriptCore/heap/CellContainer.cpp
 create mode 100644 Source/JavaScriptCore/heap/CellContainer.h
 create mode 100644 Source/JavaScriptCore/heap/CellContainerInlines.h
 create mode 100644 Source/JavaScriptCore/heap/CellState.h
 create mode 100644 Source/JavaScriptCore/heap/CodeBlockSetInlines.h
 create mode 100644 Source/JavaScriptCore/heap/CollectingScope.h
 create mode 100644 Source/JavaScriptCore/heap/CollectionScope.cpp
 create mode 100644 Source/JavaScriptCore/heap/CollectionScope.h
 create mode 100644 Source/JavaScriptCore/heap/CollectorPhase.cpp
 create mode 100644 Source/JavaScriptCore/heap/CollectorPhase.h
 create mode 100644 Source/JavaScriptCore/heap/ConstraintVolatility.h
 delete mode 100644 Source/JavaScriptCore/heap/CopiedAllocator.h
 delete mode 100644 Source/JavaScriptCore/heap/CopiedBlock.h
 delete mode 100644 Source/JavaScriptCore/heap/CopiedBlockInlines.h
 delete mode 100644 Source/JavaScriptCore/heap/CopiedSpace.cpp
 delete mode 100644 Source/JavaScriptCore/heap/CopiedSpace.h
 delete mode 100644 Source/JavaScriptCore/heap/CopiedSpaceInlines.h
 delete mode 100644 Source/JavaScriptCore/heap/CopyToken.h
 delete mode 100644 Source/JavaScriptCore/heap/CopyVisitor.cpp
 delete mode 100644 Source/JavaScriptCore/heap/CopyVisitor.h
 delete mode 100644 Source/JavaScriptCore/heap/CopyVisitorInlines.h
 delete mode 100644 Source/JavaScriptCore/heap/CopyWorkList.h
 delete mode 100644 Source/JavaScriptCore/heap/CopyWriteBarrier.h
 delete mode 100644 Source/JavaScriptCore/heap/DelayedReleaseScope.h
 create mode 100644 Source/JavaScriptCore/heap/DeleteAllCodeEffort.h
 create mode 100644 Source/JavaScriptCore/heap/DestructionMode.cpp
 create mode 100644 Source/JavaScriptCore/heap/DestructionMode.h
 create mode 100644 Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp
 create mode 100644 Source/JavaScriptCore/heap/EdenGCActivityCallback.h
 create mode 100644 Source/JavaScriptCore/heap/FreeList.cpp
 create mode 100644 Source/JavaScriptCore/heap/FreeList.h
 create mode 100644 Source/JavaScriptCore/heap/FullGCActivityCallback.cpp
 create mode 100644 Source/JavaScriptCore/heap/FullGCActivityCallback.h
 create mode 100644 Source/JavaScriptCore/heap/GCActivityCallback.cpp
 create mode 100644 Source/JavaScriptCore/heap/GCActivityCallback.h
 create mode 100644 Source/JavaScriptCore/heap/GCConductor.cpp
 create mode 100644 Source/JavaScriptCore/heap/GCConductor.h
 create mode 100644 Source/JavaScriptCore/heap/GCDeferralContext.h
 create mode 100644 Source/JavaScriptCore/heap/GCDeferralContextInlines.h
 create mode 100644 Source/JavaScriptCore/heap/GCLogging.cpp
 create mode 100644 Source/JavaScriptCore/heap/GCLogging.h
 create mode 100644 Source/JavaScriptCore/heap/GCSegmentedArray.h
 create mode 100644 Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h
 delete mode 100644 Source/JavaScriptCore/heap/GCThread.cpp
 delete mode 100644 Source/JavaScriptCore/heap/GCThread.h
 delete mode 100644 Source/JavaScriptCore/heap/GCThreadSharedData.cpp
 delete mode 100644 Source/JavaScriptCore/heap/GCThreadSharedData.h
 create mode 100644 Source/JavaScriptCore/heap/GCTypeMap.h
 delete mode 100644 Source/JavaScriptCore/heap/HeapBlock.h
 create mode 100644 Source/JavaScriptCore/heap/HeapCell.cpp
 create mode 100644 Source/JavaScriptCore/heap/HeapCell.h
 create mode 100644 Source/JavaScriptCore/heap/HeapCellInlines.h
 create mode 100644 Source/JavaScriptCore/heap/HeapHelperPool.cpp
 create mode 100644 Source/JavaScriptCore/heap/HeapHelperPool.h
 create mode 100644 Source/JavaScriptCore/heap/HeapInlines.h
 create mode 100644 Source/JavaScriptCore/heap/HeapObserver.h
 delete mode 100644 Source/JavaScriptCore/heap/HeapOperation.h
 create mode 100644 Source/JavaScriptCore/heap/HeapProfiler.cpp
 create mode 100644 Source/JavaScriptCore/heap/HeapProfiler.h
 delete mode 100644 Source/JavaScriptCore/heap/HeapRootVisitor.h
 create mode 100644 Source/JavaScriptCore/heap/HeapSnapshot.cpp
 create mode 100644 Source/JavaScriptCore/heap/HeapSnapshot.h
 create mode 100644 Source/JavaScriptCore/heap/HeapSnapshotBuilder.cpp
 create mode 100644 Source/JavaScriptCore/heap/HeapSnapshotBuilder.h
 delete mode 100644 Source/JavaScriptCore/heap/HeapStatistics.cpp
 delete mode 100644 Source/JavaScriptCore/heap/HeapStatistics.h
 create mode 100644 Source/JavaScriptCore/heap/HeapUtil.h
 create mode 100644 Source/JavaScriptCore/heap/HeapVerifier.cpp
 create mode 100644 Source/JavaScriptCore/heap/HeapVerifier.h
 create mode 100644 Source/JavaScriptCore/heap/LargeAllocation.cpp
 create mode 100644 Source/JavaScriptCore/heap/LargeAllocation.h
 create mode 100644 Source/JavaScriptCore/heap/LiveObjectData.h
 create mode 100644 Source/JavaScriptCore/heap/LiveObjectList.cpp
 create mode 100644 Source/JavaScriptCore/heap/LiveObjectList.h
 create mode 100644 Source/JavaScriptCore/heap/LockDuringMarking.h
 delete mode 100644 Source/JavaScriptCore/heap/MarkStackInlines.h
 create mode 100644 Source/JavaScriptCore/heap/MarkedAllocatorInlines.h
 create mode 100644 Source/JavaScriptCore/heap/MarkedBlockInlines.h
 create mode 100644 Source/JavaScriptCore/heap/MarkedSpaceInlines.h
 create mode 100644 Source/JavaScriptCore/heap/MarkingConstraint.cpp
 create mode 100644 Source/JavaScriptCore/heap/MarkingConstraint.h
 create mode 100644 Source/JavaScriptCore/heap/MarkingConstraintSet.cpp
 create mode 100644 Source/JavaScriptCore/heap/MarkingConstraintSet.h
 create mode 100644 Source/JavaScriptCore/heap/MutatorScheduler.cpp
 create mode 100644 Source/JavaScriptCore/heap/MutatorScheduler.h
 create mode 100644 Source/JavaScriptCore/heap/MutatorState.cpp
 create mode 100644 Source/JavaScriptCore/heap/MutatorState.h
 create mode 100644 Source/JavaScriptCore/heap/OpaqueRootSet.h
 create mode 100644 Source/JavaScriptCore/heap/PreventCollectionScope.h
 delete mode 100644 Source/JavaScriptCore/heap/RecursiveAllocationScope.h
 delete mode 100644 Source/JavaScriptCore/heap/Region.h
 create mode 100644 Source/JavaScriptCore/heap/RegisterState.h
 create mode 100644 Source/JavaScriptCore/heap/ReleaseHeapAccessScope.h
 create mode 100644 Source/JavaScriptCore/heap/RunningScope.h
 create mode 100644 Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.cpp
 create mode 100644 Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.h
 create mode 100644 Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp
 create mode 100644 Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.h
 create mode 100644 Source/JavaScriptCore/heap/StopIfNecessaryTimer.cpp
 create mode 100644 Source/JavaScriptCore/heap/StopIfNecessaryTimer.h
 create mode 100644 Source/JavaScriptCore/heap/Subspace.cpp
 create mode 100644 Source/JavaScriptCore/heap/Subspace.h
 create mode 100644 Source/JavaScriptCore/heap/SubspaceInlines.h
 delete mode 100644 Source/JavaScriptCore/heap/SuperRegion.cpp
 delete mode 100644 Source/JavaScriptCore/heap/SuperRegion.h
 create mode 100644 Source/JavaScriptCore/heap/SweepingScope.h
 create mode 100644 Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.cpp
 create mode 100644 Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.h
 create mode 100644 Source/JavaScriptCore/heap/VisitRaceKey.cpp
 create mode 100644 Source/JavaScriptCore/heap/VisitRaceKey.h
 create mode 100644 Source/JavaScriptCore/heap/VisitingTimeout.h
 delete mode 100644 Source/JavaScriptCore/heap/WriteBarrierBuffer.cpp
 delete mode 100644 Source/JavaScriptCore/heap/WriteBarrierBuffer.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/localpointer.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/ptypes.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/ucal.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/ucurr.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/udat.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/udatpg.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/udisplaycontext.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/uformattable.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/umisc.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/unorm2.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/unum.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/unumsys.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/uscript.h
 create mode 100644 Source/JavaScriptCore/icu/unicode/uvernum.h
 create mode 100644 Source/JavaScriptCore/inspector/AsyncStackTrace.cpp
 create mode 100644 Source/JavaScriptCore/inspector/AsyncStackTrace.h
 create mode 100644 Source/JavaScriptCore/inspector/ConsoleMessage.cpp
 create mode 100644 Source/JavaScriptCore/inspector/ConsoleMessage.h
 create mode 100644 Source/JavaScriptCore/inspector/EventLoop.cpp
 create mode 100644 Source/JavaScriptCore/inspector/EventLoop.h
 create mode 100644 Source/JavaScriptCore/inspector/IdentifiersFactory.cpp
 create mode 100644 Source/JavaScriptCore/inspector/IdentifiersFactory.h
 create mode 100644 Source/JavaScriptCore/inspector/InspectorFrontendRouter.cpp
 create mode 100644 Source/JavaScriptCore/inspector/InspectorFrontendRouter.h
 create mode 100644 Source/JavaScriptCore/inspector/InspectorProtocolTypes.h
 delete mode 100644 Source/JavaScriptCore/inspector/InspectorTypeBuilder.h
 create mode 100644 Source/JavaScriptCore/inspector/JSGlobalObjectConsoleClient.cpp
 create mode 100644 Source/JavaScriptCore/inspector/JSGlobalObjectConsoleClient.h
 create mode 100644 Source/JavaScriptCore/inspector/JSGlobalObjectInspectorController.cpp
 create mode 100644 Source/JavaScriptCore/inspector/JSGlobalObjectInspectorController.h
 create mode 100644 Source/JavaScriptCore/inspector/JSGlobalObjectScriptDebugServer.cpp
 create mode 100644 Source/JavaScriptCore/inspector/JSGlobalObjectScriptDebugServer.h
 create mode 100644 Source/JavaScriptCore/inspector/PerGlobalObjectWrapperWorld.cpp
 create mode 100644 Source/JavaScriptCore/inspector/PerGlobalObjectWrapperWorld.h
 create mode 100644 Source/JavaScriptCore/inspector/ScriptArguments.cpp
 create mode 100644 Source/JavaScriptCore/inspector/ScriptArguments.h
 create mode 100644 Source/JavaScriptCore/inspector/ScriptCallFrame.cpp
 create mode 100644 Source/JavaScriptCore/inspector/ScriptCallFrame.h
 create mode 100644 Source/JavaScriptCore/inspector/ScriptCallStack.cpp
 create mode 100644 Source/JavaScriptCore/inspector/ScriptCallStack.h
 create mode 100644 Source/JavaScriptCore/inspector/ScriptCallStackFactory.cpp
 create mode 100644 Source/JavaScriptCore/inspector/ScriptCallStackFactory.h
 create mode 100644 Source/JavaScriptCore/inspector/agents/InspectorConsoleAgent.cpp
 create mode 100644 Source/JavaScriptCore/inspector/agents/InspectorConsoleAgent.h
 create mode 100644 Source/JavaScriptCore/inspector/agents/InspectorHeapAgent.cpp
 create mode 100644 Source/JavaScriptCore/inspector/agents/InspectorHeapAgent.h
 create mode 100644 Source/JavaScriptCore/inspector/agents/InspectorScriptProfilerAgent.cpp
 create mode 100644 Source/JavaScriptCore/inspector/agents/InspectorScriptProfilerAgent.h
 create mode 100644 Source/JavaScriptCore/inspector/agents/JSGlobalObjectConsoleAgent.cpp
 create mode 100644 Source/JavaScriptCore/inspector/agents/JSGlobalObjectConsoleAgent.h
 create mode 100644 Source/JavaScriptCore/inspector/agents/JSGlobalObjectDebuggerAgent.cpp
 create mode 100644 Source/JavaScriptCore/inspector/agents/JSGlobalObjectDebuggerAgent.h
 create mode 100644 Source/JavaScriptCore/inspector/agents/JSGlobalObjectRuntimeAgent.cpp
 create mode 100644 Source/JavaScriptCore/inspector/agents/JSGlobalObjectRuntimeAgent.h
 create mode 100644 Source/JavaScriptCore/inspector/augmentable/AlternateDispatchableAgent.h
 create mode 100644 Source/JavaScriptCore/inspector/augmentable/AugmentableInspectorController.h
 create mode 100644 Source/JavaScriptCore/inspector/augmentable/AugmentableInspectorControllerClient.h
 create mode 100644 Source/JavaScriptCore/inspector/protocol/ApplicationCache.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/CSS.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Console.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/DOM.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/DOMDebugger.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/DOMStorage.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Database.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Heap.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/IndexedDB.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Inspector.json
 delete mode 100644 Source/JavaScriptCore/inspector/protocol/InspectorDomain.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/LayerTree.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Memory.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Network.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/OverlayTypes.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Page.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Replay.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/ScriptProfiler.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Timeline.json
 create mode 100644 Source/JavaScriptCore/inspector/protocol/Worker.json
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteAutomationTarget.cpp
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteAutomationTarget.h
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteConnectionToTarget.h
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteControllableTarget.cpp
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteControllableTarget.h
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteInspectionTarget.cpp
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteInspectionTarget.h
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteInspector.cpp
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteInspector.h
 create mode 100644 Source/JavaScriptCore/inspector/remote/RemoteInspectorConstants.h
 delete mode 100755 Source/JavaScriptCore/inspector/scripts/CodeGeneratorInspector.py
 delete mode 100644 Source/JavaScriptCore/inspector/scripts/CodeGeneratorInspectorStrings.py
 create mode 100644 Source/JavaScriptCore/inspector/scripts/codegen/__init__.py
 create mode 100644 Source/JavaScriptCore/inspector/scripts/codegen/cpp_generator.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/cpp_generator_templates.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_alternate_backend_dispatcher_header.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_backend_dispatcher_header.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_backend_dispatcher_implementation.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_frontend_dispatcher_header.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_frontend_dispatcher_implementation.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_protocol_types_header.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_protocol_types_implementation.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_js_backend_commands.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_backend_dispatcher_header.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_backend_dispatcher_implementation.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_configuration_header.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_configuration_implementation.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_frontend_dispatcher_implementation.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_header.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_internal_header.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_protocol_type_conversions_header.py
 create mode 100644 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_protocol_type_conversions_implementation.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_protocol_types_implementation.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/generator.py
 create mode 100644 Source/JavaScriptCore/inspector/scripts/codegen/generator_templates.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/models.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/objc_generator.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/codegen/objc_generator_templates.py
 delete mode 100644 Source/JavaScriptCore/inspector/scripts/cssmin.py
 delete mode 100755 Source/JavaScriptCore/inspector/scripts/generate-combined-inspector-json.py
 create mode 100755 Source/JavaScriptCore/inspector/scripts/generate-inspector-protocol-bindings.py
 delete mode 100755 Source/JavaScriptCore/inspector/scripts/inline-and-minify-stylesheets-and-scripts.py
 delete mode 100644 Source/JavaScriptCore/inspector/scripts/jsmin.py
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/all/definitions-with-mac-platform.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/all/expected/definitions-with-mac-platform.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/commands-with-async-attribute.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/commands-with-optional-call-return-parameters.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/definitions-with-mac-platform.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/domain-availability.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/domains-with-varying-command-sizes.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/enum-values.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/events-with-optional-parameters.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/commands-with-async-attribute.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/commands-with-optional-call-return-parameters.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/definitions-with-mac-platform.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/domain-availability.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/domains-with-varying-command-sizes.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/enum-values.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/events-with-optional-parameters.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-command-with-invalid-platform.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-domain-availability.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-duplicate-command-call-parameter-names.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-duplicate-command-return-parameter-names.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-duplicate-event-parameter-names.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-duplicate-type-declarations.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-duplicate-type-member-names.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-enum-with-no-values.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-number-typed-optional-parameter-flag.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-number-typed-optional-type-member.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-string-typed-optional-parameter-flag.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-string-typed-optional-type-member.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-type-declaration-using-type-reference.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-type-reference-as-primitive-type.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-type-with-invalid-platform.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-type-with-lowercase-name.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-unknown-type-reference-in-type-declaration.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/fail-on-unknown-type-reference-in-type-member.json-error
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/generate-domains-with-feature-guards.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/same-type-id-different-domain.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/shadowed-optional-type-setters.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/type-declaration-aliased-primitive-type.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/type-declaration-array-type.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/type-declaration-enum-type.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/type-declaration-object-type.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/type-requiring-runtime-casts.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/expected/worker-supported-domains.json-result
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-command-with-invalid-platform.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-domain-availability.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-duplicate-command-call-parameter-names.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-duplicate-command-return-parameter-names.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-duplicate-event-parameter-names.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-duplicate-type-declarations.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-duplicate-type-member-names.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-enum-with-no-values.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-number-typed-optional-parameter-flag.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-number-typed-optional-type-member.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-string-typed-optional-parameter-flag.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-string-typed-optional-type-member.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-type-declaration-using-type-reference.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-type-reference-as-primitive-type.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-type-with-invalid-platform.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-type-with-lowercase-name.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-unknown-type-reference-in-type-declaration.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/fail-on-unknown-type-reference-in-type-member.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/generate-domains-with-feature-guards.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/same-type-id-different-domain.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/shadowed-optional-type-setters.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/type-declaration-aliased-primitive-type.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/type-declaration-array-type.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/type-declaration-enum-type.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/type-declaration-object-type.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/type-requiring-runtime-casts.json
 create mode 100644 Source/JavaScriptCore/inspector/scripts/tests/generic/worker-supported-domains.json
 delete mode 100644 Source/JavaScriptCore/inspector/scripts/xxd.pl
 create mode 100644 Source/JavaScriptCore/interpreter/CLoopStack.cpp
 create mode 100644 Source/JavaScriptCore/interpreter/CLoopStack.h
 create mode 100644 Source/JavaScriptCore/interpreter/CLoopStackInlines.h
 delete mode 100644 Source/JavaScriptCore/interpreter/CallFrameInlines.h
 create mode 100644 Source/JavaScriptCore/interpreter/FrameTracers.h
 create mode 100644 Source/JavaScriptCore/interpreter/InterpreterInlines.h
 delete mode 100644 Source/JavaScriptCore/interpreter/JSStack.cpp
 delete mode 100644 Source/JavaScriptCore/interpreter/JSStack.h
 delete mode 100644 Source/JavaScriptCore/interpreter/JSStackInlines.h
 create mode 100644 Source/JavaScriptCore/interpreter/ShadowChicken.cpp
 create mode 100644 Source/JavaScriptCore/interpreter/ShadowChicken.h
 create mode 100644 Source/JavaScriptCore/interpreter/ShadowChickenInlines.h
 create mode 100644 Source/JavaScriptCore/interpreter/VMEntryRecord.h
 delete mode 100644 Source/JavaScriptCore/interpreter/VMInspector.cpp
 delete mode 100644 Source/JavaScriptCore/interpreter/VMInspector.h
 create mode 100644 Source/JavaScriptCore/jit/BinarySwitch.cpp
 create mode 100644 Source/JavaScriptCore/jit/BinarySwitch.h
 create mode 100644 Source/JavaScriptCore/jit/CCallHelpers.cpp
 create mode 100644 Source/JavaScriptCore/jit/CachedRecovery.cpp
 create mode 100644 Source/JavaScriptCore/jit/CachedRecovery.h
 create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffleData.cpp
 create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffleData.h
 create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler.cpp
 create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler.h
 create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp
 create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler64.cpp
 delete mode 100644 Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
 delete mode 100644 Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
 create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp
 create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h
 delete mode 100644 Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
 create mode 100644 Source/JavaScriptCore/jit/GPRInfo.cpp
 create mode 100644 Source/JavaScriptCore/jit/ICStats.cpp
 create mode 100644 Source/JavaScriptCore/jit/ICStats.h
 create mode 100644 Source/JavaScriptCore/jit/IntrinsicEmitter.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITAddGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITAddGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITBitAndGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITBitAndGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITBitOrGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITBitOrGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITBitXorGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITBitXorGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITDivGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITDivGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITLeftShiftGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITMathIC.h
 create mode 100644 Source/JavaScriptCore/jit/JITMathICForwards.h
 create mode 100644 Source/JavaScriptCore/jit/JITMathICInlineResult.h
 create mode 100644 Source/JavaScriptCore/jit/JITMulGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITMulGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITNegGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITNegGenerator.h
 delete mode 100644 Source/JavaScriptCore/jit/JITOperationWrappers.h
 create mode 100644 Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITRightShiftGenerator.h
 delete mode 100644 Source/JavaScriptCore/jit/JITStubs.cpp
 delete mode 100644 Source/JavaScriptCore/jit/JITStubs.h
 delete mode 100644 Source/JavaScriptCore/jit/JITStubsARM.h
 delete mode 100644 Source/JavaScriptCore/jit/JITStubsARMv7.h
 create mode 100644 Source/JavaScriptCore/jit/JITStubsMSVC64.asm
 delete mode 100644 Source/JavaScriptCore/jit/JITStubsX86.h
 delete mode 100644 Source/JavaScriptCore/jit/JITStubsX86Common.h
 delete mode 100644 Source/JavaScriptCore/jit/JITStubsX86_64.h
 create mode 100644 Source/JavaScriptCore/jit/JITSubGenerator.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITSubGenerator.h
 create mode 100644 Source/JavaScriptCore/jit/JITWorklist.cpp
 create mode 100644 Source/JavaScriptCore/jit/JITWorklist.h
 delete mode 100644 Source/JavaScriptCore/jit/JITWriteBarrier.h
 create mode 100644 Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp
 create mode 100644 Source/JavaScriptCore/jit/PCToCodeOriginMap.h
 create mode 100644 Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
 create mode 100644 Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h
 create mode 100644 Source/JavaScriptCore/jit/Reg.cpp
 create mode 100644 Source/JavaScriptCore/jit/Reg.h
 create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffset.cpp
 create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffset.h
 create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp
 create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffsetList.h
 create mode 100644 Source/JavaScriptCore/jit/RegisterMap.h
 create mode 100644 Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp
 create mode 100644 Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
 create mode 100644 Source/JavaScriptCore/jit/SetupVarargsFrame.h
 create mode 100644 Source/JavaScriptCore/jit/SnippetOperand.h
 create mode 100644 Source/JavaScriptCore/jit/SpillRegistersMode.h
 create mode 100644 Source/JavaScriptCore/jit/TagRegistersMode.cpp
 create mode 100644 Source/JavaScriptCore/jit/TagRegistersMode.h
 create mode 100644 Source/JavaScriptCore/llint/LLIntPCRanges.h
 delete mode 100644 Source/JavaScriptCore/offlineasm/sh4.rb
 delete mode 100644 Source/JavaScriptCore/os-win32/stdbool.h
 create mode 100644 Source/JavaScriptCore/parser/ModuleAnalyzer.cpp
 create mode 100644 Source/JavaScriptCore/parser/ModuleAnalyzer.h
 create mode 100644 Source/JavaScriptCore/parser/ModuleScopeData.h
 delete mode 100644 Source/JavaScriptCore/parser/NodeInfo.h
 create mode 100644 Source/JavaScriptCore/parser/NodesAnalyzeModule.cpp
 create mode 100644 Source/JavaScriptCore/parser/ParserFunctionInfo.h
 delete mode 100644 Source/JavaScriptCore/parser/SourceCode.cpp
 create mode 100644 Source/JavaScriptCore/parser/SourceCodeKey.h
 create mode 100644 Source/JavaScriptCore/parser/UnlinkedSourceCode.cpp
 create mode 100644 Source/JavaScriptCore/parser/UnlinkedSourceCode.h
 create mode 100644 Source/JavaScriptCore/parser/VariableEnvironment.cpp
 create mode 100644 Source/JavaScriptCore/parser/VariableEnvironment.h
 create mode 100755 Source/JavaScriptCore/postprocess-headers.sh
 delete mode 100644 Source/JavaScriptCore/profiler/CallIdentifier.h
 delete mode 100644 Source/JavaScriptCore/profiler/LegacyProfiler.cpp
 delete mode 100644 Source/JavaScriptCore/profiler/LegacyProfiler.h
 delete mode 100644 Source/JavaScriptCore/profiler/Profile.cpp
 delete mode 100644 Source/JavaScriptCore/profiler/Profile.h
 delete mode 100644 Source/JavaScriptCore/profiler/ProfileGenerator.cpp
 delete mode 100644 Source/JavaScriptCore/profiler/ProfileGenerator.h
 delete mode 100644 Source/JavaScriptCore/profiler/ProfileNode.cpp
 delete mode 100644 Source/JavaScriptCore/profiler/ProfileNode.h
 create mode 100644 Source/JavaScriptCore/profiler/ProfilerEvent.cpp
 create mode 100644 Source/JavaScriptCore/profiler/ProfilerEvent.h
 create mode 100644 Source/JavaScriptCore/profiler/ProfilerJettisonReason.cpp
 create mode 100644 Source/JavaScriptCore/profiler/ProfilerJettisonReason.h
 create mode 100644 Source/JavaScriptCore/profiler/ProfilerUID.cpp
 create mode 100644 Source/JavaScriptCore/profiler/ProfilerUID.h
 create mode 100644 Source/JavaScriptCore/replay/EmptyInputCursor.h
 create mode 100644 Source/JavaScriptCore/replay/EncodedValue.cpp
 create mode 100644 Source/JavaScriptCore/replay/EncodedValue.h
 create mode 100644 Source/JavaScriptCore/replay/InputCursor.h
 create mode 100644 Source/JavaScriptCore/replay/JSInputs.json
 create mode 100644 Source/JavaScriptCore/replay/NondeterministicInput.h
 create mode 100755 Source/JavaScriptCore/replay/scripts/CodeGeneratorReplayInputs.py
 create mode 100755 Source/JavaScriptCore/replay/scripts/CodeGeneratorReplayInputsTemplates.py
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-c-style-enum-no-storage.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-duplicate-enum-type.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-duplicate-input-names.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-duplicate-type-names.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-enum-type-missing-values.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-missing-input-member-name.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-missing-input-name.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-missing-input-queue.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-missing-type-mode.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-missing-type-name.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-unknown-input-queue.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-unknown-member-type.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/fail-on-unknown-type-mode.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-enum-encoding-helpers-with-guarded-values.json-TestReplayInputs.cpp
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-enum-encoding-helpers-with-guarded-values.json-TestReplayInputs.h
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-enum-encoding-helpers.json-TestReplayInputs.cpp
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-enum-encoding-helpers.json-TestReplayInputs.h
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-enum-with-guard.json-TestReplayInputs.cpp
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-enum-with-guard.json-TestReplayInputs.h
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-enums-with-same-base-name.json-TestReplayInputs.cpp
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-enums-with-same-base-name.json-TestReplayInputs.h
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-event-loop-shape-types.json-error
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-input-with-guard.json-TestReplayInputs.cpp
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-input-with-guard.json-TestReplayInputs.h
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-input-with-vector-members.json-TestReplayInputs.cpp
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-input-with-vector-members.json-TestReplayInputs.h
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-inputs-with-flags.json-TestReplayInputs.cpp
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-inputs-with-flags.json-TestReplayInputs.h
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-memoized-type-modes.json-TestReplayInputs.cpp
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/expected/generate-memoized-type-modes.json-TestReplayInputs.h
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-c-style-enum-no-storage.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-duplicate-enum-type.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-duplicate-input-names.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-duplicate-type-names.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-enum-type-missing-values.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-missing-input-member-name.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-missing-input-name.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-missing-input-queue.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-missing-type-mode.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-missing-type-name.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-unknown-input-queue.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-unknown-member-type.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/fail-on-unknown-type-mode.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-enum-encoding-helpers-with-guarded-values.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-enum-encoding-helpers.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-enum-with-guard.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-enums-with-same-base-name.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-event-loop-shape-types.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-input-with-guard.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-input-with-vector-members.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-inputs-with-flags.json
 create mode 100644 Source/JavaScriptCore/replay/scripts/tests/generate-memoized-type-modes.json
 create mode 100644 Source/JavaScriptCore/runtime/AbstractModuleRecord.cpp
 create mode 100644 Source/JavaScriptCore/runtime/AbstractModuleRecord.h
 delete mode 100644 Source/JavaScriptCore/runtime/Arguments.cpp
 delete mode 100644 Source/JavaScriptCore/runtime/Arguments.h
 delete mode 100644 Source/JavaScriptCore/runtime/ArgumentsIteratorConstructor.cpp
 delete mode 100644 Source/JavaScriptCore/runtime/ArgumentsIteratorConstructor.h
 delete mode 100644 Source/JavaScriptCore/runtime/ArgumentsIteratorPrototype.cpp
 delete mode 100644 Source/JavaScriptCore/runtime/ArgumentsIteratorPrototype.h
 create mode 100644 Source/JavaScriptCore/runtime/ArgumentsMode.h
 create mode 100644 Source/JavaScriptCore/runtime/ArityCheckMode.h
 create mode 100644 Source/JavaScriptCore/runtime/ArrayBufferSharingMode.h
 create mode 100644 Source/JavaScriptCore/runtime/ArrayConventions.cpp
 create mode 100644 Source/JavaScriptCore/runtime/ArrayIteratorAdaptiveWatchpoint.cpp
 create mode 100644 Source/JavaScriptCore/runtime/ArrayIteratorAdaptiveWatchpoint.h
 delete mode 100644 Source/JavaScriptCore/runtime/ArrayIteratorConstructor.cpp
 delete mode 100644 Source/JavaScriptCore/runtime/ArrayIteratorConstructor.h
 create mode 100644 Source/JavaScriptCore/runtime/AsyncFunctionConstructor.cpp
 create mode 100644 Source/JavaScriptCore/runtime/AsyncFunctionConstructor.h
 create mode 100644 Source/JavaScriptCore/runtime/AsyncFunctionPrototype.cpp
 create mode 100644 Source/JavaScriptCore/runtime/AsyncFunctionPrototype.h
Source/JavaScriptCore/runtime/AtomicsObject.cpp create mode 100644 Source/JavaScriptCore/runtime/AtomicsObject.h create mode 100644 Source/JavaScriptCore/runtime/AuxiliaryBarrier.h create mode 100644 Source/JavaScriptCore/runtime/AuxiliaryBarrierInlines.h create mode 100644 Source/JavaScriptCore/runtime/BasicBlockLocation.cpp create mode 100644 Source/JavaScriptCore/runtime/BasicBlockLocation.h create mode 100644 Source/JavaScriptCore/runtime/BundlePath.h create mode 100644 Source/JavaScriptCore/runtime/CatchScope.cpp create mode 100644 Source/JavaScriptCore/runtime/CatchScope.h create mode 100644 Source/JavaScriptCore/runtime/ClonedArguments.cpp create mode 100644 Source/JavaScriptCore/runtime/ClonedArguments.h delete mode 100644 Source/JavaScriptCore/runtime/ConcurrentJITLock.h create mode 100644 Source/JavaScriptCore/runtime/ConcurrentJSLock.h create mode 100644 Source/JavaScriptCore/runtime/ConsoleClient.cpp create mode 100644 Source/JavaScriptCore/runtime/ConsoleClient.h create mode 100644 Source/JavaScriptCore/runtime/ConsoleObject.cpp create mode 100644 Source/JavaScriptCore/runtime/ConsoleObject.h create mode 100644 Source/JavaScriptCore/runtime/ConsoleTypes.h create mode 100644 Source/JavaScriptCore/runtime/ConstantMode.cpp create mode 100644 Source/JavaScriptCore/runtime/ConstructAbility.h create mode 100644 Source/JavaScriptCore/runtime/ControlFlowProfiler.cpp create mode 100644 Source/JavaScriptCore/runtime/ControlFlowProfiler.h create mode 100644 Source/JavaScriptCore/runtime/CustomGetterSetter.cpp create mode 100644 Source/JavaScriptCore/runtime/CustomGetterSetter.h create mode 100644 Source/JavaScriptCore/runtime/DefinePropertyAttributes.h create mode 100644 Source/JavaScriptCore/runtime/DirectArguments.cpp create mode 100644 Source/JavaScriptCore/runtime/DirectArguments.h create mode 100644 Source/JavaScriptCore/runtime/DirectArgumentsOffset.cpp create mode 100644 Source/JavaScriptCore/runtime/DirectArgumentsOffset.h create mode 100644 Source/JavaScriptCore/runtime/DirectEvalExecutable.cpp create mode 100644 Source/JavaScriptCore/runtime/DirectEvalExecutable.h create mode 100644 Source/JavaScriptCore/runtime/ECMAScriptSpecInternalFunctions.cpp create mode 100644 Source/JavaScriptCore/runtime/ECMAScriptSpecInternalFunctions.h create mode 100644 Source/JavaScriptCore/runtime/EnumerationMode.h create mode 100644 Source/JavaScriptCore/runtime/ErrorHandlingScope.cpp create mode 100644 Source/JavaScriptCore/runtime/ErrorHandlingScope.h create mode 100644 Source/JavaScriptCore/runtime/EvalExecutable.cpp create mode 100644 Source/JavaScriptCore/runtime/EvalExecutable.h create mode 100644 Source/JavaScriptCore/runtime/Exception.cpp create mode 100644 Source/JavaScriptCore/runtime/Exception.h create mode 100644 Source/JavaScriptCore/runtime/ExceptionEventLocation.cpp create mode 100644 Source/JavaScriptCore/runtime/ExceptionEventLocation.h create mode 100644 Source/JavaScriptCore/runtime/ExceptionFuzz.cpp create mode 100644 Source/JavaScriptCore/runtime/ExceptionFuzz.h create mode 100644 Source/JavaScriptCore/runtime/ExceptionScope.cpp create mode 100644 Source/JavaScriptCore/runtime/ExceptionScope.h delete mode 100644 Source/JavaScriptCore/runtime/Executable.cpp delete mode 100644 Source/JavaScriptCore/runtime/Executable.h create mode 100644 Source/JavaScriptCore/runtime/ExecutableBase.cpp create mode 100644 Source/JavaScriptCore/runtime/ExecutableBase.h create mode 100644 Source/JavaScriptCore/runtime/FunctionExecutable.cpp create mode 100644 
Source/JavaScriptCore/runtime/FunctionExecutable.h create mode 100644 Source/JavaScriptCore/runtime/FunctionHasExecutedCache.cpp create mode 100644 Source/JavaScriptCore/runtime/FunctionHasExecutedCache.h create mode 100644 Source/JavaScriptCore/runtime/FunctionRareData.cpp create mode 100644 Source/JavaScriptCore/runtime/FunctionRareData.h delete mode 100644 Source/JavaScriptCore/runtime/GCActivityCallback.cpp delete mode 100644 Source/JavaScriptCore/runtime/GCActivityCallback.h create mode 100644 Source/JavaScriptCore/runtime/GeneratorFunctionConstructor.cpp create mode 100644 Source/JavaScriptCore/runtime/GeneratorFunctionConstructor.h create mode 100644 Source/JavaScriptCore/runtime/GeneratorFunctionPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/GeneratorFunctionPrototype.h create mode 100644 Source/JavaScriptCore/runtime/GeneratorPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/GeneratorPrototype.h create mode 100644 Source/JavaScriptCore/runtime/GenericArguments.h create mode 100644 Source/JavaScriptCore/runtime/GenericArgumentsInlines.h create mode 100644 Source/JavaScriptCore/runtime/GenericOffset.h create mode 100644 Source/JavaScriptCore/runtime/GetPutInfo.h create mode 100644 Source/JavaScriptCore/runtime/HasOwnPropertyCache.h create mode 100644 Source/JavaScriptCore/runtime/HashMapImpl.cpp create mode 100644 Source/JavaScriptCore/runtime/HashMapImpl.h create mode 100644 Source/JavaScriptCore/runtime/IdentifierInlines.h create mode 100644 Source/JavaScriptCore/runtime/IndirectEvalExecutable.cpp create mode 100644 Source/JavaScriptCore/runtime/IndirectEvalExecutable.h create mode 100644 Source/JavaScriptCore/runtime/InferredType.cpp create mode 100644 Source/JavaScriptCore/runtime/InferredType.h create mode 100644 Source/JavaScriptCore/runtime/InferredTypeTable.cpp create mode 100644 Source/JavaScriptCore/runtime/InferredTypeTable.h create mode 100644 Source/JavaScriptCore/runtime/InferredValue.cpp create mode 100644 Source/JavaScriptCore/runtime/InferredValue.h create mode 100644 Source/JavaScriptCore/runtime/InspectorInstrumentationObject.cpp create mode 100644 Source/JavaScriptCore/runtime/InspectorInstrumentationObject.h delete mode 100644 Source/JavaScriptCore/runtime/IntegralTypedArrayBase.h delete mode 100644 Source/JavaScriptCore/runtime/IntendedStructureChain.cpp delete mode 100644 Source/JavaScriptCore/runtime/IntendedStructureChain.h create mode 100644 Source/JavaScriptCore/runtime/IntlCollator.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlCollator.h create mode 100644 Source/JavaScriptCore/runtime/IntlCollatorConstructor.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlCollatorConstructor.h create mode 100644 Source/JavaScriptCore/runtime/IntlCollatorPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlCollatorPrototype.h create mode 100644 Source/JavaScriptCore/runtime/IntlDateTimeFormat.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlDateTimeFormat.h create mode 100644 Source/JavaScriptCore/runtime/IntlDateTimeFormatConstructor.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlDateTimeFormatConstructor.h create mode 100644 Source/JavaScriptCore/runtime/IntlDateTimeFormatPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlDateTimeFormatPrototype.h create mode 100644 Source/JavaScriptCore/runtime/IntlNumberFormat.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlNumberFormat.h create mode 100644 Source/JavaScriptCore/runtime/IntlNumberFormatConstructor.cpp create mode 
100644 Source/JavaScriptCore/runtime/IntlNumberFormatConstructor.h create mode 100644 Source/JavaScriptCore/runtime/IntlNumberFormatPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlNumberFormatPrototype.h create mode 100644 Source/JavaScriptCore/runtime/IntlObject.cpp create mode 100644 Source/JavaScriptCore/runtime/IntlObject.h create mode 100644 Source/JavaScriptCore/runtime/IntlObjectInlines.h create mode 100644 Source/JavaScriptCore/runtime/IterationKind.h create mode 100644 Source/JavaScriptCore/runtime/IterationStatus.h create mode 100644 Source/JavaScriptCore/runtime/IteratorOperations.cpp create mode 100644 Source/JavaScriptCore/runtime/IteratorOperations.h create mode 100644 Source/JavaScriptCore/runtime/IteratorPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/IteratorPrototype.h delete mode 100644 Source/JavaScriptCore/runtime/JSActivation.cpp delete mode 100644 Source/JavaScriptCore/runtime/JSActivation.h delete mode 100644 Source/JavaScriptCore/runtime/JSArgumentsIterator.cpp delete mode 100644 Source/JavaScriptCore/runtime/JSArgumentsIterator.h create mode 100644 Source/JavaScriptCore/runtime/JSArrayInlines.h delete mode 100644 Source/JavaScriptCore/runtime/JSArrayIterator.cpp delete mode 100644 Source/JavaScriptCore/runtime/JSArrayIterator.h create mode 100644 Source/JavaScriptCore/runtime/JSAsyncFunction.cpp create mode 100644 Source/JavaScriptCore/runtime/JSAsyncFunction.h create mode 100644 Source/JavaScriptCore/runtime/JSCInlines.h create mode 100644 Source/JavaScriptCore/runtime/JSCallee.cpp create mode 100644 Source/JavaScriptCore/runtime/JSCallee.h create mode 100644 Source/JavaScriptCore/runtime/JSCustomGetterSetterFunction.cpp create mode 100644 Source/JavaScriptCore/runtime/JSCustomGetterSetterFunction.h create mode 100644 Source/JavaScriptCore/runtime/JSDestructibleObjectSubspace.cpp create mode 100644 Source/JavaScriptCore/runtime/JSDestructibleObjectSubspace.h create mode 100644 Source/JavaScriptCore/runtime/JSEnvironmentRecord.cpp create mode 100644 Source/JavaScriptCore/runtime/JSEnvironmentRecord.h create mode 100644 Source/JavaScriptCore/runtime/JSFixedArray.cpp create mode 100644 Source/JavaScriptCore/runtime/JSFixedArray.h create mode 100644 Source/JavaScriptCore/runtime/JSGeneratorFunction.cpp create mode 100644 Source/JavaScriptCore/runtime/JSGeneratorFunction.h create mode 100644 Source/JavaScriptCore/runtime/JSGenericTypedArrayViewPrototypeFunctions.h create mode 100644 Source/JavaScriptCore/runtime/JSGlobalLexicalEnvironment.cpp create mode 100644 Source/JavaScriptCore/runtime/JSGlobalLexicalEnvironment.h create mode 100644 Source/JavaScriptCore/runtime/JSGlobalObjectDebuggable.cpp create mode 100644 Source/JavaScriptCore/runtime/JSGlobalObjectDebuggable.h create mode 100644 Source/JavaScriptCore/runtime/JSGlobalObjectInlines.h create mode 100644 Source/JavaScriptCore/runtime/JSInternalPromise.cpp create mode 100644 Source/JavaScriptCore/runtime/JSInternalPromise.h create mode 100644 Source/JavaScriptCore/runtime/JSInternalPromiseConstructor.cpp create mode 100644 Source/JavaScriptCore/runtime/JSInternalPromiseConstructor.h create mode 100644 Source/JavaScriptCore/runtime/JSInternalPromiseDeferred.cpp create mode 100644 Source/JavaScriptCore/runtime/JSInternalPromiseDeferred.h create mode 100644 Source/JavaScriptCore/runtime/JSInternalPromisePrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/JSInternalPromisePrototype.h create mode 100644 Source/JavaScriptCore/runtime/JSJob.cpp create mode 100644 
Source/JavaScriptCore/runtime/JSJob.h create mode 100644 Source/JavaScriptCore/runtime/JSLexicalEnvironment.cpp create mode 100644 Source/JavaScriptCore/runtime/JSLexicalEnvironment.h create mode 100644 Source/JavaScriptCore/runtime/JSModuleEnvironment.cpp create mode 100644 Source/JavaScriptCore/runtime/JSModuleEnvironment.h create mode 100644 Source/JavaScriptCore/runtime/JSModuleLoader.cpp create mode 100644 Source/JavaScriptCore/runtime/JSModuleLoader.h create mode 100644 Source/JavaScriptCore/runtime/JSModuleNamespaceObject.cpp create mode 100644 Source/JavaScriptCore/runtime/JSModuleNamespaceObject.h create mode 100644 Source/JavaScriptCore/runtime/JSModuleRecord.cpp create mode 100644 Source/JavaScriptCore/runtime/JSModuleRecord.h delete mode 100644 Source/JavaScriptCore/runtime/JSNameScope.cpp delete mode 100644 Source/JavaScriptCore/runtime/JSNameScope.h create mode 100644 Source/JavaScriptCore/runtime/JSNativeStdFunction.cpp create mode 100644 Source/JavaScriptCore/runtime/JSNativeStdFunction.h delete mode 100644 Source/JavaScriptCore/runtime/JSNotAnObject.cpp delete mode 100644 Source/JavaScriptCore/runtime/JSNotAnObject.h create mode 100644 Source/JavaScriptCore/runtime/JSObjectInlines.h delete mode 100644 Source/JavaScriptCore/runtime/JSPromiseFunctions.cpp delete mode 100644 Source/JavaScriptCore/runtime/JSPromiseFunctions.h delete mode 100644 Source/JavaScriptCore/runtime/JSPromiseReaction.cpp delete mode 100644 Source/JavaScriptCore/runtime/JSPromiseReaction.h create mode 100644 Source/JavaScriptCore/runtime/JSPropertyNameEnumerator.cpp create mode 100644 Source/JavaScriptCore/runtime/JSPropertyNameEnumerator.h create mode 100644 Source/JavaScriptCore/runtime/JSScriptFetcher.cpp create mode 100644 Source/JavaScriptCore/runtime/JSScriptFetcher.h create mode 100644 Source/JavaScriptCore/runtime/JSSegmentedVariableObjectSubspace.cpp create mode 100644 Source/JavaScriptCore/runtime/JSSegmentedVariableObjectSubspace.h create mode 100644 Source/JavaScriptCore/runtime/JSSourceCode.cpp create mode 100644 Source/JavaScriptCore/runtime/JSSourceCode.h create mode 100644 Source/JavaScriptCore/runtime/JSStringInlines.h create mode 100644 Source/JavaScriptCore/runtime/JSStringIterator.cpp create mode 100644 Source/JavaScriptCore/runtime/JSStringIterator.h create mode 100644 Source/JavaScriptCore/runtime/JSStringSubspace.cpp create mode 100644 Source/JavaScriptCore/runtime/JSStringSubspace.h create mode 100644 Source/JavaScriptCore/runtime/JSTemplateRegistryKey.cpp create mode 100644 Source/JavaScriptCore/runtime/JSTemplateRegistryKey.h create mode 100644 Source/JavaScriptCore/runtime/JSTypedArrayViewConstructor.cpp create mode 100644 Source/JavaScriptCore/runtime/JSTypedArrayViewConstructor.h create mode 100644 Source/JavaScriptCore/runtime/JSTypedArrayViewPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/JSTypedArrayViewPrototype.h delete mode 100644 Source/JavaScriptCore/runtime/JSVariableObject.cpp delete mode 100644 Source/JavaScriptCore/runtime/JSVariableObject.h create mode 100644 Source/JavaScriptCore/runtime/JSWeakSet.cpp create mode 100644 Source/JavaScriptCore/runtime/JSWeakSet.h create mode 100644 Source/JavaScriptCore/runtime/LazyClassStructure.cpp create mode 100644 Source/JavaScriptCore/runtime/LazyClassStructure.h create mode 100644 Source/JavaScriptCore/runtime/LazyClassStructureInlines.h create mode 100644 Source/JavaScriptCore/runtime/LazyProperty.h create mode 100644 Source/JavaScriptCore/runtime/LazyPropertyInlines.h create mode 100644 
Source/JavaScriptCore/runtime/MapBase.cpp create mode 100644 Source/JavaScriptCore/runtime/MapBase.h delete mode 100644 Source/JavaScriptCore/runtime/MapData.cpp delete mode 100644 Source/JavaScriptCore/runtime/MapData.h delete mode 100644 Source/JavaScriptCore/runtime/MapIteratorConstructor.cpp delete mode 100644 Source/JavaScriptCore/runtime/MapIteratorConstructor.h create mode 100644 Source/JavaScriptCore/runtime/MatchResult.cpp create mode 100644 Source/JavaScriptCore/runtime/MathCommon.cpp create mode 100644 Source/JavaScriptCore/runtime/MathCommon.h create mode 100644 Source/JavaScriptCore/runtime/MemoryStatistics.cpp create mode 100644 Source/JavaScriptCore/runtime/ModuleLoaderPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/ModuleLoaderPrototype.h create mode 100644 Source/JavaScriptCore/runtime/ModuleProgramExecutable.cpp create mode 100644 Source/JavaScriptCore/runtime/ModuleProgramExecutable.h delete mode 100644 Source/JavaScriptCore/runtime/NameConstructor.cpp delete mode 100644 Source/JavaScriptCore/runtime/NameConstructor.h delete mode 100644 Source/JavaScriptCore/runtime/NameInstance.cpp delete mode 100644 Source/JavaScriptCore/runtime/NameInstance.h delete mode 100644 Source/JavaScriptCore/runtime/NamePrototype.cpp delete mode 100644 Source/JavaScriptCore/runtime/NamePrototype.h create mode 100644 Source/JavaScriptCore/runtime/NativeExecutable.cpp create mode 100644 Source/JavaScriptCore/runtime/NativeExecutable.h create mode 100644 Source/JavaScriptCore/runtime/NativeStdFunctionCell.cpp create mode 100644 Source/JavaScriptCore/runtime/NativeStdFunctionCell.h create mode 100644 Source/JavaScriptCore/runtime/NullGetterFunction.cpp create mode 100644 Source/JavaScriptCore/runtime/NullGetterFunction.h create mode 100644 Source/JavaScriptCore/runtime/NullSetterFunction.cpp create mode 100644 Source/JavaScriptCore/runtime/NullSetterFunction.h create mode 100644 Source/JavaScriptCore/runtime/ParseInt.h create mode 100644 Source/JavaScriptCore/runtime/ProgramExecutable.cpp create mode 100644 Source/JavaScriptCore/runtime/ProgramExecutable.h delete mode 100644 Source/JavaScriptCore/runtime/PropertyNameArray.cpp create mode 100644 Source/JavaScriptCore/runtime/PrototypeMapInlines.h create mode 100644 Source/JavaScriptCore/runtime/ProxyConstructor.cpp create mode 100644 Source/JavaScriptCore/runtime/ProxyConstructor.h create mode 100644 Source/JavaScriptCore/runtime/ProxyObject.cpp create mode 100644 Source/JavaScriptCore/runtime/ProxyObject.h create mode 100644 Source/JavaScriptCore/runtime/ProxyRevoke.cpp create mode 100644 Source/JavaScriptCore/runtime/ProxyRevoke.h create mode 100644 Source/JavaScriptCore/runtime/PureNaN.h create mode 100644 Source/JavaScriptCore/runtime/ReflectObject.cpp create mode 100644 Source/JavaScriptCore/runtime/ReflectObject.h create mode 100644 Source/JavaScriptCore/runtime/RegExpInlines.h create mode 100644 Source/JavaScriptCore/runtime/RegExpObjectInlines.h delete mode 100644 Source/JavaScriptCore/runtime/Reject.h create mode 100644 Source/JavaScriptCore/runtime/RuntimeFlags.h create mode 100644 Source/JavaScriptCore/runtime/RuntimeType.cpp create mode 100644 Source/JavaScriptCore/runtime/RuntimeType.h create mode 100644 Source/JavaScriptCore/runtime/SamplingProfiler.cpp create mode 100644 Source/JavaScriptCore/runtime/SamplingProfiler.h create mode 100644 Source/JavaScriptCore/runtime/ScopeOffset.cpp create mode 100644 Source/JavaScriptCore/runtime/ScopeOffset.h create mode 100644 Source/JavaScriptCore/runtime/ScopedArguments.cpp create 
mode 100644 Source/JavaScriptCore/runtime/ScopedArguments.h create mode 100644 Source/JavaScriptCore/runtime/ScopedArgumentsTable.cpp create mode 100644 Source/JavaScriptCore/runtime/ScopedArgumentsTable.h create mode 100644 Source/JavaScriptCore/runtime/ScriptExecutable.cpp create mode 100644 Source/JavaScriptCore/runtime/ScriptExecutable.h create mode 100644 Source/JavaScriptCore/runtime/ScriptFetcher.h delete mode 100644 Source/JavaScriptCore/runtime/SetIteratorConstructor.cpp delete mode 100644 Source/JavaScriptCore/runtime/SetIteratorConstructor.h create mode 100644 Source/JavaScriptCore/runtime/SlowPathReturnType.h create mode 100644 Source/JavaScriptCore/runtime/SourceOrigin.h create mode 100644 Source/JavaScriptCore/runtime/StackFrame.cpp create mode 100644 Source/JavaScriptCore/runtime/StackFrame.h create mode 100644 Source/JavaScriptCore/runtime/StringIteratorPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/StringIteratorPrototype.h create mode 100644 Source/JavaScriptCore/runtime/StructureIDBlob.h create mode 100644 Source/JavaScriptCore/runtime/StructureIDTable.cpp create mode 100644 Source/JavaScriptCore/runtime/StructureIDTable.h create mode 100644 Source/JavaScriptCore/runtime/Symbol.cpp create mode 100644 Source/JavaScriptCore/runtime/Symbol.h create mode 100644 Source/JavaScriptCore/runtime/SymbolConstructor.cpp create mode 100644 Source/JavaScriptCore/runtime/SymbolConstructor.h create mode 100644 Source/JavaScriptCore/runtime/SymbolObject.cpp create mode 100644 Source/JavaScriptCore/runtime/SymbolObject.h create mode 100644 Source/JavaScriptCore/runtime/SymbolPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/SymbolPrototype.h create mode 100644 Source/JavaScriptCore/runtime/TemplateRegistry.cpp create mode 100644 Source/JavaScriptCore/runtime/TemplateRegistry.h create mode 100644 Source/JavaScriptCore/runtime/TemplateRegistryKey.cpp create mode 100644 Source/JavaScriptCore/runtime/TemplateRegistryKey.h create mode 100644 Source/JavaScriptCore/runtime/TemplateRegistryKeyTable.cpp create mode 100644 Source/JavaScriptCore/runtime/TemplateRegistryKeyTable.h create mode 100644 Source/JavaScriptCore/runtime/ThrowScope.cpp create mode 100644 Source/JavaScriptCore/runtime/ThrowScope.h delete mode 100644 Source/JavaScriptCore/runtime/Tracing.h create mode 100644 Source/JavaScriptCore/runtime/TypeError.h create mode 100644 Source/JavaScriptCore/runtime/TypeLocationCache.cpp create mode 100644 Source/JavaScriptCore/runtime/TypeLocationCache.h create mode 100644 Source/JavaScriptCore/runtime/TypeProfiler.cpp create mode 100644 Source/JavaScriptCore/runtime/TypeProfiler.h create mode 100644 Source/JavaScriptCore/runtime/TypeProfilerLog.cpp create mode 100644 Source/JavaScriptCore/runtime/TypeProfilerLog.h create mode 100644 Source/JavaScriptCore/runtime/TypeSet.cpp create mode 100644 Source/JavaScriptCore/runtime/TypeSet.h delete mode 100644 Source/JavaScriptCore/runtime/TypedArrayBase.h create mode 100644 Source/JavaScriptCore/runtime/TypeofType.cpp create mode 100644 Source/JavaScriptCore/runtime/TypeofType.h create mode 100644 Source/JavaScriptCore/runtime/VMInlines.h create mode 100644 Source/JavaScriptCore/runtime/VarOffset.cpp create mode 100644 Source/JavaScriptCore/runtime/VarOffset.h delete mode 100644 Source/JavaScriptCore/runtime/WatchdogNone.cpp create mode 100644 Source/JavaScriptCore/runtime/WeakGCMapInlines.h delete mode 100644 Source/JavaScriptCore/runtime/WeakRandom.h create mode 100644 Source/JavaScriptCore/runtime/WeakSetConstructor.cpp 
create mode 100644 Source/JavaScriptCore/runtime/WeakSetConstructor.h create mode 100644 Source/JavaScriptCore/runtime/WeakSetPrototype.cpp create mode 100644 Source/JavaScriptCore/runtime/WeakSetPrototype.h create mode 100644 Source/JavaScriptCore/runtime/WriteBarrierInlines.h create mode 100644 Source/JavaScriptCore/shell/CMakeLists.txt create mode 100644 Source/JavaScriptCore/shell/DLLLauncherMain.cpp create mode 100644 Source/JavaScriptCore/shell/PlatformGTK.cmake create mode 100644 Source/JavaScriptCore/testRegExp.cpp create mode 100644 Source/JavaScriptCore/tested-symbols.symlst create mode 100644 Source/JavaScriptCore/tools/FunctionOverrides.cpp create mode 100644 Source/JavaScriptCore/tools/FunctionOverrides.h create mode 100644 Source/JavaScriptCore/tools/FunctionWhitelist.cpp create mode 100644 Source/JavaScriptCore/tools/FunctionWhitelist.h create mode 100644 Source/JavaScriptCore/tools/JSDollarVM.cpp create mode 100644 Source/JavaScriptCore/tools/JSDollarVM.h create mode 100644 Source/JavaScriptCore/tools/JSDollarVMPrototype.cpp create mode 100644 Source/JavaScriptCore/tools/JSDollarVMPrototype.h create mode 100644 Source/JavaScriptCore/tools/SigillCrashAnalyzer.cpp create mode 100644 Source/JavaScriptCore/tools/SigillCrashAnalyzer.h create mode 100644 Source/JavaScriptCore/tools/VMInspector.cpp create mode 100644 Source/JavaScriptCore/tools/VMInspector.h create mode 100644 Source/JavaScriptCore/ucd/CaseFolding.txt create mode 100644 Source/JavaScriptCore/wasm/JSWebAssembly.cpp create mode 100644 Source/JavaScriptCore/wasm/JSWebAssembly.h create mode 100644 Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmB3IRGenerator.h create mode 100644 Source/JavaScriptCore/wasm/WasmBinding.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmBinding.h create mode 100644 Source/JavaScriptCore/wasm/WasmCallingConvention.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmCallingConvention.h create mode 100644 Source/JavaScriptCore/wasm/WasmExceptionType.h create mode 100644 Source/JavaScriptCore/wasm/WasmFormat.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmFormat.h create mode 100644 Source/JavaScriptCore/wasm/WasmFunctionParser.h create mode 100644 Source/JavaScriptCore/wasm/WasmMemory.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmMemory.h create mode 100644 Source/JavaScriptCore/wasm/WasmMemoryInformation.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmMemoryInformation.h create mode 100644 Source/JavaScriptCore/wasm/WasmModuleParser.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmModuleParser.h create mode 100644 Source/JavaScriptCore/wasm/WasmPageCount.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmPageCount.h create mode 100644 Source/JavaScriptCore/wasm/WasmParser.h create mode 100644 Source/JavaScriptCore/wasm/WasmPlan.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmPlan.h create mode 100644 Source/JavaScriptCore/wasm/WasmSections.h create mode 100644 Source/JavaScriptCore/wasm/WasmSignature.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmSignature.h create mode 100644 Source/JavaScriptCore/wasm/WasmValidate.cpp create mode 100644 Source/JavaScriptCore/wasm/WasmValidate.h create mode 100755 Source/JavaScriptCore/wasm/generateWasm.py create mode 100755 Source/JavaScriptCore/wasm/generateWasmB3IRGeneratorInlinesHeader.py create mode 100755 Source/JavaScriptCore/wasm/generateWasmOpsHeader.py create mode 100755 Source/JavaScriptCore/wasm/generateWasmValidateInlinesHeader.py create mode 
100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyCallee.cpp create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyCallee.h create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyCompileError.cpp create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyCompileError.h create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyHelpers.h create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyInstance.cpp create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyInstance.h create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyLinkError.cpp create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyLinkError.h create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyMemory.cpp create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyMemory.h create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyModule.cpp create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyModule.h create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyRuntimeError.cpp create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyRuntimeError.h create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyTable.cpp create mode 100644 Source/JavaScriptCore/wasm/js/JSWebAssemblyTable.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyCompileErrorConstructor.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyCompileErrorConstructor.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyCompileErrorPrototype.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyCompileErrorPrototype.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyFunction.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyFunction.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyFunctionCell.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyFunctionCell.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyInstanceConstructor.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyInstanceConstructor.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyInstancePrototype.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyInstancePrototype.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyLinkErrorConstructor.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyLinkErrorConstructor.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyLinkErrorPrototype.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyLinkErrorPrototype.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyMemoryConstructor.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyMemoryConstructor.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyMemoryPrototype.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyMemoryPrototype.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyModuleConstructor.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyModuleConstructor.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyModulePrototype.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyModulePrototype.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyModuleRecord.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyModuleRecord.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyPrototype.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyPrototype.h create mode 100644 
Source/JavaScriptCore/wasm/js/WebAssemblyRuntimeErrorConstructor.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyRuntimeErrorConstructor.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyRuntimeErrorPrototype.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyRuntimeErrorPrototype.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyTableConstructor.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyTableConstructor.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyTablePrototype.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyTablePrototype.h create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyToJSCallee.cpp create mode 100644 Source/JavaScriptCore/wasm/js/WebAssemblyToJSCallee.h create mode 100644 Source/JavaScriptCore/wasm/wasm.json create mode 100644 Source/JavaScriptCore/yarr/YarrCanonicalize.h delete mode 100644 Source/JavaScriptCore/yarr/YarrCanonicalizeUCS2.h create mode 100644 Source/JavaScriptCore/yarr/YarrCanonicalizeUCS2.js (limited to 'Source/JavaScriptCore') diff --git a/Source/JavaScriptCore/API/APICallbackFunction.h b/Source/JavaScriptCore/API/APICallbackFunction.h index 65c519b7a..e5283b5b4 100644 --- a/Source/JavaScriptCore/API/APICallbackFunction.h +++ b/Source/JavaScriptCore/API/APICallbackFunction.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,9 +27,9 @@ #define APICallbackFunction_h #include "APICast.h" -#include "APIShims.h" #include "Error.h" #include "JSCallbackConstructor.h" +#include "JSLock.h" #include namespace JSC { @@ -44,9 +44,11 @@ template static EncodedJSValue JSC_HOST_CALL construct(ExecState*); template EncodedJSValue JSC_HOST_CALL APICallbackFunction::call(ExecState* exec) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); JSContextRef execRef = toRef(exec); - JSObjectRef functionRef = toRef(exec->callee()); - JSObjectRef thisObjRef = toRef(jsCast(exec->hostThisValue().toThis(exec, NotStrictMode))); + JSObjectRef functionRef = toRef(exec->jsCallee()); + JSObjectRef thisObjRef = toRef(jsCast(exec->thisValue().toThis(exec, NotStrictMode))); int argumentCount = static_cast(exec->argumentCount()); Vector arguments; @@ -57,11 +59,11 @@ EncodedJSValue JSC_HOST_CALL APICallbackFunction::call(ExecState* exec) JSValueRef exception = 0; JSValueRef result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = jsCast(toJS(functionRef))->functionCallback()(execRef, functionRef, thisObjRef, argumentCount, arguments.data(), &exception); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); // result must be a valid JSValue. 
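For illustration, the DECLARE_THROW_SCOPE idiom that the hunk above adopts can be sketched on its own. This is a minimal, hypothetical host function (hostAdd is not part of the patch), assuming the JSC-internal headers of this era:

    // Hypothetical host function using the same throw-scope idiom as
    // APICallbackFunction::call above: declare one ThrowScope on entry,
    // then report and check exceptions through it instead of calling
    // vm().throwException() directly.
    static EncodedJSValue JSC_HOST_CALL hostAdd(ExecState* exec)
    {
        VM& vm = exec->vm();
        auto scope = DECLARE_THROW_SCOPE(vm);
        double lhs = exec->argument(0).toNumber(exec);
        RETURN_IF_EXCEPTION(scope, encodedJSValue()); // toNumber() may throw
        double rhs = exec->argument(1).toNumber(exec);
        RETURN_IF_EXCEPTION(scope, encodedJSValue());
        return JSValue::encode(jsNumber(lhs + rhs));
    }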
if (!result) @@ -73,7 +75,9 @@ EncodedJSValue JSC_HOST_CALL APICallbackFunction::call(ExecState* exec) template EncodedJSValue JSC_HOST_CALL APICallbackFunction::construct(ExecState* exec) { - JSObject* constructor = exec->callee(); + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSObject* constructor = exec->jsCallee(); JSContextRef ctx = toRef(exec); JSObjectRef constructorRef = toRef(constructor); @@ -88,16 +92,16 @@ EncodedJSValue JSC_HOST_CALL APICallbackFunction::construct(ExecState* exec) JSValueRef exception = 0; JSObjectRef result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = callback(ctx, constructorRef, argumentCount, arguments.data(), &exception); } if (exception) { - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); return JSValue::encode(toJS(exec, exception)); } // result must be a valid JSValue. if (!result) - return throwVMTypeError(exec); + return throwVMTypeError(exec, scope); return JSValue::encode(toJS(result)); } diff --git a/Source/JavaScriptCore/API/APICast.h b/Source/JavaScriptCore/API/APICast.h index 6526d8907..8fe8d6034 100644 --- a/Source/JavaScriptCore/API/APICast.h +++ b/Source/JavaScriptCore/API/APICast.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -124,6 +124,7 @@ inline JSC::VM* toJS(JSContextGroupRef g) inline JSValueRef toRef(JSC::ExecState* exec, JSC::JSValue v) { + ASSERT(exec->vm().currentThreadIsHoldingAPILock()); #if USE(JSVALUE32_64) if (!v) return 0; diff --git a/Source/JavaScriptCore/API/APIShims.h b/Source/JavaScriptCore/API/APIShims.h deleted file mode 100644 index a133b8ed4..000000000 --- a/Source/JavaScriptCore/API/APIShims.h +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (C) 2009 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. 
``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef APIShims_h -#define APIShims_h - -#include "CallFrame.h" -#include "GCActivityCallback.h" -#include "IncrementalSweeper.h" -#include "JSLock.h" -#include - -namespace JSC { - -class APIEntryShimWithoutLock { -protected: - APIEntryShimWithoutLock(VM* vm, bool registerThread) - : m_vm(vm) - , m_entryIdentifierTable(wtfThreadData().setCurrentIdentifierTable(vm->identifierTable)) - { - if (registerThread) - vm->heap.machineThreads().addCurrentThread(); - } - - ~APIEntryShimWithoutLock() - { - wtfThreadData().setCurrentIdentifierTable(m_entryIdentifierTable); - } - -protected: - RefPtr m_vm; - IdentifierTable* m_entryIdentifierTable; -}; - -class APIEntryShim : public APIEntryShimWithoutLock { -public: - // Normal API entry - APIEntryShim(ExecState* exec, bool registerThread = true) - : APIEntryShimWithoutLock(&exec->vm(), registerThread) - , m_lockHolder(exec->vm().exclusiveThread ? 0 : exec) - { - } - - // JSPropertyNameAccumulator only has a vm. - APIEntryShim(VM* vm, bool registerThread = true) - : APIEntryShimWithoutLock(vm, registerThread) - , m_lockHolder(vm->exclusiveThread ? 0 : vm) - { - } - - ~APIEntryShim() - { - // Destroying our JSLockHolder should also destroy the VM. - m_vm.clear(); - } - -private: - JSLockHolder m_lockHolder; -}; - -class APICallbackShim { -public: - APICallbackShim(ExecState* exec) - : m_dropAllLocks(shouldDropAllLocks(exec->vm()) ? exec : nullptr) - , m_vm(&exec->vm()) - { - wtfThreadData().resetCurrentIdentifierTable(); - } - - APICallbackShim(VM& vm) - : m_dropAllLocks(shouldDropAllLocks(vm) ? &vm : nullptr) - , m_vm(&vm) - { - wtfThreadData().resetCurrentIdentifierTable(); - } - - ~APICallbackShim() - { - wtfThreadData().setCurrentIdentifierTable(m_vm->identifierTable); - } - -private: - static bool shouldDropAllLocks(VM& vm) - { - if (vm.exclusiveThread) - return false; - - // If the VM is in the middle of being destroyed then we don't want to resurrect it - // by allowing DropAllLocks to ref it. By this point the APILock has already been - // released anyways, so it doesn't matter that DropAllLocks is a no-op. - if (!vm.refCount()) - return false; - - return true; - } - - JSLock::DropAllLocks m_dropAllLocks; - VM* m_vm; -}; - -} - -#endif diff --git a/Source/JavaScriptCore/API/APIUtils.h b/Source/JavaScriptCore/API/APIUtils.h new file mode 100644 index 000000000..e2190c860 --- /dev/null +++ b/Source/JavaScriptCore/API/APIUtils.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
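The deletion of APIShims.h is easier to follow with the replacement idiom spelled out. A sketch with a hypothetical function name, assuming JSLock.h:

    // What used to be APIEntryShim is now a plain JSLockHolder at the API
    // boundary; what used to be APICallbackShim is JSLock::DropAllLocks
    // around calls back into client code.
    void exampleAPIEntryPoint(JSC::ExecState* exec) // hypothetical entry point
    {
        JSC::JSLockHolder locker(exec); // was: APIEntryShim entryShim(exec)
        // ... operate on the VM while holding the API lock ...
        {
            JSC::JSLock::DropAllLocks dropAllLocks(exec); // was: APICallbackShim
            // ... invoke client callbacks that may block or re-enter the VM ...
        }
        // the API lock is reacquired when dropAllLocks is destroyed
    }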
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef APIUtils_h +#define APIUtils_h + +#include "Exception.h" +#include "JSCJSValue.h" +#include "JSGlobalObjectInspectorController.h" +#include "JSValueRef.h" + +enum class ExceptionStatus { + DidThrow, + DidNotThrow +}; + +inline ExceptionStatus handleExceptionIfNeeded(JSC::ExecState* exec, JSValueRef* returnedExceptionRef) +{ + JSC::VM& vm = exec->vm(); + auto scope = DECLARE_CATCH_SCOPE(vm); + if (UNLIKELY(scope.exception())) { + JSC::Exception* exception = scope.exception(); + if (returnedExceptionRef) + *returnedExceptionRef = toRef(exec, exception->value()); + scope.clearException(); +#if ENABLE(REMOTE_INSPECTOR) + exec->vmEntryGlobalObject()->inspectorController().reportAPIException(exec, exception); +#endif + return ExceptionStatus::DidThrow; + } + return ExceptionStatus::DidNotThrow; +} + +inline void setException(JSC::ExecState* exec, JSValueRef* returnedExceptionRef, JSC::JSValue exception) +{ + if (returnedExceptionRef) + *returnedExceptionRef = toRef(exec, exception); +#if ENABLE(REMOTE_INSPECTOR) + exec->vmEntryGlobalObject()->inspectorController().reportAPIException(exec, JSC::Exception::create(exec->vm(), exception)); +#endif +} + +#endif /* APIUtils_h */ diff --git a/Source/JavaScriptCore/API/JSAPIWrapperObject.h b/Source/JavaScriptCore/API/JSAPIWrapperObject.h index 909039771..14194b6f9 100644 --- a/Source/JavaScriptCore/API/JSAPIWrapperObject.h +++ b/Source/JavaScriptCore/API/JSAPIWrapperObject.h @@ -45,8 +45,6 @@ public: void setWrappedObject(void*); protected: - static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; - JSAPIWrapperObject(VM&, Structure*); private: diff --git a/Source/JavaScriptCore/API/JSBase.cpp b/Source/JavaScriptCore/API/JSBase.cpp index 506561573..983acce8a 100644 --- a/Source/JavaScriptCore/API/JSBase.cpp +++ b/Source/JavaScriptCore/API/JSBase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006, 2007, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2006, 2007, 2013, 2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
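A sketch of how a C API entry point would use the two new APIUtils helpers; MyAPIGetValue and myComputeValue are hypothetical stand-ins, not part of the patch:

    JSValueRef MyAPIGetValue(JSContextRef ctx, JSValueRef* exception)
    {
        JSC::ExecState* exec = toJS(ctx);
        JSC::JSLockHolder locker(exec);
        JSC::JSValue result = myComputeValue(exec); // may leave a pending exception
        if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow)
            return nullptr; // the exception was copied to *exception, if provided
        return toRef(exec, result);
    }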
``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -28,18 +28,23 @@ #include "JSBasePrivate.h" #include "APICast.h" -#include "APIShims.h" #include "CallFrame.h" #include "Completion.h" +#include "Exception.h" +#include "GCActivityCallback.h" #include "InitializeThreading.h" #include "JSGlobalObject.h" #include "JSLock.h" #include "JSObject.h" #include "OpaqueJSString.h" -#include "Operations.h" +#include "JSCInlines.h" #include "SourceCode.h" #include +#if ENABLE(REMOTE_INSPECTOR) +#include "JSGlobalObjectInspectorController.h" +#endif + using namespace JSC; JSValueRef JSEvaluateScript(JSContextRef ctx, JSStringRef script, JSObjectRef thisObject, JSStringRef sourceURL, int startingLineNumber, JSValueRef* exception) @@ -49,7 +54,7 @@ JSValueRef JSEvaluateScript(JSContextRef ctx, JSStringRef script, JSObjectRef th return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSObject* jsThisObject = toJS(thisObject); @@ -57,14 +62,23 @@ JSValueRef JSEvaluateScript(JSContextRef ctx, JSStringRef script, JSObjectRef th // evaluate sets "this" to the global object if it is NULL JSGlobalObject* globalObject = exec->vmEntryGlobalObject(); - SourceCode source = makeSource(script->string(), sourceURL->string(), TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber::first())); + auto sourceURLString = sourceURL ? sourceURL->string() : String(); + SourceCode source = makeSource(script->string(), SourceOrigin { sourceURLString }, sourceURLString, TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber())); - JSValue evaluationException; - JSValue returnValue = evaluate(globalObject->globalExec(), source, jsThisObject, &evaluationException); + NakedPtr evaluationException; + JSValue returnValue = profiledEvaluate(globalObject->globalExec(), ProfilingReason::API, source, jsThisObject, evaluationException); if (evaluationException) { if (exception) - *exception = toRef(exec, evaluationException); + *exception = toRef(exec, evaluationException->value()); +#if ENABLE(REMOTE_INSPECTOR) + // FIXME: If we have a debugger attached we could learn about ParseError exceptions through + // ScriptDebugServer::sourceParsed and this path could produce a duplicate warning. The + // Debugger path is currently ignored by inspector. + // NOTE: If we don't have a debugger, this SourceCode will be forever lost to the inspector. + // We could stash it in the inspector in case an inspector is ever opened. + globalObject->inspectorController().reportAPIException(exec, evaluationException); +#endif return 0; } @@ -82,11 +96,12 @@ bool JSCheckScriptSyntax(JSContextRef ctx, JSStringRef script, JSStringRef sourc return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); startingLineNumber = std::max(1, startingLineNumber); - SourceCode source = makeSource(script->string(), sourceURL->string(), TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber::first())); + auto sourceURLString = sourceURL ? 
sourceURL->string() : String(); + SourceCode source = makeSource(script->string(), SourceOrigin { sourceURLString }, sourceURLString, TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber())); JSValue syntaxException; bool isValidSyntax = checkSyntax(exec->vmEntryGlobalObject()->globalExec(), source, &syntaxException); @@ -94,6 +109,10 @@ bool JSCheckScriptSyntax(JSContextRef ctx, JSStringRef script, JSStringRef sourc if (!isValidSyntax) { if (exception) *exception = toRef(exec, syntaxException); +#if ENABLE(REMOTE_INSPECTOR) + Exception* exception = Exception::create(exec->vm(), syntaxException); + exec->vmEntryGlobalObject()->inspectorController().reportAPIException(exec, exception); +#endif return false; } @@ -111,7 +130,7 @@ void JSGarbageCollect(JSContextRef ctx) return; ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec, false); + JSLockHolder locker(exec); exec->vm().heap.reportAbandonedObjectGraph(); } @@ -123,11 +142,13 @@ void JSReportExtraMemoryCost(JSContextRef ctx, size_t size) return; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); - exec->vm().heap.reportExtraMemoryCost(size); + JSLockHolder locker(exec); + + exec->vm().heap.deprecatedReportExtraMemory(size); } extern "C" JS_EXPORT void JSSynchronousGarbageCollectForDebugging(JSContextRef); +extern "C" JS_EXPORT void JSSynchronousEdenCollectForDebugging(JSContextRef); void JSSynchronousGarbageCollectForDebugging(JSContextRef ctx) { @@ -135,10 +156,20 @@ void JSSynchronousGarbageCollectForDebugging(JSContextRef ctx) return; ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); exec->vm().heap.collectAllGarbage(); } +void JSSynchronousEdenCollectForDebugging(JSContextRef ctx) +{ + if (!ctx) + return; + + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + exec->vm().heap.collectSync(CollectionScope::Eden); +} + void JSDisableGCTimer(void) { GCActivityCallback::s_shouldCreateGCTimer = false; diff --git a/Source/JavaScriptCore/API/JSBase.h b/Source/JavaScriptCore/API/JSBase.h index 153d359d4..677dff168 100644 --- a/Source/JavaScriptCore/API/JSBase.h +++ b/Source/JavaScriptCore/API/JSBase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -57,6 +57,8 @@ typedef struct OpaqueJSPropertyNameArray* JSPropertyNameArrayRef; /*! @typedef JSPropertyNameAccumulatorRef An ordered set used to collect the names of a JavaScript object's properties. */ typedef struct OpaqueJSPropertyNameAccumulator* JSPropertyNameAccumulatorRef; +/*! 
@typedef JSTypedArrayBytesDeallocator A function used to deallocate bytes passed to a Typed Array constructor. The function should take two arguments. The first is a pointer to the bytes that were originally passed to the Typed Array constructor. The second is a pointer to additional information desired at the time the bytes are to be freed. */ +typedef void (*JSTypedArrayBytesDeallocator)(void* bytes, void* deallocatorContext); /* JavaScript data types */ @@ -84,11 +86,6 @@ typedef struct OpaqueJSValue* JSObjectRef; #define JS_EXPORT #endif /* defined(JS_NO_EXPORT) */ -/* JS tests uses WTF but has no config.h, so we need to set the export defines here. */ -#ifndef WTF_EXPORT_PRIVATE -#define WTF_EXPORT_PRIVATE JS_EXPORT -#endif - #ifdef __cplusplus extern "C" { #endif @@ -101,7 +98,7 @@ extern "C" { @param ctx The execution context to use. @param script A JSString containing the script to evaluate. @param thisObject The object to use as "this," or NULL to use the global object as "this." -@param sourceURL A JSString containing a URL for the script's source file. This is only used when reporting exceptions. Pass NULL if you do not care to include source file information in exceptions. +@param sourceURL A JSString containing a URL for the script's source file. This is used by debuggers and when reporting exceptions. Pass NULL if you do not care to include source file information. @param startingLineNumber An integer value specifying the script's starting line number in the file located at sourceURL. This is only used when reporting exceptions. The value is one-based, so the first line is line 1 and invalid values are clamped to 1. @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. @result The JSValue that results from evaluating script, or NULL if an exception is thrown. @@ -141,10 +138,10 @@ JS_EXPORT void JSGarbageCollect(JSContextRef ctx); /* Enable the Objective-C API for platforms with a modern runtime. */ #if !defined(JSC_OBJC_API_ENABLED) -#ifndef JSC_OBJC_API_AVAILABLE_MAC_OS_X_1080 -#define JSC_OBJC_API_ENABLED (defined(__clang__) && defined(__APPLE__) && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090 && !defined(__i386__)) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE))) +#if (defined(__clang__) && defined(__APPLE__) && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && !defined(__i386__)) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE))) +#define JSC_OBJC_API_ENABLED 1 #else -#define JSC_OBJC_API_ENABLED (defined(__clang__) && defined(__APPLE__) && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080 && !defined(__i386__)) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE))) +#define JSC_OBJC_API_ENABLED 0 #endif #endif diff --git a/Source/JavaScriptCore/API/JSBasePrivate.h b/Source/JavaScriptCore/API/JSBasePrivate.h index 133176e1c..137594972 100644 --- a/Source/JavaScriptCore/API/JSBasePrivate.h +++ b/Source/JavaScriptCore/API/JSBasePrivate.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2008 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
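A function matching the JSTypedArrayBytesDeallocator typedef documented above might look like this (illustrative only; it assumes the buffer was malloc'd and that no per-buffer context is needed):

    static void freeBackingStore(void* bytes, void* deallocatorContext)
    {
        (void)deallocatorContext; // unused in this sketch
        free(bytes);
    }

Such a callback is what the typed-array construction functions added in JSTypedArray.h (e.g. JSObjectMakeTypedArrayWithBytesNoCopy) accept alongside an externally owned buffer.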
* - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR diff --git a/Source/JavaScriptCore/API/JSCTestRunnerUtils.cpp b/Source/JavaScriptCore/API/JSCTestRunnerUtils.cpp index 2e93ac114..d314c5d48 100644 --- a/Source/JavaScriptCore/API/JSCTestRunnerUtils.cpp +++ b/Source/JavaScriptCore/API/JSCTestRunnerUtils.cpp @@ -27,22 +27,39 @@ #include "JSCTestRunnerUtils.h" #include "APICast.h" -#include "Operations.h" +#include "JSCInlines.h" #include "TestRunnerUtils.h" namespace JSC { + +JSValueRef failNextNewCodeBlock(JSContextRef context) +{ + ExecState* exec= toJS(context); + JSLockHolder holder(exec); + return toRef(exec, failNextNewCodeBlock(exec)); +} + JSValueRef numberOfDFGCompiles(JSContextRef context, JSValueRef theFunctionValueRef) { ExecState* exec= toJS(context); + JSLockHolder holder(exec); return toRef(exec, numberOfDFGCompiles(toJS(exec, theFunctionValueRef))); } JSValueRef setNeverInline(JSContextRef context, JSValueRef theFunctionValueRef) { ExecState* exec= toJS(context); + JSLockHolder holder(exec); return toRef(exec, setNeverInline(toJS(exec, theFunctionValueRef))); } +JSValueRef setNeverOptimize(JSContextRef context, JSValueRef theFunctionValueRef) +{ + ExecState* exec= toJS(context); + JSLockHolder holder(exec); + return toRef(exec, setNeverOptimize(toJS(exec, theFunctionValueRef))); +} + } // namespace JSC diff --git a/Source/JavaScriptCore/API/JSCTestRunnerUtils.h b/Source/JavaScriptCore/API/JSCTestRunnerUtils.h index aaecdd5c9..c52da524b 100644 --- a/Source/JavaScriptCore/API/JSCTestRunnerUtils.h +++ b/Source/JavaScriptCore/API/JSCTestRunnerUtils.h @@ -31,8 +31,10 @@ namespace JSC { +JS_EXPORT_PRIVATE JSValueRef failNextNewCodeBlock(JSContextRef); JS_EXPORT_PRIVATE JSValueRef numberOfDFGCompiles(JSContextRef, JSValueRef theFunction); JS_EXPORT_PRIVATE JSValueRef setNeverInline(JSContextRef, JSValueRef theFunction); +JS_EXPORT_PRIVATE JSValueRef setNeverOptimize(JSContextRef, JSValueRef theFunction); } // namespace JSC diff --git a/Source/JavaScriptCore/API/JSCallbackConstructor.cpp b/Source/JavaScriptCore/API/JSCallbackConstructor.cpp index 8ea97a447..6bda427f5 100644 --- a/Source/JavaScriptCore/API/JSCallbackConstructor.cpp +++ b/Source/JavaScriptCore/API/JSCallbackConstructor.cpp @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
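All of the JSCTestRunnerUtils hooks above now share one shape, which pairs with the new assertion in APICast.h that toRef() is only used while the API lock is held. A sketch with a hypothetical hook and helper:

    JSValueRef myTestHook(JSContextRef context, JSValueRef theFunctionValueRef)
    {
        ExecState* exec = toJS(context);
        JSLockHolder holder(exec); // required: toRef() asserts the API lock is held
        return toRef(exec, doSomethingTo(toJS(exec, theFunctionValueRef)));
    }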
OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -28,17 +28,15 @@ #include "APICallbackFunction.h" #include "APICast.h" -#include "APIShims.h" #include "Error.h" #include "JSGlobalObject.h" #include "JSLock.h" #include "ObjectPrototype.h" -#include "Operations.h" -#include +#include "JSCInlines.h" namespace JSC { -const ClassInfo JSCallbackConstructor::s_info = { "CallbackConstructor", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(JSCallbackConstructor) }; +const ClassInfo JSCallbackConstructor::s_info = { "CallbackConstructor", &Base::s_info, 0, CREATE_METHOD_TABLE(JSCallbackConstructor) }; JSCallbackConstructor::JSCallbackConstructor(JSGlobalObject* globalObject, Structure* structure, JSClassRef jsClass, JSObjectCallAsConstructorCallback callback) : JSDestructibleObject(globalObject->vm(), structure) @@ -50,7 +48,7 @@ JSCallbackConstructor::JSCallbackConstructor(JSGlobalObject* globalObject, Struc void JSCallbackConstructor::finishCreation(JSGlobalObject* globalObject, JSClassRef jsClass) { Base::finishCreation(globalObject->vm()); - ASSERT(inherits(info())); + ASSERT(inherits(*vm(), info())); if (m_class) JSClassRetain(jsClass); } @@ -69,7 +67,7 @@ void JSCallbackConstructor::destroy(JSCell* cell) ConstructType JSCallbackConstructor::getConstructData(JSCell*, ConstructData& constructData) { constructData.native.function = APICallbackFunction::construct; - return ConstructTypeHost; + return ConstructType::Host; } } // namespace JSC diff --git a/Source/JavaScriptCore/API/JSCallbackConstructor.h b/Source/JavaScriptCore/API/JSCallbackConstructor.h index 7eedb52e4..d730ad779 100644 --- a/Source/JavaScriptCore/API/JSCallbackConstructor.h +++ b/Source/JavaScriptCore/API/JSCallbackConstructor.h @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
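The ConstructTypeHost to ConstructType::Host change above reflects this series' move to scoped enums. The reporting pattern, sketched with hypothetical names:

    ConstructType MyCallbackClass::getConstructData(JSCell*, ConstructData& constructData)
    {
        constructData.native.function = myConstructCallback; // hypothetical callback
        return ConstructType::Host; // was: ConstructTypeHost
    }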
OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -34,6 +34,7 @@ namespace JSC { class JSCallbackConstructor : public JSDestructibleObject { public: typedef JSDestructibleObject Base; + static const unsigned StructureFlags = Base::StructureFlags | ImplementsHasInstance | ImplementsDefaultHasInstance; static JSCallbackConstructor* create(ExecState* exec, JSGlobalObject* globalObject, Structure* structure, JSClassRef classRef, JSObjectCallAsConstructorCallback callback) { @@ -56,7 +57,6 @@ public: protected: JSCallbackConstructor(JSGlobalObject*, Structure*, JSClassRef, JSObjectCallAsConstructorCallback); void finishCreation(JSGlobalObject*, JSClassRef); - static const unsigned StructureFlags = ImplementsHasInstance | JSObject::StructureFlags; private: friend struct APICallbackFunction; diff --git a/Source/JavaScriptCore/API/JSCallbackFunction.cpp b/Source/JavaScriptCore/API/JSCallbackFunction.cpp index 1996991f7..f8fd5d522 100644 --- a/Source/JavaScriptCore/API/JSCallbackFunction.cpp +++ b/Source/JavaScriptCore/API/JSCallbackFunction.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2006, 2008, 2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -28,7 +28,6 @@ #include "APICallbackFunction.h" #include "APICast.h" -#include "APIShims.h" #include "CodeBlock.h" #include "Error.h" #include "ExceptionHelpers.h" @@ -36,14 +35,13 @@ #include "JSFunction.h" #include "JSGlobalObject.h" #include "JSLock.h" -#include "Operations.h" -#include +#include "JSCInlines.h" namespace JSC { STATIC_ASSERT_IS_TRIVIALLY_DESTRUCTIBLE(JSCallbackFunction); -const ClassInfo JSCallbackFunction::s_info = { "CallbackFunction", &InternalFunction::s_info, 0, 0, CREATE_METHOD_TABLE(JSCallbackFunction) }; +const ClassInfo JSCallbackFunction::s_info = { "CallbackFunction", &InternalFunction::s_info, 0, CREATE_METHOD_TABLE(JSCallbackFunction) }; JSCallbackFunction::JSCallbackFunction(VM& vm, Structure* structure, JSObjectCallAsFunctionCallback callback) : InternalFunction(vm, structure) @@ -54,12 +52,13 @@ JSCallbackFunction::JSCallbackFunction(VM& vm, Structure* structure, JSObjectCal void JSCallbackFunction::finishCreation(VM& vm, const String& name) { Base::finishCreation(vm, name); - ASSERT(inherits(info())); + ASSERT(inherits(vm, info())); } JSCallbackFunction* JSCallbackFunction::create(VM& vm, JSGlobalObject* globalObject, JSObjectCallAsFunctionCallback callback, const String& name) { - JSCallbackFunction* function = new (NotNull, allocateCell(vm.heap)) JSCallbackFunction(vm, globalObject->callbackFunctionStructure(), callback); + Structure* structure = globalObject->callbackFunctionStructure(); + JSCallbackFunction* function = new (NotNull, allocateCell(vm.heap)) JSCallbackFunction(vm, structure, callback); function->finishCreation(vm, name); return function; } @@ -67,7 +66,7 @@ JSCallbackFunction* JSCallbackFunction::create(VM& vm, JSGlobalObject* globalObj CallType JSCallbackFunction::getCallData(JSCell*, CallData& callData) { callData.native.function = APICallbackFunction::call; - return CallTypeHost; + return CallType::Host; } } // namespace JSC diff --git a/Source/JavaScriptCore/API/JSCallbackFunction.h b/Source/JavaScriptCore/API/JSCallbackFunction.h index dff18de56..a4fdd068f 100644 --- a/Source/JavaScriptCore/API/JSCallbackFunction.h +++ b/Source/JavaScriptCore/API/JSCallbackFunction.h @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR diff --git a/Source/JavaScriptCore/API/JSCallbackObject.cpp b/Source/JavaScriptCore/API/JSCallbackObject.cpp index 94713da36..dee87dbfb 100644 --- a/Source/JavaScriptCore/API/JSCallbackObject.cpp +++ b/Source/JavaScriptCore/API/JSCallbackObject.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006-2017 Apple Inc. All rights reserved. 
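[Editor's note: the JSCTestRunnerUtils wrappers above all share one shape: unwrap the opaque JSContextRef with toJS(), take the VM lock with a JSLockHolder, do the work, then wrap the result back up with toRef(). A minimal sketch of that entry-point pattern follows; it builds only against JSC-internal headers, and doSomethingInternal is a hypothetical stand-in for the real work.]

    // Sketch of the standard JSC API entry-point shape (internal headers assumed).
    #include "APICast.h"
    #include "JSLock.h"

    namespace JSC {

    static JSValue doSomethingInternal(ExecState*); // hypothetical internal helper

    JSValueRef doSomething(JSContextRef context)
    {
        ExecState* exec = toJS(context);   // unwrap the C API handle
        JSLockHolder holder(exec);         // hold the VM lock for this scope
        return toRef(exec, doSomethingInternal(exec)); // rewrap the result
    }

    } // namespace JSC
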
* Copyright (C) 2007 Eric Seidel * * Redistribution and use in source and binary forms, with or without @@ -11,10 +11,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -28,14 +28,14 @@ #include "JSCallbackObject.h" #include "Heap.h" -#include "Operations.h" +#include "JSCInlines.h" #include namespace JSC { // Define the two types of JSCallbackObjects we support. -template <> const ClassInfo JSCallbackObject::s_info = { "CallbackObject", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(JSCallbackObject) }; -template <> const ClassInfo JSCallbackObject::s_info = { "CallbackGlobalObject", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(JSCallbackObject) }; +template <> const ClassInfo JSCallbackObject::s_info = { "CallbackObject", &Base::s_info, 0, CREATE_METHOD_TABLE(JSCallbackObject) }; +template <> const ClassInfo JSCallbackObject::s_info = { "CallbackGlobalObject", &Base::s_info, 0, CREATE_METHOD_TABLE(JSCallbackObject) }; template<> const bool JSCallbackObject::needsDestruction = true; template<> const bool JSCallbackObject::needsDestruction = false; @@ -45,7 +45,6 @@ JSCallbackObject* JSCallbackObject::create(VM& v { JSCallbackObject* callbackObject = new (NotNull, allocateCell>(vm.heap)) JSCallbackObject(vm, classRef, structure); callbackObject->finishCreation(vm); - vm.heap.addFinalizer(callbackObject, destroy); return callbackObject; } @@ -61,15 +60,4 @@ Structure* JSCallbackObject::createStructure(VM& vm, JSGlobalObj return Structure::create(vm, globalObject, proto, TypeInfo(GlobalObjectType, StructureFlags), info()); } -void JSCallbackObjectData::finalize(Handle handle, void* context) -{ - JSClassRef jsClass = static_cast(context); - JSObjectRef thisRef = toRef(static_cast(handle.get().asCell())); - - for (; jsClass; jsClass = jsClass->parentClass) - if (JSObjectFinalizeCallback finalize = jsClass->finalize) - finalize(thisRef); - WeakSet::deallocate(WeakImpl::asWeakImpl(handle.slot())); -} - } // namespace JSC diff --git a/Source/JavaScriptCore/API/JSCallbackObject.h b/Source/JavaScriptCore/API/JSCallbackObject.h index 3f58906d9..43749e258 100644 --- a/Source/JavaScriptCore/API/JSCallbackObject.h +++ b/Source/JavaScriptCore/API/JSCallbackObject.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006, 2007, 2008, 2010 Apple Inc. All rights reserved. + * Copyright (C) 2006-2016 Apple Inc. All rights reserved. * Copyright (C) 2007 Eric Seidel * * Redistribution and use in source and binary forms, with or without @@ -11,10 +11,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -30,11 +30,12 @@ #include "JSObjectRef.h" #include "JSValueRef.h" #include "JSObject.h" -#include namespace JSC { -struct JSCallbackObjectData : WeakHandleOwner { +struct JSCallbackObjectData { + WTF_MAKE_FAST_ALLOCATED; +public: JSCallbackObjectData(void* privateData, JSClassRef jsClass) : privateData(privateData) , jsClass(jsClass) @@ -42,7 +43,7 @@ struct JSCallbackObjectData : WeakHandleOwner { JSClassRetain(jsClass); } - virtual ~JSCallbackObjectData() + ~JSCallbackObjectData() { JSClassRelease(jsClass); } @@ -57,7 +58,7 @@ struct JSCallbackObjectData : WeakHandleOwner { void setPrivateProperty(VM& vm, JSCell* owner, const Identifier& propertyName, JSValue value) { if (!m_privateProperties) - m_privateProperties = adoptPtr(new JSPrivatePropertyMap); + m_privateProperties = std::make_unique(); m_privateProperties->setPrivateProperty(vm, owner, propertyName, value); } @@ -70,14 +71,17 @@ struct JSCallbackObjectData : WeakHandleOwner { void visitChildren(SlotVisitor& visitor) { - if (!m_privateProperties) + JSPrivatePropertyMap* properties = m_privateProperties.get(); + if (!properties) return; - m_privateProperties->visitChildren(visitor); + properties->visitChildren(visitor); } void* privateData; JSClassRef jsClass; struct JSPrivatePropertyMap { + WTF_MAKE_FAST_ALLOCATED; + public: JSValue getPrivateProperty(const Identifier& propertyName) const { PrivatePropertyMap::const_iterator location = m_propertyMap.find(propertyName.impl()); @@ -88,29 +92,32 @@ struct JSCallbackObjectData : WeakHandleOwner { void setPrivateProperty(VM& vm, JSCell* owner, const Identifier& propertyName, JSValue value) { + LockHolder locker(m_lock); WriteBarrier empty; m_propertyMap.add(propertyName.impl(), empty).iterator->value.set(vm, owner, value); } void deletePrivateProperty(const Identifier& propertyName) { + LockHolder locker(m_lock); m_propertyMap.remove(propertyName.impl()); } void visitChildren(SlotVisitor& visitor) { - for (PrivatePropertyMap::iterator ptr = m_propertyMap.begin(); ptr != m_propertyMap.end(); ++ptr) { - if (ptr->value) - visitor.append(&ptr->value); + LockHolder locker(m_lock); + for (auto& pair : m_propertyMap) { + if (pair.value) + visitor.append(pair.value); } } private: - typedef HashMap, WriteBarrier, IdentifierRepHash> PrivatePropertyMap; + typedef HashMap, WriteBarrier, IdentifierRepHash> PrivatePropertyMap; PrivatePropertyMap m_propertyMap; + Lock m_lock; }; - OwnPtr m_privateProperties; - virtual void finalize(Handle, void*) override; + std::unique_ptr m_privateProperties; }; @@ -125,6 +132,9 @@ protected: public: typedef Parent Base; + static const unsigned StructureFlags = Base::StructureFlags | ProhibitsPropertyCaching | OverridesGetOwnPropertySlot | InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero | ImplementsHasInstance | OverridesGetPropertyNames | TypeOfShouldCallGetCallData; + + ~JSCallbackObject(); static JSCallbackObject* create(ExecState* exec, JSGlobalObject* globalObject, Structure* structure, JSClassRef classRef, void* data) { @@ -144,7 +154,19 @@ public: void 
setPrivate(void* data); void* getPrivate(); + // FIXME: We should fix the warnings for extern-template in JSObject template classes: https://bugs.webkit.org/show_bug.cgi?id=161979 +#if COMPILER(CLANG) +#if __has_warning("-Wundefined-var-template") +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wundefined-var-template" +#endif +#endif DECLARE_INFO; +#if COMPILER(CLANG) +#if __has_warning("-Wundefined-var-template") +#pragma clang diagnostic pop +#endif +#endif JSClassRef classRef() const { return m_callbackObjectData->jsClass; } bool inherits(JSClassRef) const; @@ -168,9 +190,6 @@ public: using Parent::methodTable; -protected: - static const unsigned StructureFlags = ProhibitsPropertyCaching | OverridesGetOwnPropertySlot | InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero | ImplementsHasInstance | OverridesHasInstance | OverridesVisitChildren | OverridesGetPropertyNames | Parent::StructureFlags; - private: static String className(const JSObject*); @@ -179,8 +198,8 @@ private: static bool getOwnPropertySlot(JSObject*, ExecState*, PropertyName, PropertySlot&); static bool getOwnPropertySlotByIndex(JSObject*, ExecState*, unsigned propertyName, PropertySlot&); - static void put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); - static void putByIndex(JSCell*, ExecState*, unsigned, JSValue, bool shouldThrow); + static bool put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); + static bool putByIndex(JSCell*, ExecState*, unsigned, JSValue, bool shouldThrow); static bool deleteProperty(JSCell*, ExecState*, PropertyName); static bool deletePropertyByIndex(JSCell*, ExecState*, unsigned); @@ -196,8 +215,6 @@ private: { JSCallbackObject* thisObject = jsCast(cell); ASSERT_GC_OBJECT_INHERITS((static_cast(thisObject)), JSCallbackObject::info()); - COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); - ASSERT(thisObject->Parent::structure()->typeInfo().overridesVisitChildren()); Parent::visitChildren(thisObject, visitor); thisObject->m_callbackObjectData->visitChildren(visitor); } @@ -211,10 +228,11 @@ private: static EncodedJSValue JSC_HOST_CALL construct(ExecState*); JSValue getStaticValue(ExecState*, PropertyName); - static EncodedJSValue staticFunctionGetter(ExecState*, EncodedJSValue, EncodedJSValue, PropertyName); - static EncodedJSValue callbackGetter(ExecState*, EncodedJSValue, EncodedJSValue, PropertyName); + static EncodedJSValue staticFunctionGetter(ExecState*, EncodedJSValue, PropertyName); + static EncodedJSValue callbackGetter(ExecState*, EncodedJSValue, PropertyName); - OwnPtr m_callbackObjectData; + std::unique_ptr m_callbackObjectData; + const ClassInfo* m_classInfo; }; } // namespace JSC diff --git a/Source/JavaScriptCore/API/JSCallbackObjectFunctions.h b/Source/JavaScriptCore/API/JSCallbackObjectFunctions.h index 5be053f1e..ee3ee2f31 100644 --- a/Source/JavaScriptCore/API/JSCallbackObjectFunctions.h +++ b/Source/JavaScriptCore/API/JSCallbackObjectFunctions.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2006, 2008, 2016 Apple Inc. All rights reserved. * Copyright (C) 2007 Eric Seidel * * Redistribution and use in source and binary forms, with or without @@ -11,10 +11,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -24,7 +24,6 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "APIShims.h" #include "APICast.h" #include "Error.h" #include "ExceptionHelpers.h" @@ -45,21 +44,22 @@ namespace JSC { template inline JSCallbackObject* JSCallbackObject::asCallbackObject(JSValue value) { - ASSERT(asObject(value)->inherits(info())); + ASSERT(asObject(value)->inherits(*value.getObject()->vm(), info())); return jsCast(asObject(value)); } template -inline JSCallbackObject* JSCallbackObject::asCallbackObject(EncodedJSValue value) +inline JSCallbackObject* JSCallbackObject::asCallbackObject(EncodedJSValue encodedValue) { - ASSERT(asObject(JSValue::decode(value))->inherits(info())); - return jsCast(asObject(JSValue::decode(value))); + JSValue value = JSValue::decode(encodedValue); + ASSERT(asObject(value)->inherits(*value.getObject()->vm(), info())); + return jsCast(asObject(value)); } template JSCallbackObject::JSCallbackObject(ExecState* exec, Structure* structure, JSClassRef jsClass, void* data) : Parent(exec->vm(), structure) - , m_callbackObjectData(adoptPtr(new JSCallbackObjectData(data, jsClass))) + , m_callbackObjectData(std::make_unique(data, jsClass)) { } @@ -68,15 +68,32 @@ JSCallbackObject::JSCallbackObject(ExecState* exec, Structure* structure template JSCallbackObject::JSCallbackObject(VM& vm, JSClassRef jsClass, Structure* structure) : Parent(vm, structure) - , m_callbackObjectData(adoptPtr(new JSCallbackObjectData(0, jsClass))) + , m_callbackObjectData(std::make_unique(nullptr, jsClass)) { } +template +JSCallbackObject::~JSCallbackObject() +{ + VM* vm = this->HeapCell::vm(); + vm->currentlyDestructingCallbackObject = this; + ASSERT(m_classInfo); + vm->currentlyDestructingCallbackObjectClassInfo = m_classInfo; + JSObjectRef thisRef = toRef(static_cast(this)); + for (JSClassRef jsClass = classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectFinalizeCallback finalize = jsClass->finalize) + finalize(thisRef); + } + vm->currentlyDestructingCallbackObject = nullptr; + vm->currentlyDestructingCallbackObjectClassInfo = nullptr; +} + template void JSCallbackObject::finishCreation(ExecState* exec) { - Base::finishCreation(exec->vm()); - ASSERT(Parent::inherits(info())); + VM& vm = exec->vm(); + Base::finishCreation(vm); + ASSERT(Parent::inherits(vm, info())); init(exec); } @@ -84,7 +101,7 @@ void JSCallbackObject::finishCreation(ExecState* exec) template void JSCallbackObject::finishCreation(VM& vm) { - ASSERT(Parent::inherits(info())); + ASSERT(Parent::inherits(vm, info())); ASSERT(Parent::isGlobalObject()); Base::finishCreation(vm); init(jsCast(this)->globalExec()); @@ -104,17 +121,12 @@ void JSCallbackObject::init(ExecState* exec) // initialize from base to derived for (int i = static_cast(initRoutines.size()) - 1; i >= 0; i--) { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); JSObjectInitializeCallback initialize = initRoutines[i]; initialize(toRef(exec), toRef(this)); } - - for (JSClassRef jsClassPtr = classRef(); jsClassPtr; 
jsClassPtr = jsClassPtr->parentClass) { - if (jsClassPtr->finalize) { - WeakSet::allocate(this, m_callbackObjectData.get(), classRef()); - break; - } - } + + m_classInfo = this->classInfo(); } template @@ -131,18 +143,21 @@ String JSCallbackObject::className(const JSObject* object) template bool JSCallbackObject::getOwnPropertySlot(JSObject* object, ExecState* exec, PropertyName propertyName, PropertySlot& slot) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSCallbackObject* thisObject = jsCast(object); JSContextRef ctx = toRef(exec); JSObjectRef thisRef = toRef(thisObject); RefPtr propertyNameRef; - if (StringImpl* name = propertyName.publicName()) { + if (StringImpl* name = propertyName.uid()) { for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { // optional optimization to bypass getProperty in cases when we only need to know if the property exists if (JSObjectHasPropertyCallback hasProperty = jsClass->hasProperty) { if (!propertyNameRef) propertyNameRef = OpaqueJSString::create(name); - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); if (hasProperty(ctx, thisRef, propertyNameRef.get())) { slot.setCustom(thisObject, ReadOnly | DontEnum, callbackGetter); return true; @@ -153,11 +168,11 @@ bool JSCallbackObject::getOwnPropertySlot(JSObject* object, ExecState* e JSValueRef exception = 0; JSValueRef value; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); value = getProperty(ctx, thisRef, propertyNameRef.get(), &exception); } if (exception) { - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); slot.setValue(thisObject, ReadOnly | DontEnum, jsUndefined()); return true; } @@ -198,6 +213,9 @@ bool JSCallbackObject::getOwnPropertySlotByIndex(JSObject* object, ExecS template JSValue JSCallbackObject::defaultValue(const JSObject* object, ExecState* exec, PreferredPrimitiveType hint) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + const JSCallbackObject* thisObject = jsCast(object); JSContextRef ctx = toRef(exec); JSObjectRef thisRef = toRef(thisObject); @@ -208,7 +226,7 @@ JSValue JSCallbackObject::defaultValue(const JSObject* object, ExecState JSValueRef exception = 0; JSValueRef result = convertToType(ctx, thisRef, jsHint, &exception); if (exception) { - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); return jsUndefined(); } if (result) @@ -220,15 +238,18 @@ JSValue JSCallbackObject::defaultValue(const JSObject* object, ExecState } template -void JSCallbackObject::put(JSCell* cell, ExecState* exec, PropertyName propertyName, JSValue value, PutPropertySlot& slot) +bool JSCallbackObject::put(JSCell* cell, ExecState* exec, PropertyName propertyName, JSValue value, PutPropertySlot& slot) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSCallbackObject* thisObject = jsCast(cell); JSContextRef ctx = toRef(exec); JSObjectRef thisRef = toRef(thisObject); RefPtr propertyNameRef; JSValueRef valueRef = toRef(exec, value); - if (StringImpl* name = propertyName.publicName()) { + if (StringImpl* name = propertyName.uid()) { for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { if (JSObjectSetPropertyCallback setProperty = jsClass->setProperty) { if (!propertyNameRef) @@ -236,40 +257,42 @@ void JSCallbackObject::put(JSCell* cell, ExecState* exec, PropertyName p JSValueRef exception = 0; 
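[Editor's note: the Lock/LockHolder additions above make JSPrivatePropertyMap safe to mutate and visit concurrently, because every entry point takes the lock before touching the map. The same guard-every-operation shape, sketched standalone with standard-library types in place of WTF's Lock and WriteBarrier:]

    #include <map>
    #include <mutex>
    #include <string>

    // Standalone analogue of the guarded private-property map: set, remove
    // and visit each acquire the lock first, so concurrent callers are safe.
    class GuardedPropertyMap {
    public:
        void set(const std::string& name, int value)
        {
            std::lock_guard<std::mutex> locker(m_lock);
            m_map[name] = value;
        }

        void remove(const std::string& name)
        {
            std::lock_guard<std::mutex> locker(m_lock);
            m_map.erase(name);
        }

        template<typename Visitor> void visit(Visitor&& visitor)
        {
            std::lock_guard<std::mutex> locker(m_lock);
            for (auto& pair : m_map)
                visitor(pair.first, pair.second);
        }

    private:
        std::map<std::string, int> m_map;
        std::mutex m_lock;
    };
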
bool result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = setProperty(ctx, thisRef, propertyNameRef.get(), valueRef, &exception); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); if (result || exception) - return; + return result; } if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { if (StaticValueEntry* entry = staticValues->get(name)) { if (entry->attributes & kJSPropertyAttributeReadOnly) - return; + return false; if (JSObjectSetPropertyCallback setProperty = entry->setProperty) { JSValueRef exception = 0; bool result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = setProperty(ctx, thisRef, entry->propertyNameRef.get(), valueRef, &exception); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); if (result || exception) - return; + return result; } } } if (OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { if (StaticFunctionEntry* entry = staticFunctions->get(name)) { + PropertySlot getSlot(thisObject, PropertySlot::InternalMethodType::VMInquiry); + if (Parent::getOwnPropertySlot(thisObject, exec, propertyName, getSlot)) + return Parent::put(thisObject, exec, propertyName, value, slot); if (entry->attributes & kJSPropertyAttributeReadOnly) - return; - thisObject->JSCallbackObject::putDirect(exec->vm(), propertyName, value); // put as override property - return; + return false; + return thisObject->JSCallbackObject::putDirect(vm, propertyName, value); // put as override property } } } @@ -279,8 +302,11 @@ void JSCallbackObject::put(JSCell* cell, ExecState* exec, PropertyName p } template -void JSCallbackObject::putByIndex(JSCell* cell, ExecState* exec, unsigned propertyIndex, JSValue value, bool shouldThrow) +bool JSCallbackObject::putByIndex(JSCell* cell, ExecState* exec, unsigned propertyIndex, JSValue value, bool shouldThrow) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSCallbackObject* thisObject = jsCast(cell); JSContextRef ctx = toRef(exec); JSObjectRef thisRef = toRef(thisObject); @@ -295,30 +321,30 @@ void JSCallbackObject::putByIndex(JSCell* cell, ExecState* exec, unsigne JSValueRef exception = 0; bool result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = setProperty(ctx, thisRef, propertyNameRef.get(), valueRef, &exception); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); if (result || exception) - return; + return result; } if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { if (StaticValueEntry* entry = staticValues->get(propertyName.impl())) { if (entry->attributes & kJSPropertyAttributeReadOnly) - return; + return false; if (JSObjectSetPropertyCallback setProperty = entry->setProperty) { JSValueRef exception = 0; bool result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = setProperty(ctx, thisRef, entry->propertyNameRef.get(), valueRef, &exception); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); if (result || exception) - return; + return result; } } } @@ -326,7 +352,7 @@ void JSCallbackObject::putByIndex(JSCell* cell, ExecState* exec, unsigne if 
(OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { if (StaticFunctionEntry* entry = staticFunctions->get(propertyName.impl())) { if (entry->attributes & kJSPropertyAttributeReadOnly) - return; + return false; break; } } @@ -338,12 +364,15 @@ void JSCallbackObject::putByIndex(JSCell* cell, ExecState* exec, unsigne template bool JSCallbackObject::deleteProperty(JSCell* cell, ExecState* exec, PropertyName propertyName) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSCallbackObject* thisObject = jsCast(cell); JSContextRef ctx = toRef(exec); JSObjectRef thisRef = toRef(thisObject); RefPtr propertyNameRef; - if (StringImpl* name = propertyName.publicName()) { + if (StringImpl* name = propertyName.uid()) { for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { if (JSObjectDeletePropertyCallback deleteProperty = jsClass->deleteProperty) { if (!propertyNameRef) @@ -351,11 +380,11 @@ bool JSCallbackObject::deleteProperty(JSCell* cell, ExecState* exec, Pro JSValueRef exception = 0; bool result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = deleteProperty(ctx, thisRef, propertyNameRef.get(), &exception); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); if (result || exception) return true; } @@ -395,16 +424,19 @@ ConstructType JSCallbackObject::getConstructData(JSCell* cell, Construct for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { if (jsClass->callAsConstructor) { constructData.native.function = construct; - return ConstructTypeHost; + return ConstructType::Host; } } - return ConstructTypeNone; + return ConstructType::None; } template EncodedJSValue JSCallbackObject::construct(ExecState* exec) { - JSObject* constructor = exec->callee(); + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSObject* constructor = exec->jsCallee(); JSContextRef execRef = toRef(exec); JSObjectRef constructorRef = toRef(constructor); @@ -418,11 +450,11 @@ EncodedJSValue JSCallbackObject::construct(ExecState* exec) JSValueRef exception = 0; JSObject* result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = toJS(callAsConstructor(execRef, constructorRef, argumentCount, arguments.data(), &exception)); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); return JSValue::encode(result); } } @@ -434,6 +466,9 @@ EncodedJSValue JSCallbackObject::construct(ExecState* exec) template bool JSCallbackObject::customHasInstance(JSObject* object, ExecState* exec, JSValue value) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSCallbackObject* thisObject = jsCast(object); JSContextRef execRef = toRef(exec); JSObjectRef thisRef = toRef(thisObject); @@ -444,11 +479,11 @@ bool JSCallbackObject::customHasInstance(JSObject* object, ExecState* ex JSValueRef exception = 0; bool result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = hasInstance(execRef, thisRef, valueRef, &exception); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); return result; } } @@ -462,18 +497,21 @@ CallType JSCallbackObject::getCallData(JSCell* cell, CallData& callData) for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = 
jsClass->parentClass) { if (jsClass->callAsFunction) { callData.native.function = call; - return CallTypeHost; + return CallType::Host; } } - return CallTypeNone; + return CallType::None; } template EncodedJSValue JSCallbackObject::call(ExecState* exec) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSContextRef execRef = toRef(exec); - JSObjectRef functionRef = toRef(exec->callee()); - JSObjectRef thisObjRef = toRef(jsCast(exec->hostThisValue().toThis(exec, NotStrictMode))); + JSObjectRef functionRef = toRef(exec->jsCallee()); + JSObjectRef thisObjRef = toRef(jsCast(exec->thisValue().toThis(exec, NotStrictMode))); for (JSClassRef jsClass = jsCast*>(toJS(functionRef))->classRef(); jsClass; jsClass = jsClass->parentClass) { if (JSObjectCallAsFunctionCallback callAsFunction = jsClass->callAsFunction) { @@ -485,11 +523,11 @@ EncodedJSValue JSCallbackObject::call(ExecState* exec) JSValueRef exception = 0; JSValue result; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); result = toJS(exec, callAsFunction(execRef, functionRef, thisObjRef, argumentCount, arguments.data(), &exception)); } if (exception) - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); return JSValue::encode(result); } } @@ -507,7 +545,7 @@ void JSCallbackObject::getOwnNonIndexPropertyNames(JSObject* object, Exe for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { if (JSObjectGetPropertyNamesCallback getPropertyNames = jsClass->getPropertyNames) { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); getPropertyNames(execRef, thisRef, toRef(&propertyNames)); } @@ -517,8 +555,10 @@ void JSCallbackObject::getOwnNonIndexPropertyNames(JSObject* object, Exe for (iterator it = staticValues->begin(); it != end; ++it) { StringImpl* name = it->key.get(); StaticValueEntry* entry = it->value.get(); - if (entry->getProperty && (!(entry->attributes & kJSPropertyAttributeDontEnum) || (mode == IncludeDontEnumProperties))) - propertyNames.add(Identifier(exec, name)); + if (entry->getProperty && (!(entry->attributes & kJSPropertyAttributeDontEnum) || mode.includeDontEnumProperties())) { + ASSERT(!name->isSymbol()); + propertyNames.add(Identifier::fromString(exec, String(name))); + } } } @@ -528,8 +568,10 @@ void JSCallbackObject::getOwnNonIndexPropertyNames(JSObject* object, Exe for (iterator it = staticFunctions->begin(); it != end; ++it) { StringImpl* name = it->key.get(); StaticFunctionEntry* entry = it->value.get(); - if (!(entry->attributes & kJSPropertyAttributeDontEnum) || (mode == IncludeDontEnumProperties)) - propertyNames.add(Identifier(exec, name)); + if (!(entry->attributes & kJSPropertyAttributeDontEnum) || mode.includeDontEnumProperties()) { + ASSERT(!name->isSymbol()); + propertyNames.add(Identifier::fromString(exec, String(name))); + } } } } @@ -562,9 +604,12 @@ bool JSCallbackObject::inherits(JSClassRef c) const template JSValue JSCallbackObject::getStaticValue(ExecState* exec, PropertyName propertyName) { + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSObjectRef thisRef = toRef(this); - if (StringImpl* name = propertyName.publicName()) { + if (StringImpl* name = propertyName.uid()) { for (JSClassRef jsClass = classRef(); jsClass; jsClass = jsClass->parentClass) { if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { if (StaticValueEntry* entry = staticValues->get(name)) { @@ -572,11 +617,11 @@ JSValue 
JSCallbackObject::getStaticValue(ExecState* exec, PropertyName p JSValueRef exception = 0; JSValueRef value; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); value = getProperty(toRef(exec), thisRef, entry->propertyNameRef.get(), &exception); } if (exception) { - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, toJS(exec, exception)); return jsUndefined(); } if (value) @@ -591,21 +636,23 @@ JSValue JSCallbackObject::getStaticValue(ExecState* exec, PropertyName p } template -EncodedJSValue JSCallbackObject::staticFunctionGetter(ExecState* exec, EncodedJSValue slotParent, EncodedJSValue, PropertyName propertyName) +EncodedJSValue JSCallbackObject::staticFunctionGetter(ExecState* exec, EncodedJSValue thisValue, PropertyName propertyName) { - JSCallbackObject* thisObj = asCallbackObject(slotParent); + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObj = asCallbackObject(thisValue); // Check for cached or override property. - PropertySlot slot2(thisObj); + PropertySlot slot2(thisObj, PropertySlot::InternalMethodType::VMInquiry); if (Parent::getOwnPropertySlot(thisObj, exec, propertyName, slot2)) return JSValue::encode(slot2.getValue(exec, propertyName)); - if (StringImpl* name = propertyName.publicName()) { + if (StringImpl* name = propertyName.uid()) { for (JSClassRef jsClass = thisObj->classRef(); jsClass; jsClass = jsClass->parentClass) { if (OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { if (StaticFunctionEntry* entry = staticFunctions->get(name)) { if (JSObjectCallAsFunctionCallback callAsFunction = entry->callAsFunction) { - VM& vm = exec->vm(); JSObject* o = JSCallbackFunction::create(vm, thisObj->globalObject(), callAsFunction, name); thisObj->putDirect(vm, propertyName, o, entry->attributes); return JSValue::encode(o); @@ -615,18 +662,21 @@ EncodedJSValue JSCallbackObject::staticFunctionGetter(ExecState* exec, E } } - return JSValue::encode(exec->vm().throwException(exec, createReferenceError(exec, ASCIILiteral("Static function property defined with NULL callAsFunction callback.")))); + return JSValue::encode(throwException(exec, scope, createReferenceError(exec, ASCIILiteral("Static function property defined with NULL callAsFunction callback.")))); } template -EncodedJSValue JSCallbackObject::callbackGetter(ExecState* exec, EncodedJSValue slotParent, EncodedJSValue, PropertyName propertyName) +EncodedJSValue JSCallbackObject::callbackGetter(ExecState* exec, EncodedJSValue thisValue, PropertyName propertyName) { - JSCallbackObject* thisObj = asCallbackObject(slotParent); + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObj = asCallbackObject(thisValue); JSObjectRef thisRef = toRef(thisObj); RefPtr propertyNameRef; - if (StringImpl* name = propertyName.publicName()) { + if (StringImpl* name = propertyName.uid()) { for (JSClassRef jsClass = thisObj->classRef(); jsClass; jsClass = jsClass->parentClass) { if (JSObjectGetPropertyCallback getProperty = jsClass->getProperty) { if (!propertyNameRef) @@ -634,11 +684,11 @@ EncodedJSValue JSCallbackObject::callbackGetter(ExecState* exec, Encoded JSValueRef exception = 0; JSValueRef value; { - APICallbackShim callbackShim(exec); + JSLock::DropAllLocks dropAllLocks(exec); value = getProperty(toRef(exec), thisRef, propertyNameRef.get(), &exception); } if (exception) { - exec->vm().throwException(exec, toJS(exec, exception)); + throwException(exec, scope, 
toJS(exec, exception)); return JSValue::encode(jsUndefined()); } if (value) @@ -647,7 +697,7 @@ EncodedJSValue JSCallbackObject::callbackGetter(ExecState* exec, Encoded } } - return JSValue::encode(exec->vm().throwException(exec, createReferenceError(exec, ASCIILiteral("hasProperty callback returned true for a property that doesn't exist.")))); + return JSValue::encode(throwException(exec, scope, createReferenceError(exec, ASCIILiteral("hasProperty callback returned true for a property that doesn't exist.")))); } } // namespace JSC diff --git a/Source/JavaScriptCore/API/JSClassRef.cpp b/Source/JavaScriptCore/API/JSClassRef.cpp index 544c359b1..eb525f138 100644 --- a/Source/JavaScriptCore/API/JSClassRef.cpp +++ b/Source/JavaScriptCore/API/JSClassRef.cpp @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -33,7 +33,7 @@ #include "JSGlobalObject.h" #include "JSObjectRef.h" #include "ObjectPrototype.h" -#include "Operations.h" +#include "JSCInlines.h" #include #include @@ -62,7 +62,7 @@ OpaqueJSClass::OpaqueJSClass(const JSClassDefinition* definition, OpaqueJSClass* initializeThreading(); if (const JSStaticValue* staticValue = definition->staticValues) { - m_staticValues = adoptPtr(new OpaqueJSClassStaticValuesTable); + m_staticValues = std::make_unique(); while (staticValue->name) { String valueName = String::fromUTF8(staticValue->name); if (!valueName.isNull()) @@ -72,7 +72,7 @@ OpaqueJSClass::OpaqueJSClass(const JSClassDefinition* definition, OpaqueJSClass* } if (const JSStaticFunction* staticFunction = definition->staticFunctions) { - m_staticFunctions = adoptPtr(new OpaqueJSClassStaticFunctionsTable); + m_staticFunctions = std::make_unique(); while (staticFunction->name) { String functionName = String::fromUTF8(staticFunction->name); if (!functionName.isNull()) @@ -88,19 +88,19 @@ OpaqueJSClass::OpaqueJSClass(const JSClassDefinition* definition, OpaqueJSClass* OpaqueJSClass::~OpaqueJSClass() { // The empty string is shared across threads & is an identifier, in all other cases we should have done a deep copy in className(), below. 
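[Editor's note: the callback hunks above route a client-reported JSValueRef exception into the VM through the new throw scope. From the public API side, the convention is the out-parameter visible throughout these callbacks: pass &exception, then test it after the call instead of relying on the return value. A runnable sketch using only the public C API:]

    #include <JavaScriptCore/JavaScript.h>
    #include <cstdio>

    int main()
    {
        JSGlobalContextRef ctx = JSGlobalContextCreate(nullptr);
        // Evaluating an unbound name throws a ReferenceError.
        JSStringRef source = JSStringCreateWithUTF8CString("nonexistent.property");

        JSValueRef exception = nullptr;
        JSValueRef result = JSEvaluateScript(ctx, source, nullptr, nullptr, 1, &exception);
        if (exception)
            std::printf("script threw\n"); // result is null; exception holds the error
        else
            (void)result;

        JSStringRelease(source);
        JSGlobalContextRelease(ctx);
        return 0;
    }
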
- ASSERT(!m_className.length() || !m_className.impl()->isIdentifier()); + ASSERT(!m_className.length() || !m_className.impl()->isAtomic()); #ifndef NDEBUG if (m_staticValues) { OpaqueJSClassStaticValuesTable::const_iterator end = m_staticValues->end(); for (OpaqueJSClassStaticValuesTable::const_iterator it = m_staticValues->begin(); it != end; ++it) - ASSERT(!it->key->isIdentifier()); + ASSERT(!it->key->isAtomic()); } if (m_staticFunctions) { OpaqueJSClassStaticFunctionsTable::const_iterator end = m_staticFunctions->end(); for (OpaqueJSClassStaticFunctionsTable::const_iterator it = m_staticFunctions->begin(); it != end; ++it) - ASSERT(!it->key->isIdentifier()); + ASSERT(!it->key->isAtomic()); } #endif @@ -108,12 +108,12 @@ OpaqueJSClass::~OpaqueJSClass() JSClassRelease(prototypeClass); } -PassRefPtr OpaqueJSClass::createNoAutomaticPrototype(const JSClassDefinition* definition) +Ref OpaqueJSClass::createNoAutomaticPrototype(const JSClassDefinition* definition) { - return adoptRef(new OpaqueJSClass(definition, 0)); + return adoptRef(*new OpaqueJSClass(definition, 0)); } -PassRefPtr OpaqueJSClass::create(const JSClassDefinition* clientDefinition) +Ref OpaqueJSClass::create(const JSClassDefinition* clientDefinition) { JSClassDefinition definition = *clientDefinition; // Avoid modifying client copy. @@ -124,7 +124,7 @@ PassRefPtr OpaqueJSClass::create(const JSClassDefinition* clientD // We are supposed to use JSClassRetain/Release but since we know that we currently have // the only reference to this class object we cheat and use a RefPtr instead. RefPtr protoClass = adoptRef(new OpaqueJSClass(&protoDefinition, 0)); - return adoptRef(new OpaqueJSClass(&definition, protoClass.get())); + return adoptRef(*new OpaqueJSClass(&definition, protoClass.get())); } OpaqueJSClassContextData::OpaqueJSClassContextData(JSC::VM&, OpaqueJSClass* jsClass) @@ -134,7 +134,7 @@ OpaqueJSClassContextData::OpaqueJSClassContextData(JSC::VM&, OpaqueJSClass* jsCl staticValues = std::make_unique(); OpaqueJSClassStaticValuesTable::const_iterator end = jsClass->m_staticValues->end(); for (OpaqueJSClassStaticValuesTable::const_iterator it = jsClass->m_staticValues->begin(); it != end; ++it) { - ASSERT(!it->key->isIdentifier()); + ASSERT(!it->key->isAtomic()); String valueName = it->key->isolatedCopy(); staticValues->add(valueName.impl(), std::make_unique(it->value->getProperty, it->value->setProperty, it->value->attributes, valueName)); } @@ -144,7 +144,7 @@ OpaqueJSClassContextData::OpaqueJSClassContextData(JSC::VM&, OpaqueJSClass* jsCl staticFunctions = std::make_unique(); OpaqueJSClassStaticFunctionsTable::const_iterator end = jsClass->m_staticFunctions->end(); for (OpaqueJSClassStaticFunctionsTable::const_iterator it = jsClass->m_staticFunctions->begin(); it != end; ++it) { - ASSERT(!it->key->isIdentifier()); + ASSERT(!it->key->isAtomic()); staticFunctions->add(it->key->isolatedCopy(), std::make_unique(it->value->callAsFunction, it->value->attributes)); } } @@ -160,7 +160,7 @@ OpaqueJSClassContextData& OpaqueJSClass::contextData(ExecState* exec) String OpaqueJSClass::className() { - // Make a deep copy, so that the caller has no chance to put the original into IdentifierTable. + // Make a deep copy, so that the caller has no chance to put the original into AtomicStringTable. return m_className.isolatedCopy(); } @@ -174,14 +174,6 @@ OpaqueJSClassStaticFunctionsTable* OpaqueJSClass::staticFunctions(JSC::ExecState return contextData(exec).staticFunctions.get(); } -/*! -// Doc here in case we make this public. 
(Hopefully we won't.) -@function - @abstract Returns the prototype that will be used when constructing an object with a given class. - @param ctx The execution context to use. - @param jsClass A JSClass whose prototype you want to get. - @result The JSObject prototype that was automatically generated for jsClass, or NULL if no prototype was automatically generated. This is the prototype that will be used when constructing an object using jsClass. -*/ JSObject* OpaqueJSClass::prototype(ExecState* exec) { /* Class (C++) and prototype (JS) inheritance are parallel, so: @@ -204,7 +196,7 @@ JSObject* OpaqueJSClass::prototype(ExecState* exec) JSObject* prototype = JSCallbackObject::create(exec, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->callbackObjectStructure(), prototypeClass, &jsClassData); // set jsClassData as the object's private data, so it can clear our reference on destruction if (parentClass) { if (JSObject* parentPrototype = parentClass->prototype(exec)) - prototype->setPrototype(exec->vm(), parentPrototype); + prototype->setPrototypeDirect(exec->vm(), parentPrototype); } jsClassData.cachedPrototype = Weak(prototype); diff --git a/Source/JavaScriptCore/API/JSClassRef.h b/Source/JavaScriptCore/API/JSClassRef.h index f979f3b2c..fa024d344 100644 --- a/Source/JavaScriptCore/API/JSClassRef.h +++ b/Source/JavaScriptCore/API/JSClassRef.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -85,8 +85,8 @@ public: }; struct OpaqueJSClass : public ThreadSafeRefCounted { - static PassRefPtr create(const JSClassDefinition*); - static PassRefPtr createNoAutomaticPrototype(const JSClassDefinition*); + static Ref create(const JSClassDefinition*); + static Ref createNoAutomaticPrototype(const JSClassDefinition*); JS_EXPORT_PRIVATE ~OpaqueJSClass(); String className(); @@ -118,10 +118,10 @@ private: OpaqueJSClassContextData& contextData(JSC::ExecState*); - // Strings in these data members should not be put into any IdentifierTable. + // Strings in these data members should not be put into any AtomicStringTable. String m_className; - OwnPtr m_staticValues; - OwnPtr m_staticFunctions; + std::unique_ptr m_staticValues; + std::unique_ptr m_staticFunctions; }; #endif // JSClassRef_h diff --git a/Source/JavaScriptCore/API/JSContext.h b/Source/JavaScriptCore/API/JSContext.h new file mode 100644 index 000000000..194e352bf --- /dev/null +++ b/Source/JavaScriptCore/API/JSContext.h @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContext_h +#define JSContext_h + +#include +#include + +#if JSC_OBJC_API_ENABLED + +@class JSVirtualMachine, JSValue; + +/*! +@interface +@discussion A JSContext is a JavaScript execution environment. All + JavaScript execution takes place within a context, and all JavaScript values + are tied to a context. +*/ +NS_CLASS_AVAILABLE(10_9, 7_0) +@interface JSContext : NSObject + +/*! +@methodgroup Creating New JSContexts +*/ +/*! +@method +@abstract Create a JSContext. +@result The new context. +*/ +- (instancetype)init; + +/*! +@method +@abstract Create a JSContext in the specified virtual machine. +@param virtualMachine The JSVirtualMachine in which the context will be created. +@result The new context. +*/ +- (instancetype)initWithVirtualMachine:(JSVirtualMachine *)virtualMachine; + +/*! +@methodgroup Evaluating Scripts +*/ +/*! +@method +@abstract Evaluate a string of JavaScript code. +@param script A string containing the JavaScript code to evaluate. +@result The last value generated by the script. +*/ +- (JSValue *)evaluateScript:(NSString *)script; + +/*! +@method +@abstract Evaluate a string of JavaScript code, with a URL for the script's source file. +@param script A string containing the JavaScript code to evaluate. +@param sourceURL A URL for the script's source file. Used by debuggers and when reporting exceptions. This parameter is informative only: it does not change the behavior of the script. +@result The last value generated by the script. +*/ +- (JSValue *)evaluateScript:(NSString *)script withSourceURL:(NSURL *)sourceURL NS_AVAILABLE(10_10, 8_0); + +/*! +@methodgroup Callback Accessors +*/ +/*! +@method +@abstract Get the JSContext that is currently executing. +@discussion This method may be called from within an Objective-C block or method invoked + as a callback from JavaScript to retrieve the callback's context. Outside of + a callback from JavaScript this method will return nil. +@result The currently executing JSContext or nil if there isn't one. +*/ ++ (JSContext *)currentContext; + +/*! +@method +@abstract Get the JavaScript function that is currently executing. 
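[Editor's note: initWithVirtualMachine:, documented above, has a direct C API analogue: a JSContextGroupRef plays the JSVirtualMachine role, and contexts created in the same group may share JavaScript values. A minimal sketch of that relationship:]

    #include <JavaScriptCore/JavaScript.h>

    // Two contexts in one group, like two JSContexts on one JSVirtualMachine:
    // a value created in one context is usable from the other.
    int main()
    {
        JSContextGroupRef group = JSContextGroupCreate();
        JSGlobalContextRef a = JSGlobalContextCreateInGroup(group, nullptr);
        JSGlobalContextRef b = JSGlobalContextCreateInGroup(group, nullptr);

        JSValueRef n = JSValueMakeNumber(a, 42);   // made in 'a'...
        double d = JSValueToNumber(b, n, nullptr); // ...read from 'b'
        (void)d;

        JSGlobalContextRelease(a);
        JSGlobalContextRelease(b);
        JSContextGroupRelease(group);
        return 0;
    }
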
+@discussion This method may be called from within an Objective-C block or method invoked + as a callback from JavaScript to retrieve the callback's context. Outside of + a callback from JavaScript this method will return nil. +@result The currently executing JavaScript function or nil if there isn't one. +*/ ++ (JSValue *)currentCallee NS_AVAILABLE(10_10, 8_0); + +/*! +@method +@abstract Get the this value of the currently executing method. +@discussion This method may be called from within an Objective-C block or method invoked + as a callback from JavaScript to retrieve the callback's this value. Outside + of a callback from JavaScript this method will return nil. +@result The current this value or nil if there isn't one. +*/ ++ (JSValue *)currentThis; + +/*! +@method +@abstract Get the arguments to the current callback. +@discussion This method may be called from within an Objective-C block or method invoked + as a callback from JavaScript to retrieve the callback's arguments, objects + in the returned array are instances of JSValue. Outside of a callback from + JavaScript this method will return nil. +@result An NSArray of the arguments nil if there is no current callback. +*/ ++ (NSArray *)currentArguments; + +/*! +@functiongroup Global Properties +*/ + +/*! +@property +@abstract Get the global object of the context. +@discussion This method retrieves the global object of the JavaScript execution context. + Instances of JSContext originating from WebKit will return a reference to the + WindowProxy object. +@result The global object. +*/ +@property (readonly, strong) JSValue *globalObject; + +/*! +@property +@discussion The exception property may be used to throw an exception to JavaScript. + + Before a callback is made from JavaScript to an Objective-C block or method, + the prior value of the exception property will be preserved and the property + will be set to nil. After the callback has completed the new value of the + exception property will be read, and prior value restored. If the new value + of exception is not nil, the callback will result in that value being thrown. + + This property may also be used to check for uncaught exceptions arising from + API function calls (since the default behaviour of exceptionHandler is to + assign an uncaught exception to this property). +*/ +@property (strong) JSValue *exception; + +/*! +@property +@discussion If a call to an API function results in an uncaught JavaScript exception, the + exceptionHandler block will be invoked. The default implementation for the + exception handler will store the exception to the exception property on + context. As a consequence the default behaviour is for uncaught exceptions + occurring within a callback from JavaScript to be rethrown upon return. + Setting this value to nil will cause all exceptions occurring + within a callback from JavaScript to be silently caught. +*/ +@property (copy) void(^exceptionHandler)(JSContext *context, JSValue *exception); + +/*! +@property +@discussion All instances of JSContext are associated with a JSVirtualMachine. +*/ +@property (readonly, strong) JSVirtualMachine *virtualMachine; + +/*! +@property +@discussion Name of the JSContext. Exposed when remote debugging the context. +*/ +@property (copy) NSString *name NS_AVAILABLE(10_10, 8_0); + +@end + +/*! 
+@category +@discussion Instances of JSContext implement the following methods in order to enable + support for subscript access by key and index, for example: + +@textblock + JSContext *context; + JSValue *v = context[@"X"]; // Get value for "X" from the global object. + context[@"Y"] = v; // Assign 'v' to "Y" on the global object. +@/textblock + + An object key passed as a subscript will be converted to a JavaScript value, + and then the value converted to a string used to resolve a property of the + global object. +*/ +@interface JSContext (SubscriptSupport) + +/*! +@method +@abstract Get a particular property on the global object. +@result The JSValue for the global object's property. +*/ +- (JSValue *)objectForKeyedSubscript:(id)key; + +/*! +@method +@abstract Set a particular property on the global object. +*/ +- (void)setObject:(id)object forKeyedSubscript:(NSObject *)key; + +@end + +/*! +@category +@discussion These functions are for bridging between the C API and the Objective-C API. +*/ +@interface JSContext (JSContextRefSupport) + +/*! +@method +@abstract Create a JSContext, wrapping its C API counterpart. +@result The JSContext equivalent of the provided JSGlobalContextRef. +*/ ++ (JSContext *)contextWithJSGlobalContextRef:(JSGlobalContextRef)jsGlobalContextRef; + +/*! +@property +@abstract Get the C API counterpart wrapped by a JSContext. +@result The C API equivalent of this JSContext. +*/ +@property (readonly) JSGlobalContextRef JSGlobalContextRef; +@end + +#endif + +#endif // JSContext_h diff --git a/Source/JavaScriptCore/API/JSContextInternal.h b/Source/JavaScriptCore/API/JSContextInternal.h new file mode 100644 index 000000000..5308fbb92 --- /dev/null +++ b/Source/JavaScriptCore/API/JSContextInternal.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
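[Editor's note: the subscript support documented above resolves keys against the global object. Its public C API equivalent, operating on the JSGlobalContextRef that contextWithJSGlobalContextRef: wraps, looks like this:]

    #include <JavaScriptCore/JavaScript.h>

    // C-API equivalent of: context[@"x"] = @42; JSValue *v = context[@"x"];
    int main()
    {
        JSGlobalContextRef ctx = JSGlobalContextCreate(nullptr);
        JSObjectRef global = JSContextGetGlobalObject(ctx);

        JSStringRef name = JSStringCreateWithUTF8CString("x");
        JSObjectSetProperty(ctx, global, name, JSValueMakeNumber(ctx, 42),
                            kJSPropertyAttributeNone, nullptr); // set "x" on the global object
        JSValueRef v = JSObjectGetProperty(ctx, global, name, nullptr); // read it back
        double x = JSValueToNumber(ctx, v, nullptr); // 42
        (void)x;

        JSStringRelease(name);
        JSGlobalContextRelease(ctx);
        return 0;
    }
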
+ */ + +#ifndef JSContextInternal_h +#define JSContextInternal_h + +#import + +#if JSC_OBJC_API_ENABLED + +#import + +struct CallbackData { + CallbackData *next; + JSContext *context; + JSValue *preservedException; + JSValueRef calleeValue; + JSValueRef thisValue; + size_t argumentCount; + const JSValueRef *arguments; + NSArray *currentArguments; +}; + +class WeakContextRef { +public: + WeakContextRef(JSContext * = nil); + ~WeakContextRef(); + + JSContext * get(); + void set(JSContext *); + +private: + JSContext *m_weakContext; +}; + +@class JSWrapperMap; + +@interface JSContext(Internal) + +- (id)initWithGlobalContextRef:(JSGlobalContextRef)context; + +- (void)notifyException:(JSValueRef)exception; +- (JSValue *)valueFromNotifyException:(JSValueRef)exception; +- (BOOL)boolFromNotifyException:(JSValueRef)exception; + +- (void)beginCallbackWithData:(CallbackData *)callbackData calleeValue:(JSValueRef)calleeValue thisValue:(JSValueRef)thisValue argumentCount:(size_t)argumentCount arguments:(const JSValueRef *)arguments; +- (void)endCallbackWithData:(CallbackData *)callbackData; + +- (JSValue *)wrapperForObjCObject:(id)object; +- (JSValue *)wrapperForJSObject:(JSValueRef)value; + +@property (readonly, retain) JSWrapperMap *wrapperMap; + +@end + +#endif + +#endif // JSContextInternal_h diff --git a/Source/JavaScriptCore/API/JSContextPrivate.h b/Source/JavaScriptCore/API/JSContextPrivate.h new file mode 100644 index 000000000..7d1d0cbdb --- /dev/null +++ b/Source/JavaScriptCore/API/JSContextPrivate.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextPrivate_h +#define JSContextPrivate_h + +#if JSC_OBJC_API_ENABLED + +#import + +@interface JSContext(Private) + +/*! +@property +@discussion Remote inspection setting of the JSContext. Default value is YES. +*/ +@property (setter=_setRemoteInspectionEnabled:) BOOL _remoteInspectionEnabled NS_AVAILABLE(10_10, 8_0); + +/*! +@property +@discussion Set whether or not the native call stack is included when reporting exceptions. Default value is YES. +*/ +@property (setter=_setIncludesNativeCallStackWhenReportingExceptions:) BOOL _includesNativeCallStackWhenReportingExceptions NS_AVAILABLE(10_10, 8_0); + +/*! 
+@property
+@discussion Set the run loop the Web Inspector debugger should use when evaluating JavaScript in the JSContext.
+*/
+@property (setter=_setDebuggerRunLoop:) CFRunLoopRef _debuggerRunLoop NS_AVAILABLE(10_10, 8_0);
+
+@end
+
+#endif
+
+#endif // JSContextPrivate_h
diff --git a/Source/JavaScriptCore/API/JSContextRef.cpp b/Source/JavaScriptCore/API/JSContextRef.cpp
index 81b61cf47..e7dad19b0 100644
--- a/Source/JavaScriptCore/API/JSContextRef.cpp
+++ b/Source/JavaScriptCore/API/JSContextRef.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006, 2007, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2006, 2007, 2013, 2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -10,37 +10,47 @@
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include "config.h" #include "JSContextRef.h" -#include "JSContextRefPrivate.h" +#include "JSContextRefInternal.h" #include "APICast.h" #include "CallFrame.h" -#include "CallFrameInlines.h" #include "InitializeThreading.h" #include "JSCallbackObject.h" #include "JSClassRef.h" #include "JSGlobalObject.h" #include "JSObject.h" -#include "Operations.h" +#include "JSCInlines.h" #include "SourceProvider.h" #include "StackVisitor.h" +#include "Watchdog.h" #include #include +#if ENABLE(REMOTE_INSPECTOR) +#include "JSGlobalObjectDebuggable.h" +#include "JSGlobalObjectInspectorController.h" +#include "JSRemoteInspector.h" +#endif + +#if ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS) +#include "JSContextRefInspectorSupport.h" +#endif + #if OS(DARWIN) #include @@ -57,7 +67,7 @@ using namespace JSC; JSContextGroupRef JSContextGroupCreate() { initializeThreading(); - return toRef(VM::createContextGroup().leakRef()); + return toRef(&VM::createContextGroup().leakRef()); } JSContextGroupRef JSContextGroupRetain(JSContextGroupRef group) @@ -68,16 +78,10 @@ JSContextGroupRef JSContextGroupRetain(JSContextGroupRef group) void JSContextGroupRelease(JSContextGroupRef group) { - IdentifierTable* savedIdentifierTable; VM& vm = *toJS(group); - { - JSLockHolder lock(vm); - savedIdentifierTable = wtfThreadData().setCurrentIdentifierTable(vm.identifierTable); - vm.deref(); - } - - wtfThreadData().setCurrentIdentifierTable(savedIdentifierTable); + JSLockHolder locker(&vm); + vm.deref(); } static bool internalScriptTimeoutCallback(ExecState* exec, void* callbackPtr, void* callbackData) @@ -91,21 +95,21 @@ static bool internalScriptTimeoutCallback(ExecState* exec, void* callbackPtr, vo void JSContextGroupSetExecutionTimeLimit(JSContextGroupRef group, double limit, JSShouldTerminateCallback callback, void* callbackData) { VM& vm = *toJS(group); - APIEntryShim entryShim(&vm); - Watchdog& watchdog = vm.watchdog; + JSLockHolder locker(&vm); + Watchdog& watchdog = vm.ensureWatchdog(); if (callback) { void* callbackPtr = reinterpret_cast(callback); - watchdog.setTimeLimit(vm, limit, internalScriptTimeoutCallback, callbackPtr, callbackData); + watchdog.setTimeLimit(std::chrono::duration_cast(std::chrono::duration(limit)), internalScriptTimeoutCallback, callbackPtr, callbackData); } else - watchdog.setTimeLimit(vm, limit); + watchdog.setTimeLimit(std::chrono::duration_cast(std::chrono::duration(limit))); } void JSContextGroupClearExecutionTimeLimit(JSContextGroupRef group) { VM& vm = *toJS(group); - APIEntryShim entryShim(&vm); - Watchdog& watchdog = vm.watchdog; - watchdog.setTimeLimit(vm, std::numeric_limits::infinity()); + JSLockHolder locker(&vm); + if (vm.watchdog()) + vm.watchdog()->setTimeLimit(Watchdog::noTimeLimit); } // From the API's perspective, a global context remains alive iff it has been JSGlobalContextRetained. @@ -129,30 +133,36 @@ JSGlobalContextRef JSGlobalContextCreateInGroup(JSContextGroupRef group, JSClass { initializeThreading(); - RefPtr vm = group ? PassRefPtr(toJS(group)) : VM::createContextGroup(); + Ref vm = group ? 
Ref(*toJS(group)) : VM::createContextGroup(); - APIEntryShim entryShim(vm.get(), false); - vm->makeUsableFromMultipleThreads(); + JSLockHolder locker(vm.ptr()); if (!globalObjectClass) { - JSGlobalObject* globalObject = JSGlobalObject::create(*vm, JSGlobalObject::createStructure(*vm, jsNull())); - globalObject->setGlobalThis(*vm, JSProxy::create(*vm, JSProxy::createStructure(*vm, globalObject, globalObject->prototype()), globalObject)); + JSGlobalObject* globalObject = JSGlobalObject::create(vm.get(), JSGlobalObject::createStructure(vm.get(), jsNull())); +#if ENABLE(REMOTE_INSPECTOR) + if (JSRemoteInspectorGetInspectionEnabledByDefault()) + globalObject->setRemoteDebuggingEnabled(true); +#endif return JSGlobalContextRetain(toGlobalRef(globalObject->globalExec())); } - JSGlobalObject* globalObject = JSCallbackObject::create(*vm, globalObjectClass, JSCallbackObject::createStructure(*vm, 0, jsNull())); + JSGlobalObject* globalObject = JSCallbackObject::create(vm.get(), globalObjectClass, JSCallbackObject::createStructure(vm.get(), 0, jsNull())); ExecState* exec = globalObject->globalExec(); JSValue prototype = globalObjectClass->prototype(exec); if (!prototype) prototype = jsNull(); - globalObject->resetPrototype(*vm, prototype); + globalObject->resetPrototype(vm.get(), prototype); +#if ENABLE(REMOTE_INSPECTOR) + if (JSRemoteInspectorGetInspectionEnabledByDefault()) + globalObject->setRemoteDebuggingEnabled(true); +#endif return JSGlobalContextRetain(toGlobalRef(exec)); } JSGlobalContextRef JSGlobalContextRetain(JSGlobalContextRef ctx) { ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); VM& vm = exec->vm(); gcProtect(exec->vmEntryGlobalObject()); @@ -162,21 +172,14 @@ JSGlobalContextRef JSGlobalContextRetain(JSGlobalContextRef ctx) void JSGlobalContextRelease(JSGlobalContextRef ctx) { - IdentifierTable* savedIdentifierTable; ExecState* exec = toJS(ctx); - { - JSLockHolder lock(exec); - - VM& vm = exec->vm(); - savedIdentifierTable = wtfThreadData().setCurrentIdentifierTable(vm.identifierTable); + JSLockHolder locker(exec); - bool protectCountIsZero = Heap::heap(exec->vmEntryGlobalObject())->unprotect(exec->vmEntryGlobalObject()); - if (protectCountIsZero) - vm.heap.reportAbandonedObjectGraph(); - vm.deref(); - } - - wtfThreadData().setCurrentIdentifierTable(savedIdentifierTable); + VM& vm = exec->vm(); + bool protectCountIsZero = Heap::heap(exec->vmEntryGlobalObject())->unprotect(exec->vmEntryGlobalObject()); + if (protectCountIsZero) + vm.heap.reportAbandonedObjectGraph(); + vm.deref(); } JSObjectRef JSContextGetGlobalObject(JSContextRef ctx) @@ -186,7 +189,7 @@ JSObjectRef JSContextGetGlobalObject(JSContextRef ctx) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); return toRef(jsCast(exec->lexicalGlobalObject()->methodTable()->toThis(exec->lexicalGlobalObject(), exec, NotStrictMode))); } @@ -208,7 +211,7 @@ JSGlobalContextRef JSContextGetGlobalContext(JSContextRef ctx) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); return toGlobalRef(exec->lexicalGlobalObject()->globalExec()); } @@ -221,7 +224,7 @@ JSStringRef JSGlobalContextCopyName(JSGlobalContextRef ctx) } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); String name = exec->vmEntryGlobalObject()->name(); if (name.isNull()) @@ -238,7 +241,7 @@ void JSGlobalContextSetName(JSGlobalContextRef ctx, JSStringRef name) } ExecState* exec = toJS(ctx); - 
APIEntryShim entryShim(exec); + JSLockHolder locker(exec); exec->vmEntryGlobalObject()->setName(name ? name->string() : String()); } @@ -252,12 +255,12 @@ public: { } - StackVisitor::Status operator()(StackVisitor& visitor) + StackVisitor::Status operator()(StackVisitor& visitor) const { if (m_remainingCapacityForFrameCapture) { // If callee is unknown, but we've not added any frame yet, we should // still add the frame, because something called us, and gave us arguments. - JSObject* callee = visitor->callee(); + JSCell* callee = visitor->callee(); if (!callee && visitor->index()) return StackVisitor::Done; @@ -270,7 +273,7 @@ public: builder.append(visitor->functionName()); builder.appendLiteral("() at "); builder.append(visitor->sourceURL()); - if (visitor->isJSFrame()) { + if (visitor->hasLineAndColumnInfo()) { builder.append(':'); unsigned lineNumber; unsigned unusedColumn; @@ -289,7 +292,7 @@ public: private: StringBuilder& m_builder; - unsigned m_remainingCapacityForFrameCapture; + mutable unsigned m_remainingCapacityForFrameCapture; }; JSStringRef JSContextCreateBacktrace(JSContextRef ctx, unsigned maxStackSize) @@ -310,4 +313,119 @@ JSStringRef JSContextCreateBacktrace(JSContextRef ctx, unsigned maxStackSize) return OpaqueJSString::create(builder.toString()).leakRef(); } +bool JSGlobalContextGetRemoteInspectionEnabled(JSGlobalContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + return exec->vmEntryGlobalObject()->remoteDebuggingEnabled(); +} + +void JSGlobalContextSetRemoteInspectionEnabled(JSGlobalContextRef ctx, bool enabled) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + exec->vmEntryGlobalObject()->setRemoteDebuggingEnabled(enabled); +} + +bool JSGlobalContextGetIncludesNativeCallStackWhenReportingExceptions(JSGlobalContextRef ctx) +{ +#if ENABLE(REMOTE_INSPECTOR) + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + JSGlobalObject* globalObject = exec->vmEntryGlobalObject(); + return globalObject->inspectorController().includesNativeCallStackWhenReportingExceptions(); +#else + UNUSED_PARAM(ctx); + return false; +#endif +} + +void JSGlobalContextSetIncludesNativeCallStackWhenReportingExceptions(JSGlobalContextRef ctx, bool includesNativeCallStack) +{ +#if ENABLE(REMOTE_INSPECTOR) + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + JSGlobalObject* globalObject = exec->vmEntryGlobalObject(); + globalObject->inspectorController().setIncludesNativeCallStackWhenReportingExceptions(includesNativeCallStack); +#else + UNUSED_PARAM(ctx); + UNUSED_PARAM(includesNativeCallStack); +#endif +} + +#if USE(CF) +CFRunLoopRef JSGlobalContextGetDebuggerRunLoop(JSGlobalContextRef ctx) +{ +#if ENABLE(REMOTE_INSPECTOR) + if (!ctx) { + ASSERT_NOT_REACHED(); + return nullptr; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + return exec->vmEntryGlobalObject()->inspectorDebuggable().targetRunLoop(); +#else + UNUSED_PARAM(ctx); + return nullptr; +#endif +} +void JSGlobalContextSetDebuggerRunLoop(JSGlobalContextRef ctx, CFRunLoopRef runLoop) +{ +#if ENABLE(REMOTE_INSPECTOR) + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + exec->vmEntryGlobalObject()->inspectorDebuggable().setTargetRunLoop(runLoop); +#else + UNUSED_PARAM(ctx); + 
UNUSED_PARAM(runLoop); +#endif +} +#endif // USE(CF) + +#if ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS) +Inspector::AugmentableInspectorController* JSGlobalContextGetAugmentableInspectorController(JSGlobalContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return nullptr; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + return &exec->vmEntryGlobalObject()->inspectorController(); +} +#endif diff --git a/Source/JavaScriptCore/API/JSContextRef.h b/Source/JavaScriptCore/API/JSContextRef.h index c8db1e56d..0c800bced 100644 --- a/Source/JavaScriptCore/API/JSContextRef.h +++ b/Source/JavaScriptCore/API/JSContextRef.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -48,7 +48,7 @@ extern "C" { synchronization is required. @result The created JSContextGroup. */ -JS_EXPORT JSContextGroupRef JSContextGroupCreate() CF_AVAILABLE(10_6, 7_0); +JS_EXPORT JSContextGroupRef JSContextGroupCreate(void) CF_AVAILABLE(10_6, 7_0); /*! @function @@ -141,7 +141,7 @@ JS_EXPORT JSGlobalContextRef JSContextGetGlobalContext(JSContextRef ctx) CF_AVAI @discussion A JSGlobalContext's name is exposed for remote debugging to make it easier to identify the context you would like to attach to. */ -JS_EXPORT JSStringRef JSGlobalContextCopyName(JSGlobalContextRef ctx); +JS_EXPORT JSStringRef JSGlobalContextCopyName(JSGlobalContextRef ctx) CF_AVAILABLE(10_10, 8_0); /*! @function @@ -149,7 +149,7 @@ JS_EXPORT JSStringRef JSGlobalContextCopyName(JSGlobalContextRef ctx); @param ctx The JSGlobalContext that you want to name. @param name The remote debugging name to set on ctx. */ -JS_EXPORT void JSGlobalContextSetName(JSGlobalContextRef ctx, JSStringRef name); +JS_EXPORT void JSGlobalContextSetName(JSGlobalContextRef ctx, JSStringRef name) CF_AVAILABLE(10_10, 8_0); #ifdef __cplusplus } diff --git a/Source/JavaScriptCore/API/JSContextRefInspectorSupport.h b/Source/JavaScriptCore/API/JSContextRefInspectorSupport.h new file mode 100644 index 000000000..a09d828bd --- /dev/null +++ b/Source/JavaScriptCore/API/JSContextRefInspectorSupport.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextRefInspectorSupport_h +#define JSContextRefInspectorSupport_h + +#ifndef __cplusplus +#error Requires C++ Support. +#endif + +#include + +namespace Inspector { +class AugmentableInspectorController; +} + +extern "C" { +JS_EXPORT Inspector::AugmentableInspectorController* JSGlobalContextGetAugmentableInspectorController(JSGlobalContextRef); +} + +#endif // JSContextRefInspectorSupport_h diff --git a/Source/JavaScriptCore/API/JSContextRefInternal.h b/Source/JavaScriptCore/API/JSContextRefInternal.h new file mode 100644 index 000000000..79d7eb6f3 --- /dev/null +++ b/Source/JavaScriptCore/API/JSContextRefInternal.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextRefInternal_h +#define JSContextRefInternal_h + +#include "JSContextRefPrivate.h" + +#if USE(CF) +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if USE(CF) +/*! +@function +@abstract Gets the run loop used by the Web Inspector debugger when evaluating JavaScript in this context. +@param ctx The JSGlobalContext whose setting you want to get. +*/ +JS_EXPORT CFRunLoopRef JSGlobalContextGetDebuggerRunLoop(JSGlobalContextRef ctx) CF_AVAILABLE(10_10, 8_0); + +/*! 
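+@discussion An illustrative sketch (an assumption, not from the original
+ header): pinning the Web Inspector debugger to the main run loop when no
+ run loop has been set.
+
+@textblock
+    if (!JSGlobalContextGetDebuggerRunLoop(ctx))
+        JSGlobalContextSetDebuggerRunLoop(ctx, CFRunLoopGetMain());
+@/textblock
+*/
+
+/*!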
+@function
+@abstract Sets the run loop used by the Web Inspector debugger when evaluating JavaScript in this context.
+@param ctx The JSGlobalContext that you want to change.
+@param runLoop The new value of the setting for the context.
+*/
+JS_EXPORT void JSGlobalContextSetDebuggerRunLoop(JSGlobalContextRef ctx, CFRunLoopRef runLoop) CF_AVAILABLE(10_10, 8_0);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // JSContextRefInternal_h
diff --git a/Source/JavaScriptCore/API/JSContextRefPrivate.h b/Source/JavaScriptCore/API/JSContextRefPrivate.h
index 780a60306..19604ea74 100644
--- a/Source/JavaScriptCore/API/JSContextRefPrivate.h
+++ b/Source/JavaScriptCore/API/JSContextRefPrivate.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Apple Computer, Inc. All rights reserved.
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -10,17 +10,17 @@
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 #ifndef JSContextRefPrivate_h
@@ -85,14 +85,48 @@ typedef bool
    need to call JSContextGroupSetExecutionTimeLimit before you start executing
    any scripts.
 */
-JS_EXPORT void JSContextGroupSetExecutionTimeLimit(JSContextGroupRef, double limit, JSShouldTerminateCallback, void* context) CF_AVAILABLE(10_6, 7_0);
+JS_EXPORT void JSContextGroupSetExecutionTimeLimit(JSContextGroupRef group, double limit, JSShouldTerminateCallback callback, void* context) CF_AVAILABLE(10_6, 7_0);

 /*!
 @function
 @abstract Clears the script execution time limit.
 @param group The JavaScript context group that the time limit is cleared on.
 */
-JS_EXPORT void JSContextGroupClearExecutionTimeLimit(JSContextGroupRef) CF_AVAILABLE(10_6, 7_0);
+JS_EXPORT void JSContextGroupClearExecutionTimeLimit(JSContextGroupRef group) CF_AVAILABLE(10_6, 7_0);
+
+/*!
+@function
+@abstract Gets whether or not remote inspection is enabled on the context.
+@param ctx The JSGlobalContext whose setting you want to get.
+@result The value of the setting, true if remote inspection is enabled, otherwise false.
+@discussion Remote inspection is enabled by default.
+*/
+JS_EXPORT bool JSGlobalContextGetRemoteInspectionEnabled(JSGlobalContextRef ctx) CF_AVAILABLE(10_10, 8_0);
+
+/*!
+@function
+@abstract Sets the remote inspection setting for a context.
+@param ctx The JSGlobalContext that you want to change.
+@param enabled The new remote inspection enabled setting for the context.
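+@discussion A brief usage sketch (illustrative, not from the original header):
+
+@textblock
+    JSGlobalContextRef ctx = JSGlobalContextCreate(NULL);
+    JSGlobalContextSetRemoteInspectionEnabled(ctx, false);
+@/textblock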
+*/
+JS_EXPORT void JSGlobalContextSetRemoteInspectionEnabled(JSGlobalContextRef ctx, bool enabled) CF_AVAILABLE(10_10, 8_0);
+
+/*!
+@function
+@abstract Gets the setting for whether the native call stack is included when reporting exceptions.
+@param ctx The JSGlobalContext whose setting you want to get.
+@result The value of the setting, true if the native call stack is included when reporting exceptions, otherwise false.
+@discussion This setting is true by default.
+*/
+JS_EXPORT bool JSGlobalContextGetIncludesNativeCallStackWhenReportingExceptions(JSGlobalContextRef ctx) CF_AVAILABLE(10_10, 8_0);
+
+/*!
+@function
+@abstract Sets whether the native call stack is included when reporting exceptions.
+@param ctx The JSGlobalContext that you want to change.
+@param includesNativeCallStack The new value of the setting for the context.
+*/
+JS_EXPORT void JSGlobalContextSetIncludesNativeCallStackWhenReportingExceptions(JSGlobalContextRef ctx, bool includesNativeCallStack) CF_AVAILABLE(10_10, 8_0);

 #ifdef __cplusplus
 }
diff --git a/Source/JavaScriptCore/API/JSExport.h b/Source/JavaScriptCore/API/JSExport.h
new file mode 100644
index 000000000..b8a484909
--- /dev/null
+++ b/Source/JavaScriptCore/API/JSExport.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import
+
+#if JSC_OBJC_API_ENABLED
+
+/*!
+@protocol
+@abstract JSExport provides a declarative way to export Objective-C objects and
+ classes -- including properties, instance methods, class methods, and
+ initializers -- to JavaScript.
+
+@discussion When an Objective-C object is exported to JavaScript, a JavaScript
+ wrapper object is created.
+
+ In JavaScript, inheritance works via a chain of prototype objects.
+ For each Objective-C class in each JSContext, an object appropriate for use
+ as a prototype will be provided. For the class NSObject the prototype
+ will be the Object prototype. For all other Objective-C
+ classes a prototype will be created. The prototype for a given
+ Objective-C class will have its internal [Prototype] property set to point to
+ the prototype created for the Objective-C class's superclass. As such the
+ prototype chain for a JavaScript wrapper object will reflect the wrapped
+ Objective-C type's inheritance hierarchy.
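+
+ For example (an illustrative sketch), a wrapper for an instance of a
+ hypothetical MyClass, a direct subclass of NSObject, sits on a prototype
+ chain of the form:
+
+@textblock
+    wrapper -> MyClass prototype -> Object prototype
+@/textblock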
+
+ JavaScriptCore also produces a constructor for each Objective-C class. The
+ constructor has a property named 'prototype' that references the prototype,
+ and the prototype has a property named 'constructor' that references the
+ constructor.
+
+ By default JavaScriptCore does not export any methods or properties from an
+ Objective-C class to JavaScript; however methods and properties may be exported
+ explicitly using JSExport. For each protocol that a class conforms to, if the
+ protocol incorporates the protocol JSExport, JavaScriptCore exports the methods
+ and properties in that protocol to JavaScript.
+
+ For each exported instance method JavaScriptCore will assign a corresponding
+ JavaScript function to the prototype. For each exported Objective-C property
+ JavaScriptCore will assign a corresponding JavaScript accessor to the prototype.
+ For each exported class method JavaScriptCore will assign a corresponding
+ JavaScript function to the constructor. For example:
+
+@textblock
+    @protocol MyClassJavaScriptMethods <JSExport>
+    - (void)foo;
+    @end
+
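+    // MyClass adopts MyClassJavaScriptMethods, so -foo (but not -bar)
+    // will be exported to JavaScript.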
+    @interface MyClass : NSObject <MyClassJavaScriptMethods>
+    - (void)foo;
+    - (void)bar;
+    @end
+@/textblock
+
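+    Given the declarations above, a wrapper for a MyClass instance exposes
+    foo (but not bar) to script. An illustrative sketch from Objective-C,
+    where 'instance' and 'context' are hypothetical locals:
+
+@textblock
+    JSValue *wrapper = [JSValue valueWithObject:instance inContext:context];
+    [wrapper invokeMethod:@"foo" withArguments:@[]]; // calls -[MyClass foo]
+    // wrapper[@"bar"] evaluates to undefined; bar is not exported.
+@/textblock
+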
+
+ Data properties that are created on the prototype or constructor objects have
+ the attributes: writable:true, enumerable:false, configurable:true.
+ Accessor properties have the attributes: enumerable:false and configurable:true.
+
+ If an instance of MyClass is converted to a JavaScript value, the resulting
+ wrapper object will (via its prototype) export the method foo to JavaScript,
+ since the class conforms to the MyClassJavaScriptMethods protocol, and this
+ protocol incorporates JSExport. bar will not be exported.
+
+ JSExport supports properties, arguments, and return values of the following types:
+
+ Primitive numbers: signed values up to 32-bits convert using JSValue's
+ valueWithInt32/toInt32. Unsigned values up to 32-bits convert using JSValue's
+ valueWithUInt32/toUInt32. All other numeric values convert using JSValue's
+ valueWithDouble/toDouble.
+
+ BOOL: values convert using JSValue's valueWithBool/toBool.
+
+ id: values convert using JSValue's valueWithObject/toObject.
+
+ Objective-C instance pointers: Pointers convert using JSValue's
+ valueWithObjectOfClass/toObject.
+
+ C structs: C structs for CGPoint, NSRange, CGRect, and CGSize convert using
+ JSValue's appropriate methods. Other C structs are not supported.
+
+ Blocks: Blocks convert using JSValue's valueWithObject/toObject.
+
+ All objects that conform to JSExport convert to JavaScript wrapper objects,
+ even if they subclass classes that would otherwise behave differently. For
+ example, if a subclass of NSString conforms to JSExport, it converts to
+ JavaScript as a wrapper object rather than a JavaScript string.
+*/
+@protocol JSExport
+@end
+
+/*!
+@define
+@abstract Rename a selector when it's exported to JavaScript.
+@discussion When a selector that takes one or more arguments is converted to a JavaScript
+ property name, by default a property name will be generated by performing the
+ following conversion:
+
+ - All colons are removed from the selector.
+
+ - Any lowercase letter that had followed a colon will be capitalized.
+
+ Under the default conversion a selector doFoo:withBar: will be exported as
+ doFooWithBar. The default conversion may be overridden using the JSExportAs
+ macro, for example to export a method doFoo:withBar: as doFoo:
+
+@textblock
+    @protocol MyClassJavaScriptMethods <JSExport>
+    JSExportAs(doFoo,
+    - (void)doFoo:(id)foo withBar:(id)bar
+    );
+    @end
+@/textblock
+
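+    With this declaration in place, script would call the renamed method as
+    (an illustrative sketch):
+
+@textblock
+    myObject.doFoo(foo, bar);
+@/textblock
+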
+
+ Note that the JSExportAs macro may only be applied to a selector that takes one
+ or more arguments.
+*/
+#define JSExportAs(PropertyName, Selector) \
+    @optional Selector __JS_EXPORT_AS__##PropertyName:(id)argument; @required Selector
+
+#endif
diff --git a/Source/JavaScriptCore/API/JSManagedValue.h b/Source/JavaScriptCore/API/JSManagedValue.h
new file mode 100644
index 000000000..01073fad9
--- /dev/null
+++ b/Source/JavaScriptCore/API/JSManagedValue.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSManagedValue_h
+#define JSManagedValue_h
+
+#import
+#import
+
+#if JSC_OBJC_API_ENABLED
+
+@class JSValue;
+@class JSContext;
+
+/*!
+@interface
+@discussion JSManagedValue represents a "conditionally retained" JSValue.
+ "Conditionally retained" means that as long as the JSManagedValue's
+ JSValue is reachable through the JavaScript object graph,
+ or through the Objective-C object graph reported to the JSVirtualMachine using
+ addManagedReference:withOwner:, the corresponding JSValue will
+ be retained. However, if neither graph reaches the JSManagedValue, the
+ corresponding JSValue will be released and set to nil.
+
+The primary use for a JSManagedValue is to store a JSValue in an Objective-C
+or Swift object that is exported to JavaScript. It is incorrect to store a
+JSValue itself in such an object, since doing so creates a retain cycle.
+*/
+NS_CLASS_AVAILABLE(10_9, 7_0)
+@interface JSManagedValue : NSObject
+
+/*!
+@method
+@abstract Create a JSManagedValue from a JSValue.
+@result The new JSManagedValue.
+*/
++ (JSManagedValue *)managedValueWithValue:(JSValue *)value;
++ (JSManagedValue *)managedValueWithValue:(JSValue *)value andOwner:(id)owner NS_AVAILABLE(10_10, 8_0);
+
+/*!
+@method
+@abstract Create a JSManagedValue.
+@result The new JSManagedValue.
+*/
+- (instancetype)initWithValue:(JSValue *)value;
+
+/*!
+@property
+@abstract Get the JSValue from the JSManagedValue.
+@result The corresponding JSValue for this JSManagedValue or
+ nil if the JSValue has been collected.
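+@discussion An illustrative sketch of the intended pattern (an assumption,
+ not from the original header): an object exported to JavaScript stores a
+ callback as a JSManagedValue rather than retaining the JSValue directly.
+
+@textblock
+    // In an object exported to JavaScript:
+    self.callback = [JSManagedValue managedValueWithValue:callbackValue
+                                                 andOwner:self];
+    JSValue *callback = self.callback.value; // nil once collected
+@/textblock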
+*/ +@property (readonly, strong) JSValue *value; + +@end + +#endif // JSC_OBJC_API_ENABLED + +#endif // JSManagedValue_h diff --git a/Source/JavaScriptCore/API/JSManagedValueInternal.h b/Source/JavaScriptCore/API/JSManagedValueInternal.h new file mode 100644 index 000000000..2443fe5a9 --- /dev/null +++ b/Source/JavaScriptCore/API/JSManagedValueInternal.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSManagedValueInternal_h +#define JSManagedValueInternal_h + +#import + +#if JSC_OBJC_API_ENABLED + +@interface JSManagedValue(Internal) + +- (void)didAddOwner:(id)owner; +- (void)didRemoveOwner:(id)owner; + +@end + +#endif // JSC_OBJC_API_ENABLED + +#endif // JSManagedValueInternal_h diff --git a/Source/JavaScriptCore/API/JSObjectRef.cpp b/Source/JavaScriptCore/API/JSObjectRef.cpp index 56fe90b47..98823676e 100644 --- a/Source/JavaScriptCore/API/JSObjectRef.cpp +++ b/Source/JavaScriptCore/API/JSObjectRef.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2006, 2007, 2008, 2016 Apple Inc. All rights reserved. * Copyright (C) 2008 Kelvin W Sherlock (ksherlock@gmail.com) * * Redistribution and use in source and binary forms, with or without @@ -11,10 +11,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -29,16 +29,16 @@ #include "JSObjectRefPrivate.h" #include "APICast.h" -#include "ButterflyInlines.h" -#include "CodeBlock.h" -#include "CopiedSpaceInlines.h" +#include "APIUtils.h" #include "DateConstructor.h" #include "ErrorConstructor.h" +#include "Exception.h" #include "FunctionConstructor.h" #include "Identifier.h" #include "InitializeThreading.h" #include "JSAPIWrapperObject.h" #include "JSArray.h" +#include "JSCInlines.h" #include "JSCallbackConstructor.h" #include "JSCallbackFunction.h" #include "JSCallbackObject.h" @@ -51,20 +51,23 @@ #include "JSValueRef.h" #include "ObjectConstructor.h" #include "ObjectPrototype.h" -#include "Operations.h" #include "PropertyNameArray.h" #include "RegExpConstructor.h" +#if ENABLE(REMOTE_INSPECTOR) +#include "JSGlobalObjectInspectorController.h" +#endif + using namespace JSC; JSClassRef JSClassCreate(const JSClassDefinition* definition) { initializeThreading(); - RefPtr jsClass = (definition->attributes & kJSClassAttributeNoAutomaticPrototype) + auto jsClass = (definition->attributes & kJSClassAttributeNoAutomaticPrototype) ? OpaqueJSClass::createNoAutomaticPrototype(definition) : OpaqueJSClass::create(definition); - return jsClass.release().leakRef(); + return &jsClass.leakRef(); } JSClassRef JSClassRetain(JSClassRef jsClass) @@ -85,14 +88,14 @@ JSObjectRef JSObjectMake(JSContextRef ctx, JSClassRef jsClass, void* data) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); if (!jsClass) return toRef(constructEmptyObject(exec)); JSCallbackObject* object = JSCallbackObject::create(exec, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->callbackObjectStructure(), jsClass, data); if (JSObject* prototype = jsClass->prototype(exec)) - object->setPrototype(exec->vm(), prototype); + object->setPrototypeDirect(exec->vm(), prototype); return toRef(object); } @@ -104,7 +107,7 @@ JSObjectRef JSObjectMakeFunctionWithCallback(JSContextRef ctx, JSStringRef name, return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); return toRef(JSCallbackFunction::create(exec->vm(), exec->lexicalGlobalObject(), callAsFunction, name ? name->string() : ASCIILiteral("anonymous"))); } @@ -115,7 +118,7 @@ JSObjectRef JSObjectMakeConstructor(JSContextRef ctx, JSClassRef jsClass, JSObje return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsPrototype = jsClass ? jsClass->prototype(exec) : 0; if (!jsPrototype) @@ -133,23 +136,20 @@ JSObjectRef JSObjectMakeFunction(JSContextRef ctx, JSStringRef name, unsigned pa return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); startingLineNumber = std::max(1, startingLineNumber); - Identifier nameID = name ? name->identifier(&exec->vm()) : Identifier(exec, "anonymous"); + Identifier nameID = name ? 
name->identifier(&exec->vm()) : Identifier::fromString(exec, "anonymous"); MarkedArgumentBuffer args; for (unsigned i = 0; i < parameterCount; i++) args.append(jsString(exec, parameterNames[i]->string())); args.append(jsString(exec, body->string())); - JSObject* result = constructFunction(exec, exec->lexicalGlobalObject(), args, nameID, sourceURL->string(), TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber::first())); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + auto sourceURLString = sourceURL ? sourceURL->string() : String(); + JSObject* result = constructFunction(exec, exec->lexicalGlobalObject(), args, nameID, SourceOrigin { sourceURLString }, sourceURLString, TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber())); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) result = 0; - } return toRef(result); } @@ -160,7 +160,7 @@ JSObjectRef JSObjectMakeArray(JSContextRef ctx, size_t argumentCount, const JSVa return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSObject* result; if (argumentCount) { @@ -172,12 +172,8 @@ JSObjectRef JSObjectMakeArray(JSContextRef ctx, size_t argumentCount, const JSVa } else result = constructEmptyArray(exec, 0); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) result = 0; - } return toRef(result); } @@ -189,19 +185,15 @@ JSObjectRef JSObjectMakeDate(JSContextRef ctx, size_t argumentCount, const JSVal return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); MarkedArgumentBuffer argList; for (size_t i = 0; i < argumentCount; ++i) argList.append(toJS(exec, arguments[i])); - JSObject* result = constructDate(exec, exec->lexicalGlobalObject(), argList); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + JSObject* result = constructDate(exec, exec->lexicalGlobalObject(), JSValue(), argList); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) result = 0; - } return toRef(result); } @@ -213,18 +205,14 @@ JSObjectRef JSObjectMakeError(JSContextRef ctx, size_t argumentCount, const JSVa return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue message = argumentCount ? 
toJS(exec, arguments[0]) : jsUndefined(); Structure* errorStructure = exec->lexicalGlobalObject()->errorStructure(); JSObject* result = ErrorInstance::create(exec, errorStructure, message); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) result = 0; - } return toRef(result); } @@ -236,19 +224,15 @@ JSObjectRef JSObjectMakeRegExp(JSContextRef ctx, size_t argumentCount, const JSV return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); MarkedArgumentBuffer argList; for (size_t i = 0; i < argumentCount; ++i) argList.append(toJS(exec, arguments[i])); - JSObject* result = constructRegExp(exec, exec->lexicalGlobalObject(), argList); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + JSObject* result = constructRegExp(exec, exec->lexicalGlobalObject(), argList); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) result = 0; - } return toRef(result); } @@ -260,10 +244,10 @@ JSValueRef JSObjectGetPrototype(JSContextRef ctx, JSObjectRef object) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - JSObject* jsObject = toJS(object); - return toRef(exec, jsObject->prototype()); + JSObject* jsObject = toJS(object); + return toRef(exec, jsObject->getPrototypeDirect()); } void JSObjectSetPrototype(JSContextRef ctx, JSObjectRef object, JSValueRef value) @@ -273,12 +257,21 @@ void JSObjectSetPrototype(JSContextRef ctx, JSObjectRef object, JSValueRef value return; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + VM& vm = exec->vm(); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); JSValue jsValue = toJS(exec, value); - jsObject->setPrototypeWithCycleCheck(exec, jsValue.isObject() ? jsValue : jsNull()); + if (JSProxy* proxy = jsDynamicCast(vm, jsObject)) { + if (JSGlobalObject* globalObject = jsDynamicCast(vm, proxy->target())) { + globalObject->resetPrototype(exec->vm(), jsValue.isObject() ? jsValue : jsNull()); + return; + } + // Someday we might use proxies for something other than JSGlobalObjects, but today is not that day. + RELEASE_ASSERT_NOT_REACHED(); + } + jsObject->setPrototype(exec->vm(), exec, jsValue.isObject() ? 
jsValue : jsNull()); } bool JSObjectHasProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName) @@ -288,7 +281,7 @@ bool JSObjectHasProperty(JSContextRef ctx, JSObjectRef object, JSStringRef prope return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); @@ -302,16 +295,12 @@ JSValueRef JSObjectGetProperty(JSContextRef ctx, JSObjectRef object, JSStringRef return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); JSValue jsValue = jsObject->get(exec, propertyName->identifier(&exec->vm())); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); - } + handleExceptionIfNeeded(exec, exception); return toRef(exec, jsValue); } @@ -322,25 +311,25 @@ void JSObjectSetProperty(JSContextRef ctx, JSObjectRef object, JSStringRef prope return; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + VM& vm = exec->vm(); + JSLockHolder locker(vm); + auto scope = DECLARE_CATCH_SCOPE(vm); JSObject* jsObject = toJS(object); Identifier name(propertyName->identifier(&exec->vm())); JSValue jsValue = toJS(exec, value); - if (attributes && !jsObject->hasProperty(exec, name)) { - PropertyDescriptor desc(jsValue, attributes); - jsObject->methodTable()->defineOwnProperty(jsObject, exec, name, desc, false); - } else { - PutPropertySlot slot(jsObject); - jsObject->methodTable()->put(jsObject, exec, name, jsValue, slot); - } - - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + bool doesNotHaveProperty = attributes && !jsObject->hasProperty(exec, name); + if (LIKELY(!scope.exception())) { + if (doesNotHaveProperty) { + PropertyDescriptor desc(jsValue, attributes); + jsObject->methodTable()->defineOwnProperty(jsObject, exec, name, desc, false); + } else { + PutPropertySlot slot(jsObject); + jsObject->methodTable()->put(jsObject, exec, name, jsValue, slot); + } } + handleExceptionIfNeeded(exec, exception); } JSValueRef JSObjectGetPropertyAtIndex(JSContextRef ctx, JSObjectRef object, unsigned propertyIndex, JSValueRef* exception) @@ -350,16 +339,12 @@ JSValueRef JSObjectGetPropertyAtIndex(JSContextRef ctx, JSObjectRef object, unsi return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); JSValue jsValue = jsObject->get(exec, propertyIndex); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); - } + handleExceptionIfNeeded(exec, exception); return toRef(exec, jsValue); } @@ -371,17 +356,13 @@ void JSObjectSetPropertyAtIndex(JSContextRef ctx, JSObjectRef object, unsigned p return; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); JSValue jsValue = toJS(exec, value); jsObject->methodTable()->putByIndex(jsObject, exec, propertyIndex, jsValue, false); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); - } + handleExceptionIfNeeded(exec, exception); } bool JSObjectDeleteProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception) @@ -391,30 +372,48 @@ bool JSObjectDeleteProperty(JSContextRef ctx, JSObjectRef object, JSStringRef pr return false; } ExecState* exec = toJS(ctx); - APIEntryShim 
entryShim(exec); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); bool result = jsObject->methodTable()->deleteProperty(jsObject, exec, propertyName->identifier(&exec->vm())); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); - } + handleExceptionIfNeeded(exec, exception); return result; } +// API objects have private properties, which may get accessed during destruction. This +// helper lets us get the ClassInfo of an API object from a function that may get called +// during destruction. +static const ClassInfo* classInfoPrivate(JSObject* jsObject) +{ + VM& vm = *jsObject->vm(); + + if (vm.currentlyDestructingCallbackObject != jsObject) + return jsObject->classInfo(vm); + + return vm.currentlyDestructingCallbackObjectClassInfo; +} + void* JSObjectGetPrivate(JSObjectRef object) { JSObject* jsObject = uncheckedToJS(object); + VM& vm = *jsObject->vm(); + + const ClassInfo* classInfo = classInfoPrivate(jsObject); - if (jsObject->inherits(JSCallbackObject::info())) - return jsCast*>(jsObject)->getPrivate(); - if (jsObject->inherits(JSCallbackObject::info())) - return jsCast*>(jsObject)->getPrivate(); + // Get wrapped object if proxied + if (classInfo->isSubClassOf(JSProxy::info())) { + jsObject = static_cast(jsObject)->target(); + classInfo = jsObject->classInfo(vm); + } + + if (classInfo->isSubClassOf(JSCallbackObject::info())) + return static_cast*>(jsObject)->getPrivate(); + if (classInfo->isSubClassOf(JSCallbackObject::info())) + return static_cast*>(jsObject)->getPrivate(); #if JSC_OBJC_API_ENABLED - if (jsObject->inherits(JSCallbackObject::info())) - return jsCast*>(jsObject)->getPrivate(); + if (classInfo->isSubClassOf(JSCallbackObject::info())) + return static_cast*>(jsObject)->getPrivate(); #endif return 0; @@ -423,18 +422,27 @@ void* JSObjectGetPrivate(JSObjectRef object) bool JSObjectSetPrivate(JSObjectRef object, void* data) { JSObject* jsObject = uncheckedToJS(object); + VM& vm = *jsObject->vm(); + + const ClassInfo* classInfo = classInfoPrivate(jsObject); - if (jsObject->inherits(JSCallbackObject::info())) { - jsCast*>(jsObject)->setPrivate(data); + // Get wrapped object if proxied + if (classInfo->isSubClassOf(JSProxy::info())) { + jsObject = static_cast(jsObject)->target(); + classInfo = jsObject->classInfo(vm); + } + + if (classInfo->isSubClassOf(JSCallbackObject::info())) { + static_cast*>(jsObject)->setPrivate(data); return true; } - if (jsObject->inherits(JSCallbackObject::info())) { - jsCast*>(jsObject)->setPrivate(data); + if (classInfo->isSubClassOf(JSCallbackObject::info())) { + static_cast*>(jsObject)->setPrivate(data); return true; } #if JSC_OBJC_API_ENABLED - if (jsObject->inherits(JSCallbackObject::info())) { - jsCast*>(jsObject)->setPrivate(data); + if (classInfo->isSubClassOf(JSCallbackObject::info())) { + static_cast*>(jsObject)->setPrivate(data); return true; } #endif @@ -445,16 +453,23 @@ bool JSObjectSetPrivate(JSObjectRef object, void* data) JSValueRef JSObjectGetPrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName) { ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + VM& vm = exec->vm(); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); JSValue result; Identifier name(propertyName->identifier(&exec->vm())); - if (jsObject->inherits(JSCallbackObject::info())) + + + // Get wrapped object if proxied + if (jsObject->inherits(vm, JSProxy::info())) + jsObject = jsCast(jsObject)->target(); + + if (jsObject->inherits(vm, 
JSCallbackObject::info())) result = jsCast*>(jsObject)->getPrivateProperty(name); - else if (jsObject->inherits(JSCallbackObject::info())) + else if (jsObject->inherits(vm, JSCallbackObject::info())) result = jsCast*>(jsObject)->getPrivateProperty(name); #if JSC_OBJC_API_ENABLED - else if (jsObject->inherits(JSCallbackObject::info())) + else if (jsObject->inherits(vm, JSCallbackObject::info())) result = jsCast*>(jsObject)->getPrivateProperty(name); #endif return toRef(exec, result); @@ -463,20 +478,26 @@ JSValueRef JSObjectGetPrivateProperty(JSContextRef ctx, JSObjectRef object, JSSt bool JSObjectSetPrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value) { ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + VM& vm = exec->vm(); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); JSValue jsValue = value ? toJS(exec, value) : JSValue(); Identifier name(propertyName->identifier(&exec->vm())); - if (jsObject->inherits(JSCallbackObject::info())) { + + // Get wrapped object if proxied + if (jsObject->inherits(vm, JSProxy::info())) + jsObject = jsCast(jsObject)->target(); + + if (jsObject->inherits(vm, JSCallbackObject::info())) { jsCast*>(jsObject)->setPrivateProperty(exec->vm(), name, jsValue); return true; } - if (jsObject->inherits(JSCallbackObject::info())) { + if (jsObject->inherits(vm, JSCallbackObject::info())) { jsCast*>(jsObject)->setPrivateProperty(exec->vm(), name, jsValue); return true; } #if JSC_OBJC_API_ENABLED - if (jsObject->inherits(JSCallbackObject::info())) { + if (jsObject->inherits(vm, JSCallbackObject::info())) { jsCast*>(jsObject)->setPrivateProperty(exec->vm(), name, jsValue); return true; } @@ -487,19 +508,25 @@ bool JSObjectSetPrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRe bool JSObjectDeletePrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName) { ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + VM& vm = exec->vm(); + JSLockHolder locker(exec); JSObject* jsObject = toJS(object); Identifier name(propertyName->identifier(&exec->vm())); - if (jsObject->inherits(JSCallbackObject::info())) { + + // Get wrapped object if proxied + if (jsObject->inherits(vm, JSProxy::info())) + jsObject = jsCast(jsObject)->target(); + + if (jsObject->inherits(vm, JSCallbackObject::info())) { jsCast*>(jsObject)->deletePrivateProperty(name); return true; } - if (jsObject->inherits(JSCallbackObject::info())) { + if (jsObject->inherits(vm, JSCallbackObject::info())) { jsCast*>(jsObject)->deletePrivateProperty(name); return true; } #if JSC_OBJC_API_ENABLED - if (jsObject->inherits(JSCallbackObject::info())) { + if (jsObject->inherits(vm, JSCallbackObject::info())) { jsCast*>(jsObject)->deletePrivateProperty(name); return true; } @@ -507,19 +534,20 @@ bool JSObjectDeletePrivateProperty(JSContextRef ctx, JSObjectRef object, JSStrin return false; } -bool JSObjectIsFunction(JSContextRef, JSObjectRef object) +bool JSObjectIsFunction(JSContextRef ctx, JSObjectRef object) { if (!object) return false; + JSLockHolder locker(toJS(ctx)); CallData callData; JSCell* cell = toJS(object); - return cell->methodTable()->getCallData(cell, callData) != CallTypeNone; + return cell->methodTable()->getCallData(cell, callData) != CallType::None; } JSValueRef JSObjectCallAsFunction(JSContextRef ctx, JSObjectRef object, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) { ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder 
locker(exec); if (!object) return 0; @@ -536,16 +564,12 @@ JSValueRef JSObjectCallAsFunction(JSContextRef ctx, JSObjectRef object, JSObject CallData callData; CallType callType = jsObject->methodTable()->getCallData(jsObject, callData); - if (callType == CallTypeNone) + if (callType == CallType::None) return 0; - JSValueRef result = toRef(exec, call(exec, jsObject, callType, callData, jsThisObject, argList)); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + JSValueRef result = toRef(exec, profiledCall(exec, ProfilingReason::API, jsObject, callType, callData, jsThisObject, argList)); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) result = 0; - } return result; } @@ -555,13 +579,13 @@ bool JSObjectIsConstructor(JSContextRef, JSObjectRef object) return false; JSObject* jsObject = toJS(object); ConstructData constructData; - return jsObject->methodTable()->getConstructData(jsObject, constructData) != ConstructTypeNone; + return jsObject->methodTable()->getConstructData(jsObject, constructData) != ConstructType::None; } JSObjectRef JSObjectCallAsConstructor(JSContextRef ctx, JSObjectRef object, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) { ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); if (!object) return 0; @@ -570,19 +594,16 @@ JSObjectRef JSObjectCallAsConstructor(JSContextRef ctx, JSObjectRef object, size ConstructData constructData; ConstructType constructType = jsObject->methodTable()->getConstructData(jsObject, constructData); - if (constructType == ConstructTypeNone) + if (constructType == ConstructType::None) return 0; MarkedArgumentBuffer argList; for (size_t i = 0; i < argumentCount; i++) argList.append(toJS(exec, arguments[i])); - JSObjectRef result = toRef(construct(exec, jsObject, constructType, constructData, argList)); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + + JSObjectRef result = toRef(profiledConstruct(exec, ProfilingReason::API, jsObject, constructType, constructData, argList)); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) result = 0; - } return result; } @@ -606,15 +627,15 @@ JSPropertyNameArrayRef JSObjectCopyPropertyNames(JSContextRef ctx, JSObjectRef o ASSERT_NOT_REACHED(); return 0; } - JSObject* jsObject = toJS(object); ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); VM* vm = &exec->vm(); + JSObject* jsObject = toJS(object); JSPropertyNameArrayRef propertyNames = new OpaqueJSPropertyNameArray(vm); - PropertyNameArray array(vm); - jsObject->methodTable()->getPropertyNames(jsObject, exec, array, ExcludeDontEnumProperties); + PropertyNameArray array(vm, PropertyNameMode::Strings); + jsObject->methodTable()->getPropertyNames(jsObject, exec, array, EnumerationMode()); size_t size = array.size(); propertyNames->array.reserveInitialCapacity(size); @@ -633,7 +654,7 @@ JSPropertyNameArrayRef JSPropertyNameArrayRetain(JSPropertyNameArrayRef array) void JSPropertyNameArrayRelease(JSPropertyNameArrayRef array) { if (--array->refCount == 0) { - APIEntryShim entryShim(array->vm, false); + JSLockHolder locker(array->vm); delete array; } } @@ -651,6 +672,6 @@ JSStringRef JSPropertyNameArrayGetNameAtIndex(JSPropertyNameArrayRef array, size void JSPropertyNameAccumulatorAddName(JSPropertyNameAccumulatorRef array, JSStringRef propertyName) { PropertyNameArray* 
propertyNames = toJS(array); - APIEntryShim entryShim(propertyNames->vm()); + JSLockHolder locker(propertyNames->vm()); propertyNames->add(propertyName->identifier(propertyNames->vm())); } diff --git a/Source/JavaScriptCore/API/JSObjectRef.h b/Source/JavaScriptCore/API/JSObjectRef.h index 5e7fd69a9..95d53b7f4 100644 --- a/Source/JavaScriptCore/API/JSObjectRef.h +++ b/Source/JavaScriptCore/API/JSObjectRef.h @@ -11,10 +11,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -186,7 +186,7 @@ typedef bool @abstract The callback invoked when collecting the names of an object's properties. @param ctx The execution context to use. @param object The JSObject whose property names are being collected. -@param accumulator A JavaScript property name accumulator in which to accumulate the names of object's properties. +@param propertyNames A JavaScript property name accumulator in which to accumulate the names of object's properties. @discussion If you named your function GetPropertyNames, you would declare it like this: void GetPropertyNames(JSContextRef ctx, JSObjectRef object, JSPropertyNameAccumulatorRef propertyNames); diff --git a/Source/JavaScriptCore/API/JSRemoteInspector.cpp b/Source/JavaScriptCore/API/JSRemoteInspector.cpp new file mode 100644 index 000000000..3be61d092 --- /dev/null +++ b/Source/JavaScriptCore/API/JSRemoteInspector.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
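The hunks above fold the repeated hadException()/clearException() blocks into handleExceptionIfNeeded() without changing the C API contract: on failure the call returns NULL and, when the caller supplied a slot, *exception receives the thrown value. A minimal caller-side sketch of that contract (illustrative only; callAndReport is a made-up helper, not part of the patch):

```c
#include <JavaScriptCore/JavaScript.h>
#include <stdio.h>

/* Hypothetical helper: invoke fn with no arguments and log anything it throws. */
static void callAndReport(JSContextRef ctx, JSObjectRef fn)
{
    JSValueRef exc = NULL;
    JSValueRef result = JSObjectCallAsFunction(ctx, fn, NULL, 0, NULL, &exc);
    if (result || !exc)
        return; /* Success, or a non-callable object (no exception recorded). */

    /* Convert the exception value to a string for logging. */
    JSStringRef message = JSValueToStringCopy(ctx, exc, NULL);
    if (!message)
        return;
    char buffer[256];
    JSStringGetUTF8CString(message, buffer, sizeof(buffer));
    fprintf(stderr, "uncaught exception: %s\n", buffer);
    JSStringRelease(message);
}
```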
+ */ + +#include "config.h" +#include "JSRemoteInspector.h" + +#include "JSGlobalObjectConsoleClient.h" + +#if ENABLE(REMOTE_INSPECTOR) +#include "RemoteInspector.h" +#endif + +using namespace Inspector; + +static bool remoteInspectionEnabledByDefault = true; + +void JSRemoteInspectorDisableAutoStart(void) +{ +#if ENABLE(REMOTE_INSPECTOR) + RemoteInspector::startDisabled(); +#endif +} + +void JSRemoteInspectorStart(void) +{ +#if ENABLE(REMOTE_INSPECTOR) + RemoteInspector::singleton(); +#endif +} + +void JSRemoteInspectorSetParentProcessInformation(pid_t pid, const uint8_t* auditData, size_t auditLength) +{ +#if ENABLE(REMOTE_INSPECTOR) && PLATFORM(COCOA) + RetainPtr auditDataRef = adoptCF(CFDataCreate(kCFAllocatorDefault, auditData, auditLength)); + RemoteInspector::singleton().setParentProcessInformation(pid, auditDataRef); +#else + UNUSED_PARAM(pid); + UNUSED_PARAM(auditData); + UNUSED_PARAM(auditLength); +#endif +} + +void JSRemoteInspectorSetLogToSystemConsole(bool logToSystemConsole) +{ + JSGlobalObjectConsoleClient::setLogToSystemConsole(logToSystemConsole); +} + +bool JSRemoteInspectorGetInspectionEnabledByDefault(void) +{ + return remoteInspectionEnabledByDefault; +} + +void JSRemoteInspectorSetInspectionEnabledByDefault(bool enabledByDefault) +{ + remoteInspectionEnabledByDefault = enabledByDefault; +} diff --git a/Source/JavaScriptCore/API/JSRemoteInspector.h b/Source/JavaScriptCore/API/JSRemoteInspector.h new file mode 100644 index 000000000..2bde47949 --- /dev/null +++ b/Source/JavaScriptCore/API/JSRemoteInspector.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
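The new JSRemoteInspector.cpp above, together with the header that follows, gives embedders explicit control over remote Web Inspector availability. A hedged sketch of a plausible embedder setup (the configure-before-start ordering is an assumption on my part, not something the API mandates):

```c
#include <JavaScriptCore/JSRemoteInspector.h>

/* Hypothetical start-up routine: configure the remote inspector before any
   JSContext is created, then bring it up explicitly. */
static void configureRemoteInspection(void)
{
    JSRemoteInspectorDisableAutoStart();                   /* opt out of automatic start */
    JSRemoteInspectorSetInspectionEnabledByDefault(false); /* new debuggables start opted out */
    JSRemoteInspectorSetLogToSystemConsole(true);          /* mirror console output to the system log */
    JSRemoteInspectorStart();                              /* creates the RemoteInspector singleton */
}
```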
+ */ + +#ifndef JSRemoteInspector_h +#define JSRemoteInspector_h + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +JS_EXPORT void JSRemoteInspectorDisableAutoStart(void) CF_AVAILABLE(10_11, 9_0); +JS_EXPORT void JSRemoteInspectorStart(void) CF_AVAILABLE(10_11, 9_0); +JS_EXPORT void JSRemoteInspectorSetParentProcessInformation(pid_t, const uint8_t* auditData, size_t auditLength) CF_AVAILABLE(10_11, 9_0); + +JS_EXPORT void JSRemoteInspectorSetLogToSystemConsole(bool) CF_AVAILABLE(10_11, 9_0); + +JS_EXPORT bool JSRemoteInspectorGetInspectionEnabledByDefault(void) CF_AVAILABLE(10_11, 9_0); +JS_EXPORT void JSRemoteInspectorSetInspectionEnabledByDefault(bool) CF_AVAILABLE(10_11, 9_0); + +#ifdef __cplusplus +} +#endif + +#endif /* JSRemoteInspector_h */ diff --git a/Source/JavaScriptCore/API/JSRetainPtr.h b/Source/JavaScriptCore/API/JSRetainPtr.h index 574f7aaf1..e40084080 100644 --- a/Source/JavaScriptCore/API/JSRetainPtr.h +++ b/Source/JavaScriptCore/API/JSRetainPtr.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -57,11 +57,8 @@ public: T operator->() const { return m_ptr; } bool operator!() const { return !m_ptr; } + explicit operator bool() const { return m_ptr; } - // This conversion operator allows implicit conversion to bool but not to other integer types. - typedef T JSRetainPtr::*UnspecifiedBoolType; - operator UnspecifiedBoolType() const { return m_ptr ? 
&JSRetainPtr::m_ptr : 0; } - JSRetainPtr& operator=(const JSRetainPtr&); template<typename U> JSRetainPtr& operator=(const JSRetainPtr<U>&); JSRetainPtr& operator=(T); @@ -75,6 +72,16 @@ private: T m_ptr; }; +inline JSRetainPtr<JSStringRef> adopt(JSStringRef o) +{ + return JSRetainPtr<JSStringRef>(Adopt, o); +} + +inline JSRetainPtr<JSGlobalContextRef> adopt(JSGlobalContextRef o) +{ + return JSRetainPtr<JSGlobalContextRef>(Adopt, o); +} + template<typename T> inline JSRetainPtr<T>::JSRetainPtr(const JSRetainPtr& o) : m_ptr(o.m_ptr) { diff --git a/Source/JavaScriptCore/API/JSScriptRef.cpp b/Source/JavaScriptCore/API/JSScriptRef.cpp index 9277001d2..791738b95 100644 --- a/Source/JavaScriptCore/API/JSScriptRef.cpp +++ b/Source/JavaScriptCore/API/JSScriptRef.cpp @@ -26,13 +26,13 @@ #include "config.h" #include "APICast.h" -#include "APIShims.h" #include "Completion.h" +#include "Exception.h" #include "JSBasePrivate.h" #include "VM.h" #include "JSScriptRefPrivate.h" #include "OpaqueJSString.h" -#include "Operations.h" +#include "JSCInlines.h" #include "Parser.h" #include "SourceCode.h" #include "SourceProvider.h" @@ -41,43 +41,51 @@ using namespace JSC; struct OpaqueJSScript : public SourceProvider { public: - static WTF::PassRefPtr<OpaqueJSScript> create(VM* vm, const String& url, int startingLineNumber, const String& source) + static WTF::Ref<OpaqueJSScript> create(VM& vm, const SourceOrigin& sourceOrigin, const String& url, int startingLineNumber, const String& source) { - return WTF::adoptRef(new OpaqueJSScript(vm, url, startingLineNumber, source)); + return WTF::adoptRef(*new OpaqueJSScript(vm, sourceOrigin, url, startingLineNumber, source)); } - virtual const String& source() const override + unsigned hash() const override { - return m_source; + return m_source.get().hash(); } - VM* vm() const { return m_vm; } + StringView source() const override + { + return m_source.get(); + } + + VM& vm() const { return m_vm; } private: - OpaqueJSScript(VM* vm, const String& url, int startingLineNumber, const String& source) - : SourceProvider(url, TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber::first())) + OpaqueJSScript(VM& vm, const SourceOrigin& sourceOrigin, const String& url, int startingLineNumber, const String& source) + : SourceProvider(sourceOrigin, url, TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber()), SourceProviderSourceType::Program) , m_vm(vm) - , m_source(source) + , m_source(source.isNull() ?
*StringImpl::empty() : *source.impl()) { } virtual ~OpaqueJSScript() { } - VM* m_vm; - String m_source; + VM& m_vm; + Ref m_source; }; -static bool parseScript(VM* vm, const SourceCode& source, ParserError& error) +static bool parseScript(VM& vm, const SourceCode& source, ParserError& error) { - return JSC::parse(vm, source, 0, Identifier(), JSParseNormal, JSParseProgramCode, error); + return !!JSC::parse( + &vm, source, Identifier(), JSParserBuiltinMode::NotBuiltin, + JSParserStrictMode::NotStrict, JSParserScriptMode::Classic, SourceParseMode::ProgramMode, SuperBinding::NotNeeded, + error); } extern "C" { JSScriptRef JSScriptCreateReferencingImmortalASCIIText(JSContextGroupRef contextGroup, JSStringRef url, int startingLineNumber, const char* source, size_t length, JSStringRef* errorMessage, int* errorLine) { - VM* vm = toJS(contextGroup); - APIEntryShim entryShim(vm); + auto& vm = *toJS(contextGroup); + JSLockHolder locker(&vm); for (size_t i = 0; i < length; i++) { if (!isASCII(source[i])) return 0; @@ -85,67 +93,69 @@ JSScriptRef JSScriptCreateReferencingImmortalASCIIText(JSContextGroupRef context startingLineNumber = std::max(1, startingLineNumber); - RefPtr result = OpaqueJSScript::create(vm, url->string(), startingLineNumber, String(StringImpl::createFromLiteral(source, length))); + auto sourceURLString = url ? url->string() : String(); + auto result = OpaqueJSScript::create(vm, SourceOrigin { sourceURLString }, sourceURLString, startingLineNumber, String(StringImpl::createFromLiteral(source, length))); ParserError error; - if (!parseScript(vm, SourceCode(result), error)) { + if (!parseScript(vm, SourceCode(result.copyRef()), error)) { if (errorMessage) - *errorMessage = OpaqueJSString::create(error.m_message).leakRef(); + *errorMessage = OpaqueJSString::create(error.message()).leakRef(); if (errorLine) - *errorLine = error.m_line; - return 0; + *errorLine = error.line(); + return nullptr; } - return result.release().leakRef(); + return &result.leakRef(); } JSScriptRef JSScriptCreateFromString(JSContextGroupRef contextGroup, JSStringRef url, int startingLineNumber, JSStringRef source, JSStringRef* errorMessage, int* errorLine) { - VM* vm = toJS(contextGroup); - APIEntryShim entryShim(vm); + auto& vm = *toJS(contextGroup); + JSLockHolder locker(&vm); startingLineNumber = std::max(1, startingLineNumber); - RefPtr result = OpaqueJSScript::create(vm, url->string(), startingLineNumber, source->string()); + auto sourceURLString = url ? 
url->string() : String(); + auto result = OpaqueJSScript::create(vm, SourceOrigin { sourceURLString }, sourceURLString, startingLineNumber, source->string()); ParserError error; - if (!parseScript(vm, SourceCode(result), error)) { + if (!parseScript(vm, SourceCode(result.copyRef()), error)) { if (errorMessage) - *errorMessage = OpaqueJSString::create(error.m_message).leakRef(); + *errorMessage = OpaqueJSString::create(error.message()).leakRef(); if (errorLine) - *errorLine = error.m_line; - return 0; + *errorLine = error.line(); + return nullptr; } - return result.release().leakRef(); + return &result.leakRef(); } void JSScriptRetain(JSScriptRef script) { - APIEntryShim entryShim(script->vm()); + JSLockHolder locker(&script->vm()); script->ref(); } void JSScriptRelease(JSScriptRef script) { - APIEntryShim entryShim(script->vm()); + JSLockHolder locker(&script->vm()); script->deref(); } JSValueRef JSScriptEvaluate(JSContextRef context, JSScriptRef script, JSValueRef thisValueRef, JSValueRef* exception) { ExecState* exec = toJS(context); - APIEntryShim entryShim(exec); - if (script->vm() != &exec->vm()) { + JSLockHolder locker(exec); + if (&script->vm() != &exec->vm()) { RELEASE_ASSERT_NOT_REACHED(); return 0; } - JSValue internalException; + NakedPtr internalException; JSValue thisValue = thisValueRef ? toJS(exec, thisValueRef) : jsUndefined(); - JSValue result = evaluate(exec, SourceCode(script), thisValue, &internalException); + JSValue result = evaluate(exec, SourceCode(*script), thisValue, internalException); if (internalException) { if (exception) - *exception = toRef(exec, internalException); + *exception = toRef(exec, internalException->value()); return 0; } ASSERT(result); diff --git a/Source/JavaScriptCore/API/JSStringRef.cpp b/Source/JavaScriptCore/API/JSStringRef.cpp index 25b84c7d2..909540481 100644 --- a/Source/JavaScriptCore/API/JSStringRef.cpp +++ b/Source/JavaScriptCore/API/JSStringRef.cpp @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -37,7 +37,7 @@ using namespace WTF::Unicode; JSStringRef JSStringCreateWithCharacters(const JSChar* chars, size_t numChars) { initializeThreading(); - return OpaqueJSString::create(chars, numChars).leakRef(); + return &OpaqueJSString::create(reinterpret_cast(chars), numChars).leakRef(); } JSStringRef JSStringCreateWithUTF8CString(const char* string) @@ -51,18 +51,18 @@ JSStringRef JSStringCreateWithUTF8CString(const char* string) const LChar* stringStart = reinterpret_cast(string); if (conversionOK == convertUTF8ToUTF16(&string, string + length, &p, p + length, &sourceIsAllASCII)) { if (sourceIsAllASCII) - return OpaqueJSString::create(stringStart, length).leakRef(); - return OpaqueJSString::create(buffer.data(), p - buffer.data()).leakRef(); + return &OpaqueJSString::create(stringStart, length).leakRef(); + return &OpaqueJSString::create(buffer.data(), p - buffer.data()).leakRef(); } } - return OpaqueJSString::create().leakRef(); + return &OpaqueJSString::create().leakRef(); } JSStringRef JSStringCreateWithCharactersNoCopy(const JSChar* chars, size_t numChars) { initializeThreading(); - return OpaqueJSString::create(StringImpl::createWithoutCopying(chars, numChars)).leakRef(); + return OpaqueJSString::create(StringImpl::createWithoutCopying(reinterpret_cast(chars), numChars)).leakRef(); } JSStringRef JSStringRetain(JSStringRef string) @@ -78,12 +78,16 @@ void JSStringRelease(JSStringRef string) size_t JSStringGetLength(JSStringRef string) { + if (!string) + return 0; return string->length(); } const JSChar* JSStringGetCharactersPtr(JSStringRef string) { - return string->characters(); + if (!string) + return nullptr; + return reinterpret_cast(string->characters()); } size_t JSStringGetMaximumUTF8CStringSize(JSStringRef string) @@ -94,7 +98,7 @@ size_t JSStringGetMaximumUTF8CStringSize(JSStringRef string) size_t JSStringGetUTF8CString(JSStringRef string, char* buffer, size_t bufferSize) { - if (!bufferSize) + if (!string || !buffer || !bufferSize) return 0; char* destination = buffer; diff --git a/Source/JavaScriptCore/API/JSStringRef.h b/Source/JavaScriptCore/API/JSStringRef.h index aded73626..bc03ed70d 100644 --- a/Source/JavaScriptCore/API/JSStringRef.h +++ b/Source/JavaScriptCore/API/JSStringRef.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -37,11 +37,14 @@ extern "C" { #endif -#if !defined(WIN32) && !defined(_WIN32) \ - && !((defined(__CC_ARM) || defined(__ARMCC__)) && !defined(__linux__)) /* RVCT */ +#if !defined(_NATIVE_WCHAR_T_DEFINED) /* MSVC */ \ + && (!defined(__WCHAR_MAX__) || (__WCHAR_MAX__ > 0xffffU)) /* ISO C/C++ */ \ + && (!defined(WCHAR_MAX) || (WCHAR_MAX > 0xffffU)) /* RVCT */ /*! @typedef JSChar -@abstract A Unicode character. +@abstract A UTF-16 code unit. One, or a sequence of two, can encode any Unicode + character. As with all scalar types, endianness depends on the underlying + architecture. */ typedef unsigned short JSChar; #else diff --git a/Source/JavaScriptCore/API/JSStringRefBSTR.cpp b/Source/JavaScriptCore/API/JSStringRefBSTR.cpp new file mode 100644 index 000000000..e900d24d8 --- /dev/null +++ b/Source/JavaScriptCore/API/JSStringRefBSTR.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSStringRefBSTR.h" + +#include "JSStringRef.h" + +JSStringRef JSStringCreateWithBSTR(BSTR string) +{ + return JSStringCreateWithCharacters(string ? string : L"", string ? SysStringLen(string) : 0); +} + +BSTR JSStringCopyBSTR(const JSStringRef string) +{ + return SysAllocStringLen(JSStringGetCharactersPtr(string), JSStringGetLength(string)); +} diff --git a/Source/JavaScriptCore/API/JSStringRefBSTR.h b/Source/JavaScriptCore/API/JSStringRefBSTR.h new file mode 100644 index 000000000..066c68d53 --- /dev/null +++ b/Source/JavaScriptCore/API/JSStringRefBSTR.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSStringRefBSTR_h +#define JSStringRefBSTR_h + +#include "JSBase.h" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* COM convenience methods */ + +/*! +@function +@abstract Creates a JavaScript string from a BSTR. +@param string The BSTR to copy into the new JSString. +@result A JSString containing string. Ownership follows the Create Rule. +*/ +JS_EXPORT JSStringRef JSStringCreateWithBSTR(const BSTR string); + +/*! +@function +@abstract Creates a BSTR from a JavaScript string. +@param string The JSString to copy into the new BSTR. +@result A BSTR containing string. Ownership follows the Create Rule. +*/ +JS_EXPORT BSTR JSStringCopyBSTR(const JSStringRef string); + +#ifdef __cplusplus +} +#endif + +#endif /* JSStringRefBSTR_h */ diff --git a/Source/JavaScriptCore/API/JSStringRefCF.cpp b/Source/JavaScriptCore/API/JSStringRefCF.cpp new file mode 100644 index 000000000..05872593f --- /dev/null +++ b/Source/JavaScriptCore/API/JSStringRefCF.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSStringRefCF.h" + +#include "APICast.h" +#include "InitializeThreading.h" +#include "JSCJSValue.h" +#include "JSStringRef.h" +#include "OpaqueJSString.h" +#include + +JSStringRef JSStringCreateWithCFString(CFStringRef string) +{ + JSC::initializeThreading(); + + // We cannot use CFIndex here since CFStringGetLength can return values larger than + // it can hold. () + size_t length = CFStringGetLength(string); + if (!length) + return &OpaqueJSString::create(reinterpret_cast(""), 0).leakRef(); + + Vector lcharBuffer(length); + CFIndex usedBufferLength; + CFIndex convertedSize = CFStringGetBytes(string, CFRangeMake(0, length), kCFStringEncodingISOLatin1, 0, false, lcharBuffer.data(), length, &usedBufferLength); + if (static_cast(convertedSize) == length && static_cast(usedBufferLength) == length) + return &OpaqueJSString::create(lcharBuffer.data(), length).leakRef(); + + auto buffer = std::make_unique(length); + CFStringGetCharacters(string, CFRangeMake(0, length), buffer.get()); + static_assert(sizeof(UniChar) == sizeof(UChar), "UniChar and UChar must be same size"); + return &OpaqueJSString::create(reinterpret_cast(buffer.get()), length).leakRef(); +} + +CFStringRef JSStringCopyCFString(CFAllocatorRef allocator, JSStringRef string) +{ + if (!string || !string->length()) + return CFSTR(""); + + if (string->is8Bit()) + return CFStringCreateWithBytes(allocator, reinterpret_cast(string->characters8()), string->length(), kCFStringEncodingISOLatin1, false); + + return CFStringCreateWithCharacters(allocator, reinterpret_cast(string->characters16()), string->length()); +} diff --git a/Source/JavaScriptCore/API/JSStringRefCF.h b/Source/JavaScriptCore/API/JSStringRefCF.h new file mode 100644 index 000000000..1e210c7a9 --- /dev/null +++ b/Source/JavaScriptCore/API/JSStringRefCF.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSStringRefCF_h +#define JSStringRefCF_h + +#include "JSBase.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* CFString convenience methods */ + +/*! +@function +@abstract Creates a JavaScript string from a CFString. +@discussion This function is optimized to take advantage of cases when + CFStringGetCharactersPtr returns a valid pointer. +@param string The CFString to copy into the new JSString. +@result A JSString containing string. Ownership follows the Create Rule. +*/ +JS_EXPORT JSStringRef JSStringCreateWithCFString(CFStringRef string); +/*! +@function +@abstract Creates a CFString from a JavaScript string. +@param alloc The alloc parameter to pass to CFStringCreate. +@param string The JSString to copy into the new CFString. +@result A CFString containing string. Ownership follows the Create Rule. +*/ +JS_EXPORT CFStringRef JSStringCopyCFString(CFAllocatorRef alloc, JSStringRef string) CF_RETURNS_RETAINED; + +#ifdef __cplusplus +} +#endif + +#endif /* JSStringRefCF_h */ diff --git a/Source/JavaScriptCore/API/JSTypedArray.cpp b/Source/JavaScriptCore/API/JSTypedArray.cpp new file mode 100644 index 000000000..0902e609f --- /dev/null +++ b/Source/JavaScriptCore/API/JSTypedArray.cpp @@ -0,0 +1,345 @@ +/* + * Copyright (C) 2015 Dominic Szablewski (dominic@phoboslab.org) + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
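The JSTypedArray.{cpp,h} pair added here exposes typed arrays and array buffers through the C API. As a sketch of the no-copy entry point, assuming the declarations that appear later in this patch (freeBacking and makeExternalUint8Array are invented names for illustration):

```c
#include <JavaScriptCore/JSTypedArray.h>
#include <stdlib.h>

/* Deallocator invoked when the backing store is released; per the header
   docs below, it also runs if creation itself throws. */
static void freeBacking(void* bytes, void* context)
{
    (void)context;
    free(bytes);
}

/* Hand a malloc'd buffer to JavaScript as a Uint8Array without copying. */
static JSObjectRef makeExternalUint8Array(JSContextRef ctx, size_t length)
{
    void* bytes = calloc(length, 1);
    if (!bytes)
        return NULL;
    JSValueRef exception = NULL;
    return JSObjectMakeTypedArrayWithBytesNoCopy(
        ctx, kJSTypedArrayTypeUint8Array, bytes, length,
        freeBacking, NULL, &exception);
}
```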
+ */ + +#include "config.h" +#include "JSTypedArray.h" + +#include "APICast.h" +#include "APIUtils.h" +#include "ClassInfo.h" +#include "Error.h" +#include "JSArrayBufferViewInlines.h" +#include "JSCInlines.h" +#include "JSDataView.h" +#include "JSGenericTypedArrayViewInlines.h" +#include "JSTypedArrays.h" +#include "TypedArrayController.h" +#include + +using namespace JSC; + +// Helper functions. + +inline JSTypedArrayType toJSTypedArrayType(TypedArrayType type) +{ + switch (type) { + case JSC::TypeDataView: + case NotTypedArray: + return kJSTypedArrayTypeNone; + case TypeInt8: + return kJSTypedArrayTypeInt8Array; + case TypeUint8: + return kJSTypedArrayTypeUint8Array; + case TypeUint8Clamped: + return kJSTypedArrayTypeUint8ClampedArray; + case TypeInt16: + return kJSTypedArrayTypeInt16Array; + case TypeUint16: + return kJSTypedArrayTypeUint16Array; + case TypeInt32: + return kJSTypedArrayTypeInt32Array; + case TypeUint32: + return kJSTypedArrayTypeUint32Array; + case TypeFloat32: + return kJSTypedArrayTypeFloat32Array; + case TypeFloat64: + return kJSTypedArrayTypeFloat64Array; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +inline TypedArrayType toTypedArrayType(JSTypedArrayType type) +{ + switch (type) { + case kJSTypedArrayTypeArrayBuffer: + case kJSTypedArrayTypeNone: + return NotTypedArray; + case kJSTypedArrayTypeInt8Array: + return TypeInt8; + case kJSTypedArrayTypeUint8Array: + return TypeUint8; + case kJSTypedArrayTypeUint8ClampedArray: + return TypeUint8Clamped; + case kJSTypedArrayTypeInt16Array: + return TypeInt16; + case kJSTypedArrayTypeUint16Array: + return TypeUint16; + case kJSTypedArrayTypeInt32Array: + return TypeInt32; + case kJSTypedArrayTypeUint32Array: + return TypeUint32; + case kJSTypedArrayTypeFloat32Array: + return TypeFloat32; + case kJSTypedArrayTypeFloat64Array: + return TypeFloat64; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +static JSObject* createTypedArray(ExecState* exec, JSTypedArrayType type, RefPtr&& buffer, size_t offset, size_t length) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSGlobalObject* globalObject = exec->lexicalGlobalObject(); + if (!buffer) { + throwOutOfMemoryError(exec, scope); + return nullptr; + } + switch (type) { + case kJSTypedArrayTypeInt8Array: + return JSInt8Array::create(exec, globalObject->typedArrayStructure(TypeInt8), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeInt16Array: + return JSInt16Array::create(exec, globalObject->typedArrayStructure(TypeInt16), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeInt32Array: + return JSInt32Array::create(exec, globalObject->typedArrayStructure(TypeInt32), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeUint8Array: + return JSUint8Array::create(exec, globalObject->typedArrayStructure(TypeUint8), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeUint8ClampedArray: + return JSUint8ClampedArray::create(exec, globalObject->typedArrayStructure(TypeUint8Clamped), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeUint16Array: + return JSUint16Array::create(exec, globalObject->typedArrayStructure(TypeUint16), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeUint32Array: + return JSUint32Array::create(exec, globalObject->typedArrayStructure(TypeUint32), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeFloat32Array: + return JSFloat32Array::create(exec, globalObject->typedArrayStructure(TypeFloat32), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeFloat64Array: + return JSFloat64Array::create(exec, 
globalObject->typedArrayStructure(TypeFloat64), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeArrayBuffer: + case kJSTypedArrayTypeNone: + RELEASE_ASSERT_NOT_REACHED(); + } + return nullptr; +} + +// Implementations of the API functions. + +JSTypedArrayType JSValueGetTypedArrayType(JSContextRef ctx, JSValueRef valueRef, JSValueRef*) +{ + + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(exec); + + JSValue value = toJS(exec, valueRef); + if (!value.isObject()) + return kJSTypedArrayTypeNone; + JSObject* object = value.getObject(); + + if (jsDynamicCast(vm, object)) + return kJSTypedArrayTypeArrayBuffer; + + return toJSTypedArrayType(object->classInfo(vm)->typedArrayStorageType); +} + +JSObjectRef JSObjectMakeTypedArray(JSContextRef ctx, JSTypedArrayType arrayType, size_t length, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (arrayType == kJSTypedArrayTypeNone || arrayType == kJSTypedArrayTypeArrayBuffer) + return nullptr; + + unsigned elementByteSize = elementSize(toTypedArrayType(arrayType)); + + auto buffer = ArrayBuffer::tryCreate(length, elementByteSize); + JSObject* result = createTypedArray(exec, arrayType, WTFMove(buffer), 0, length); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + return toRef(result); +} + +JSObjectRef JSObjectMakeTypedArrayWithBytesNoCopy(JSContextRef ctx, JSTypedArrayType arrayType, void* bytes, size_t length, JSTypedArrayBytesDeallocator destructor, void* destructorContext, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (arrayType == kJSTypedArrayTypeNone || arrayType == kJSTypedArrayTypeArrayBuffer) + return nullptr; + + unsigned elementByteSize = elementSize(toTypedArrayType(arrayType)); + + RefPtr buffer = ArrayBuffer::createFromBytes(bytes, length, [=](void* p) { + if (destructor) + destructor(p, destructorContext); + }); + JSObject* result = createTypedArray(exec, arrayType, WTFMove(buffer), 0, length / elementByteSize); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + return toRef(result); +} + +JSObjectRef JSObjectMakeTypedArrayWithArrayBuffer(JSContextRef ctx, JSTypedArrayType arrayType, JSObjectRef jsBufferRef, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(exec); + + if (arrayType == kJSTypedArrayTypeNone || arrayType == kJSTypedArrayTypeArrayBuffer) + return nullptr; + + JSArrayBuffer* jsBuffer = jsDynamicCast(vm, toJS(jsBufferRef)); + if (!jsBuffer) { + setException(exec, exception, createTypeError(exec, "JSObjectMakeTypedArrayWithArrayBuffer expects buffer to be an Array Buffer object")); + return nullptr; + } + + RefPtr buffer = jsBuffer->impl(); + unsigned elementByteSize = elementSize(toTypedArrayType(arrayType)); + + JSObject* result = createTypedArray(exec, arrayType, WTFMove(buffer), 0, buffer->byteLength() / elementByteSize); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + return toRef(result); +} + +JSObjectRef JSObjectMakeTypedArrayWithArrayBufferAndOffset(JSContextRef ctx, JSTypedArrayType arrayType, JSObjectRef jsBufferRef, size_t offset, size_t length, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(exec); + + if (arrayType == kJSTypedArrayTypeNone || arrayType == kJSTypedArrayTypeArrayBuffer) + return nullptr; + + JSArrayBuffer* jsBuffer = jsDynamicCast(vm, 
toJS(jsBufferRef)); + if (!jsBuffer) { + setException(exec, exception, createTypeError(exec, "JSObjectMakeTypedArrayWithArrayBuffer expects buffer to be an Array Buffer object")); + return nullptr; + } + + JSObject* result = createTypedArray(exec, arrayType, jsBuffer->impl(), offset, length); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + return toRef(result); +} + +void* JSObjectGetTypedArrayBytesPtr(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(exec); + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(vm, object)) { + ArrayBuffer* buffer = typedArray->possiblySharedBuffer(); + buffer->pinAndLock(); + return buffer->data(); + } + return nullptr; +} + +size_t JSObjectGetTypedArrayLength(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(vm, object)) + return typedArray->length(); + + return 0; +} + +size_t JSObjectGetTypedArrayByteLength(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(vm, object)) + return typedArray->length() * elementSize(typedArray->classInfo(vm)->typedArrayStorageType); + + return 0; +} + +size_t JSObjectGetTypedArrayByteOffset(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(vm, object)) + return typedArray->byteOffset(); + + return 0; +} + +JSObjectRef JSObjectGetTypedArrayBuffer(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(exec); + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(vm, object)) + return toRef(exec->vm().m_typedArrayController->toJS(exec, typedArray->globalObject(), typedArray->possiblySharedBuffer())); + + return nullptr; +} + +JSObjectRef JSObjectMakeArrayBufferWithBytesNoCopy(JSContextRef ctx, void* bytes, size_t byteLength, JSTypedArrayBytesDeallocator bytesDeallocator, void* deallocatorContext, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + auto buffer = ArrayBuffer::createFromBytes(bytes, byteLength, [=](void* p) { + if (bytesDeallocator) + bytesDeallocator(p, deallocatorContext); + }); + + JSArrayBuffer* jsBuffer = JSArrayBuffer::create(exec->vm(), exec->lexicalGlobalObject()->arrayBufferStructure(ArrayBufferSharingMode::Default), WTFMove(buffer)); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + + return toRef(jsBuffer); +} + +void* JSObjectGetArrayBufferBytesPtr(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(exec); + JSObject* object = toJS(objectRef); + + if (JSArrayBuffer* jsBuffer = jsDynamicCast(vm, object)) { + ArrayBuffer* buffer = jsBuffer->impl(); + buffer->pinAndLock(); + return buffer->data(); + } + return nullptr; +} + +size_t JSObjectGetArrayBufferByteLength(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSObject* object = toJS(objectRef); 
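Note that JSObjectGetTypedArrayBytesPtr above returns the start of the pinned backing ArrayBuffer (buffer->data()), not the view's first element, so a caller has to apply the view's byte offset itself. An illustrative sketch of that detail (copyTypedArrayBytes is a made-up helper, not API):

```c
#include <JavaScriptCore/JSTypedArray.h>
#include <string.h>

/* Snapshot a typed array's payload. The pointer is pinned, but it is not
   guaranteed to stay valid across further API calls, so copy out at once. */
static size_t copyTypedArrayBytes(JSContextRef ctx, JSObjectRef view,
                                  void* out, size_t outSize)
{
    JSValueRef exception = NULL;
    void* data = JSObjectGetTypedArrayBytesPtr(ctx, view, &exception);
    size_t offset = JSObjectGetTypedArrayByteOffset(ctx, view, &exception);
    size_t length = JSObjectGetTypedArrayByteLength(ctx, view, &exception);
    if (!data || length > outSize)
        return 0;
    /* data points at the backing ArrayBuffer, so honor the view's offset. */
    memcpy(out, (char*)data + offset, length);
    return length;
}
```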
+ + if (JSArrayBuffer* jsBuffer = jsDynamicCast(vm, object)) + return jsBuffer->impl()->byteLength(); + + return 0; +} diff --git a/Source/JavaScriptCore/API/JSTypedArray.h b/Source/JavaScriptCore/API/JSTypedArray.h new file mode 100644 index 000000000..e23b76d2e --- /dev/null +++ b/Source/JavaScriptCore/API/JSTypedArray.h @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2015 Dominic Szablewski (dominic@phoboslab.org) + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSTypedArray_h +#define JSTypedArray_h + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// ------------- Typed Array functions -------------- + +/*! + @function + @abstract Creates a JavaScript Typed Array object with the given number of elements. + @param ctx The execution context to use. + @param arrayType A value identifying the type of array to create. If arrayType is kJSTypedArrayTypeNone or kJSTypedArrayTypeArrayBuffer then NULL will be returned. + @param length The number of elements to be in the new Typed Array. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef that is a Typed Array with all elements set to zero or NULL if there was an error. + */ +JS_EXPORT JSObjectRef JSObjectMakeTypedArray(JSContextRef ctx, JSTypedArrayType arrayType, size_t length, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Creates a JavaScript Typed Array object from an existing pointer. + @param ctx The execution context to use. + @param arrayType A value identifying the type of array to create. If arrayType is kJSTypedArrayTypeNone or kJSTypedArrayTypeArrayBuffer then NULL will be returned. + @param bytes A pointer to the byte buffer to be used as the backing store of the Typed Array object. + @param byteLength The number of bytes pointed to by the parameter bytes. + @param bytesDeallocator The allocator to use to deallocate the external buffer when the JSTypedArrayData object is deallocated. + @param deallocatorContext A pointer to pass back to the deallocator. + @param exception A pointer to a JSValueRef in which to store an exception, if any. 
Pass NULL if you do not care to store an exception. + @result A JSObjectRef Typed Array whose backing store is the same as the one pointed to by bytes or NULL if there was an error. + @discussion If an exception is thrown during this function the bytesDeallocator will always be called. + */ +JS_EXPORT JSObjectRef JSObjectMakeTypedArrayWithBytesNoCopy(JSContextRef ctx, JSTypedArrayType arrayType, void* bytes, size_t byteLength, JSTypedArrayBytesDeallocator bytesDeallocator, void* deallocatorContext, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Creates a JavaScript Typed Array object from an existing JavaScript Array Buffer object. + @param ctx The execution context to use. + @param arrayType A value identifying the type of array to create. If arrayType is kJSTypedArrayTypeNone or kJSTypedArrayTypeArrayBuffer then NULL will be returned. + @param buffer An Array Buffer object that should be used as the backing store for the created JavaScript Typed Array object. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef that is a Typed Array or NULL if there was an error. The backing store of the Typed Array will be buffer. + */ +JS_EXPORT JSObjectRef JSObjectMakeTypedArrayWithArrayBuffer(JSContextRef ctx, JSTypedArrayType arrayType, JSObjectRef buffer, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Creates a JavaScript Typed Array object from an existing JavaScript Array Buffer object with the given offset and length. + @param ctx The execution context to use. + @param arrayType A value identifying the type of array to create. If arrayType is kJSTypedArrayTypeNone or kJSTypedArrayTypeArrayBuffer then NULL will be returned. + @param buffer An Array Buffer object that should be used as the backing store for the created JavaScript Typed Array object. + @param byteOffset The byte offset for the created Typed Array. byteOffset should aligned with the element size of arrayType. + @param length The number of elements to include in the Typed Array. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef that is a Typed Array or NULL if there was an error. The backing store of the Typed Array will be buffer. + */ +JS_EXPORT JSObjectRef JSObjectMakeTypedArrayWithArrayBufferAndOffset(JSContextRef ctx, JSTypedArrayType arrayType, JSObjectRef buffer, size_t byteOffset, size_t length, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns a temporary pointer to the backing store of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The Typed Array object whose backing store pointer to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A pointer to the raw data buffer that serves as object's backing store or NULL if object is not a Typed Array object. + @discussion The pointer returned by this function is temporary and is not guaranteed to remain valid across JavaScriptCore API calls. + */ +JS_EXPORT void* JSObjectGetTypedArrayBytesPtr(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the length of a JavaScript Typed Array object. + @param ctx The execution context to use. 
+ @param object The Typed Array object whose length to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result The length of the Typed Array object or 0 if the object is not a Typed Array object. + */ +JS_EXPORT size_t JSObjectGetTypedArrayLength(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the byte length of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The Typed Array object whose byte length to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result The byte length of the Typed Array object or 0 if the object is not a Typed Array object. + */ +JS_EXPORT size_t JSObjectGetTypedArrayByteLength(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the byte offset of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The Typed Array object whose byte offset to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result The byte offset of the Typed Array object or 0 if the object is not a Typed Array object. + */ +JS_EXPORT size_t JSObjectGetTypedArrayByteOffset(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the JavaScript Array Buffer object that is used as the backing of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The JSObjectRef whose Typed Array type data pointer to obtain. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef with a JSTypedArrayType of kJSTypedArrayTypeArrayBuffer or NULL if object is not a Typed Array. + */ +JS_EXPORT JSObjectRef JSObjectGetTypedArrayBuffer(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +// ------------- Array Buffer functions ------------- + +/*! + @function + @abstract Creates a JavaScript Array Buffer object from an existing pointer. + @param ctx The execution context to use. + @param bytes A pointer to the byte buffer to be used as the backing store of the Typed Array object. + @param byteLength The number of bytes pointed to by the parameter bytes. + @param bytesDeallocator The allocator to use to deallocate the external buffer when the Typed Array data object is deallocated. + @param deallocatorContext A pointer to pass back to the deallocator. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef Array Buffer whose backing store is the same as the one pointed to by bytes or NULL if there was an error. + @discussion If an exception is thrown during this function the bytesDeallocator will always be called. + */ +JS_EXPORT JSObjectRef JSObjectMakeArrayBufferWithBytesNoCopy(JSContextRef ctx, void* bytes, size_t byteLength, JSTypedArrayBytesDeallocator bytesDeallocator, void* deallocatorContext, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! 
+ @function + @abstract Returns a pointer to the data buffer that serves as the backing store for a JavaScript Typed Array object. + @param object The Array Buffer object whose internal backing store pointer to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A pointer to the raw data buffer that serves as object's backing store or NULL if object is not an Array Buffer object. + @discussion The pointer returned by this function is temporary and is not guaranteed to remain valid across JavaScriptCore API calls. + */ +JS_EXPORT void* JSObjectGetArrayBufferBytesPtr(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the number of bytes in a JavaScript data object. + @param ctx The execution context to use. + @param object The JS Array Buffer object whose length in bytes to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result The number of bytes stored in the data object. + */ +JS_EXPORT size_t JSObjectGetArrayBufferByteLength(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +#ifdef __cplusplus +} +#endif + +#endif /* JSTypedArray_h */ diff --git a/Source/JavaScriptCore/API/JSValue.h b/Source/JavaScriptCore/API/JSValue.h new file mode 100644 index 000000000..1410dd74a --- /dev/null +++ b/Source/JavaScriptCore/API/JSValue.h @@ -0,0 +1,668 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSValue_h +#define JSValue_h + +#if JSC_OBJC_API_ENABLED + +#import + +@class JSContext; + +/*! +@interface +@discussion A JSValue is a reference to a JavaScript value. Every JSValue + originates from a JSContext and holds a strong reference to it. + When a JSValue instance method creates a new JSValue, the new value + originates from the same JSContext. + + All JSValue values also originate from a JSVirtualMachine + (available indirectly via the context property). It is an error to pass a + JSValue to a method or property of a JSValue or JSContext originating from a + different JSVirtualMachine.
diff --git a/Source/JavaScriptCore/API/JSValue.h b/Source/JavaScriptCore/API/JSValue.h
new file mode 100644
index 000000000..1410dd74a
--- /dev/null
+++ b/Source/JavaScriptCore/API/JSValue.h
@@ -0,0 +1,668 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSValue_h
+#define JSValue_h
+
+#if JSC_OBJC_API_ENABLED
+
+#import <CoreGraphics/CGGeometry.h>
+
+@class JSContext;
+
+/*!
+@interface
+@discussion A JSValue is a reference to a JavaScript value. Every JSValue
+ originates from a JSContext and holds a strong reference to it.
+ When a JSValue instance method creates a new JSValue, the new value
+ originates from the same JSContext.
+
+ All JSValue instances also originate from a JSVirtualMachine
+ (available indirectly via the context property). It is an error to pass a
+ JSValue to a method or property of a JSValue or JSContext originating from a
+ different JSVirtualMachine. Doing so will raise an Objective-C exception.
+*/
+NS_CLASS_AVAILABLE(10_9, 7_0)
+@interface JSValue : NSObject
+
+/*!
+@property
+@abstract The JSContext that this value originates from.
+*/
+@property (readonly, strong) JSContext *context;
+
+/*!
+@methodgroup Creating JavaScript Values
+*/
+/*!
+@method
+@abstract Create a JSValue by converting an Objective-C object.
+@discussion The resulting JSValue retains the provided Objective-C object.
+@param value The Objective-C object to be converted.
+@param context The JSContext in which the resulting JSValue will be created.
+@result The new JSValue.
+*/
++ (JSValue *)valueWithObject:(id)value inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a JavaScript value from a BOOL primitive.
+@param context The JSContext in which the resulting JSValue will be created.
+@result The new JSValue representing the equivalent boolean value.
+*/
++ (JSValue *)valueWithBool:(BOOL)value inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a JavaScript value from a double primitive.
+@param context The JSContext in which the resulting JSValue will be created.
+@result The new JSValue representing the equivalent number value.
+*/
++ (JSValue *)valueWithDouble:(double)value inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a JavaScript value from an int32_t primitive.
+@param context The JSContext in which the resulting JSValue will be created.
+@result The new JSValue representing the equivalent number value.
+*/
++ (JSValue *)valueWithInt32:(int32_t)value inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a JavaScript value from a uint32_t primitive.
+@param context The JSContext in which the resulting JSValue will be created.
+@result The new JSValue representing the equivalent number value.
+*/
++ (JSValue *)valueWithUInt32:(uint32_t)value inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a new, empty JavaScript object.
+@param context The JSContext in which the resulting object will be created.
+@result The new JavaScript object.
+*/
++ (JSValue *)valueWithNewObjectInContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a new, empty JavaScript array.
+@param context The JSContext in which the resulting array will be created.
+@result The new JavaScript array.
+*/
++ (JSValue *)valueWithNewArrayInContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a new JavaScript regular expression object.
+@param pattern The regular expression pattern.
+@param flags The regular expression flags.
+@param context The JSContext in which the resulting regular expression object will be created.
+@result The new JavaScript regular expression object.
+*/
++ (JSValue *)valueWithNewRegularExpressionFromPattern:(NSString *)pattern flags:(NSString *)flags inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a new JavaScript error object.
+@param message The error message.
+@param context The JSContext in which the resulting error object will be created.
+@result The new JavaScript error object.
+*/
++ (JSValue *)valueWithNewErrorFromMessage:(NSString *)message inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create the JavaScript value null.
+@param context The JSContext to which the resulting JSValue belongs.
+@result The JSValue representing the JavaScript value null.
+*/
++ (JSValue *)valueWithNullInContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create the JavaScript value undefined.
+@param context The JSContext to which the resulting JSValue belongs.
+@result The JSValue representing the JavaScript value undefined.
+*/
++ (JSValue *)valueWithUndefinedInContext:(JSContext *)context;
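As a quick illustration of the constructors above, a short sketch (not part of the header; names arbitrary) building values in a fresh context:

    #import <JavaScriptCore/JavaScriptCore.h>

    void createValuesExample(void)
    {
        JSContext *context = [[JSContext alloc] init];

        // Primitives wrap into JSValues tied to this context.
        JSValue *pi = [JSValue valueWithDouble:3.14159 inContext:context];
        JSValue *flag = [JSValue valueWithBool:YES inContext:context];
        JSValue *answer = [JSValue valueWithInt32:42 inContext:context];

        // Fresh, empty aggregates and the JavaScript singletons.
        JSValue *object = [JSValue valueWithNewObjectInContext:context];
        JSValue *array = [JSValue valueWithNewArrayInContext:context];
        JSValue *nullValue = [JSValue valueWithNullInContext:context];

        // Objective-C objects convert per the table that follows
        // (NSString becomes a JavaScript string).
        JSValue *name = [JSValue valueWithObject:@"WebKit" inContext:context];

        NSLog(@"%@ %@ %@ %@ %@ %@ %@", pi, flag, answer, object, array, nullValue, name);
    }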
+
+/*!
+@methodgroup Converting to Objective-C Types
+@discussion When converting between JavaScript values and Objective-C objects, a copy is
+ performed. Values of types listed below are copied to the corresponding
+ types on conversion in each direction. For NSDictionaries, entries in the
+ dictionary that are keyed by strings are copied onto a JavaScript object.
+ For dictionaries and arrays, conversion is recursive, with the same object
+ conversion being applied to all entries in the collection.
+
+@textblock
+   Objective-C type  |   JavaScript type
+ --------------------+---------------------
+         nil         |     undefined
+        NSNull       |        null
+       NSString      |       string
+       NSNumber      |   number, boolean
+     NSDictionary    |   Object object
+       NSArray       |    Array object
+        NSDate       |     Date object
+       NSBlock (1)   |   Function object (1)
+          id (2)     |   Wrapper object (2)
+        Class (3)    | Constructor object (3)
+@/textblock
+
+
+ (1) Instances of NSBlock with supported argument types will be presented to
+ JavaScript as a callable Function object. For more information on supported
+ argument types see JSExport.h. If a JavaScript Function originating from an
+ Objective-C block is converted back to an Objective-C object, the block will
+ be returned. All other JavaScript functions will be converted in the same
+ manner as a JavaScript object of type Object.
+
+ (2) For Objective-C instances that do not derive from the set of types listed
+ above, a wrapper object is created to provide a retaining handle to the Objective-C
+ instance from JavaScript. For more information on these wrapper objects, see
+ JSExport.h. When a JavaScript wrapper object is converted back to Objective-C,
+ the Objective-C instance being retained by the wrapper is returned.
+
+ (3) For Objective-C Class objects a constructor object containing exported
+ class methods will be returned. See JSExport.h for more information on
+ constructor objects.
+
+ For all methods taking arguments of type id, arguments will be converted
+ into a JavaScript value according to the above conversion.
+*/
+/*!
+@method
+@abstract Convert this JSValue to an Objective-C object.
+@discussion The JSValue is converted to an Objective-C object according
+ to the conversion rules specified above.
+@result The Objective-C representation of this JSValue.
+*/
+- (id)toObject;
+
+/*!
+@method
+@abstract Convert a JSValue to an Objective-C object of a specific class.
+@discussion The JSValue is converted to an Objective-C object of the specified Class.
+ If the result is not of the specified Class then nil will be returned.
+@result An Objective-C object of the specified Class or nil.
+*/
+- (id)toObjectOfClass:(Class)expectedClass;
+
+/*!
+@method
+@abstract Convert a JSValue to a boolean.
+@discussion The JSValue is converted to a boolean according to the rules specified
+ by the JavaScript language.
+@result The boolean result of the conversion.
+*/
+- (BOOL)toBool;
+
+/*!
+@method
+@abstract Convert a JSValue to a double.
+@discussion The JSValue is converted to a number according to the rules specified
+ by the JavaScript language.
+@result The double result of the conversion.
+*/
+- (double)toDouble;
+
+/*!
+@method
+@abstract Convert a JSValue to an int32_t.
+@discussion The JSValue is converted to an integer according to the rules specified
+ by the JavaScript language.
+@result The int32_t result of the conversion.
+*/
+- (int32_t)toInt32;
+
+/*!
+@method
+@abstract Convert a JSValue to a uint32_t.
+@discussion The JSValue is converted to an integer according to the rules specified
+ by the JavaScript language.
+@result The uint32_t result of the conversion.
+*/
+- (uint32_t)toUInt32;
+
+/*!
+@method
+@abstract Convert a JSValue to a NSNumber.
+@discussion If the JSValue represents a boolean, a NSNumber value of YES or NO
+ will be returned. For all other types the value will be converted to a number according
+ to the rules specified by the JavaScript language.
+@result The NSNumber result of the conversion.
+*/
+- (NSNumber *)toNumber;
+
+/*!
+@method
+@abstract Convert a JSValue to a NSString.
+@discussion The JSValue is converted to a string according to the rules specified
+ by the JavaScript language.
+@result The NSString containing the result of the conversion.
+*/
+- (NSString *)toString;
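To make the conversion rules concrete, a brief sketch (illustrative values; evaluateScript is declared on JSContext elsewhere in this patch):

    JSContext *context = [[JSContext alloc] init];

    // NSDictionary converts to a JavaScript object per the table above.
    JSValue *wrapped = [JSValue valueWithObject:@{ @"count" : @3 } inContext:context];
    NSDictionary *roundTripped = [wrapped toObject];   // @{ @"count" : @3 }

    // Primitive conversions follow the JavaScript rules.
    JSValue *number = [context evaluateScript:@"3.5"];
    double d = [number toDouble];        // 3.5
    int32_t i = [number toInt32];        // 3 (ToInt32 truncates)
    NSString *s = [number toString];     // @"3.5"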
+
+/*!
+@method
+@abstract Convert a JSValue to a NSDate.
+@discussion The value is converted to a number representing a time interval
+ since 1970 which is then used to create a new NSDate instance.
+@result The NSDate created using the converted time interval.
+*/
+- (NSDate *)toDate;
+
+/*!
+@method
+@abstract Convert a JSValue to a NSArray.
+@discussion If the value is null or undefined then nil is returned.
+ If the value is not an object then a JavaScript TypeError will be thrown.
+ The property length is read from the object, converted to an unsigned
+ integer, and an NSArray of this size is allocated. Properties corresponding
+ to indices within the array bounds will be copied to the array, with
+ JSValues converted to equivalent Objective-C objects as specified.
+@result The NSArray containing the recursively converted contents of the
+ converted JavaScript array.
+*/
+- (NSArray *)toArray;
+
+/*!
+@method
+@abstract Convert a JSValue to a NSDictionary.
+@discussion If the value is null or undefined then nil is returned.
+ If the value is not an object then a JavaScript TypeError will be thrown.
+ All enumerable properties of the object are copied to the dictionary, with
+ JSValues converted to equivalent Objective-C objects as specified.
+@result The NSDictionary containing the recursively converted contents of
+ the converted JavaScript object.
+*/
+- (NSDictionary *)toDictionary;
+
+/*!
+@methodgroup Accessing Properties
+*/
+/*!
+@method
+@abstract Access a property of a JSValue.
+@result The JSValue for the requested property or the JSValue undefined
+ if the property does not exist.
+*/
+- (JSValue *)valueForProperty:(NSString *)property;
+
+/*!
+@method
+@abstract Set a property on a JSValue.
+*/
+- (void)setValue:(id)value forProperty:(NSString *)property;
+
+/*!
+@method
+@abstract Delete a property from a JSValue.
+@result YES if deletion is successful, NO otherwise.
+*/
+- (BOOL)deleteProperty:(NSString *)property;
+
+/*!
+@method
+@abstract Check if a JSValue has a property.
+@discussion This method has the same function as the JavaScript operator in.
+@result Returns YES if property is present on the value.
+*/
+- (BOOL)hasProperty:(NSString *)property;
+
+/*!
+@method
+@abstract Define properties with custom descriptors on JSValues.
+@discussion This method may be used to create a data or accessor property on an object.
+ This method operates in accordance with the Object.defineProperty method in the
+ JavaScript language.
+*/
+- (void)defineProperty:(NSString *)property descriptor:(id)descriptor;
+
+/*!
+@method
+@abstract Access an indexed (numerical) property on a JSValue.
+@result The JSValue for the property at the specified index.
+ Returns the JavaScript value undefined if no property exists at that index.
+*/
+- (JSValue *)valueAtIndex:(NSUInteger)index;
+
+/*!
+@method
+@abstract Set an indexed (numerical) property on a JSValue.
+@discussion For JSValues that are JavaScript arrays, indices greater than
+ UINT_MAX - 1 will not affect the length of the array.
+*/
+- (void)setValue:(id)value atIndex:(NSUInteger)index;
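By way of example, a plausible round trip through the property accessors above (names arbitrary):

    JSContext *context = [[JSContext alloc] init];
    JSValue *object = [JSValue valueWithNewObjectInContext:context];

    // Named properties.
    [object setValue:@42 forProperty:@"answer"];
    JSValue *answer = [object valueForProperty:@"answer"];  // 42
    BOOL present = [object hasProperty:@"answer"];          // YES
    [object deleteProperty:@"answer"];

    // Indexed properties; on arrays, in-bounds indices also update length.
    JSValue *array = [JSValue valueWithNewArrayInContext:context];
    [array setValue:@"first" atIndex:0];
    JSValue *first = [array valueAtIndex:0];                // "first"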
+
+/*!
+@functiongroup Checking JavaScript Types
+*/
+
+/*!
+@property
+@abstract Check if a JSValue corresponds to the JavaScript value undefined.
+*/
+@property (readonly) BOOL isUndefined;
+
+/*!
+@property
+@abstract Check if a JSValue corresponds to the JavaScript value null.
+*/
+@property (readonly) BOOL isNull;
+
+/*!
+@property
+@abstract Check if a JSValue is a boolean.
+*/
+@property (readonly) BOOL isBoolean;
+
+/*!
+@property
+@abstract Check if a JSValue is a number.
+@discussion In JavaScript, there is no differentiation between types of numbers.
+ Semantically all numbers behave like doubles except in special cases like bit
+ operations.
+*/
+@property (readonly) BOOL isNumber;
+
+/*!
+@property
+@abstract Check if a JSValue is a string.
+*/
+@property (readonly) BOOL isString;
+
+/*!
+@property
+@abstract Check if a JSValue is an object.
+*/
+@property (readonly) BOOL isObject;
+
+/*!
+@property
+@abstract Check if a JSValue is an array.
+*/
+@property (readonly) BOOL isArray NS_AVAILABLE(10_11, 9_0);
+
+/*!
+@property
+@abstract Check if a JSValue is a date.
+*/
+@property (readonly) BOOL isDate NS_AVAILABLE(10_11, 9_0);
+
+/*!
+@method
+@abstract Compare two JSValues using JavaScript's === operator.
+*/
+- (BOOL)isEqualToObject:(id)value;
+
+/*!
+@method
+@abstract Compare two JSValues using JavaScript's == operator.
+*/
+- (BOOL)isEqualWithTypeCoercionToObject:(id)value;
+
+/*!
+@method
+@abstract Check if a JSValue is an instance of another object.
+@discussion This method has the same function as the JavaScript operator instanceof.
+ If an object other than a JSValue is passed, it will first be converted according to
+ the aforementioned rules.
+*/
+- (BOOL)isInstanceOf:(id)value;
+
+/*!
+@methodgroup Calling Functions and Constructors
+*/
+/*!
+@method
+@abstract Invoke a JSValue as a function.
+@discussion In JavaScript, if a function doesn't explicitly return a value then it
+ implicitly returns the JavaScript value undefined.
+@param arguments The arguments to pass to the function.
+@result The return value of the function call.
+*/
+- (JSValue *)callWithArguments:(NSArray *)arguments;
+
+/*!
+@method
+@abstract Invoke a JSValue as a constructor.
+@discussion This is equivalent to using the new syntax in JavaScript.
+@param arguments The arguments to pass to the constructor.
+@result The return value of the constructor call.
+*/
+- (JSValue *)constructWithArguments:(NSArray *)arguments;
+
+/*!
+@method
+@abstract Invoke a method on a JSValue.
+@discussion Accesses the property named method from this value and
+ calls the resulting value as a function, passing this JSValue as the this
+ value along with the specified arguments.
+@param method The name of the method to be invoked.
+@param arguments The arguments to pass to the method.
+@result The return value of the method call.
+*/
+- (JSValue *)invokeMethod:(NSString *)method withArguments:(NSArray *)arguments;
+
+@end
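A short sketch of these calling conventions (assumes JSContext's evaluateScript and keyed subscripting, declared elsewhere in this patch):

    JSContext *context = [[JSContext alloc] init];
    JSValue *add = [context evaluateScript:@"(function(a, b) { return a + b; })"];

    // Call as a function; arguments convert per the rules above.
    JSValue *sum = [add callWithArguments:@[ @2, @3 ]];                // 5

    // Construct with `new` semantics.
    JSValue *date = [context[@"Date"] constructWithArguments:@[ @0 ]];

    // Invoke a method, passing the receiver as `this`.
    JSValue *time = [date invokeMethod:@"getTime" withArguments:@[]];  // 0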
+
+/*!
+@category
+@discussion Objective-C methods exported to JavaScript may have argument and/or return
+ values of struct types, provided that conversion to and from the struct is
+ supported by JSValue. Support is provided for any types where JSValue
+ contains both a class method valueWith<Struct>:inContext: and an instance
+ method to<Struct>, where the string <Struct> in these selector names matches,
+ with the first argument to the former being of the same struct type as the
+ return type of the latter.
+ Support is provided for structs of type CGPoint, NSRange, CGRect and CGSize.
+*/
+@interface JSValue (StructSupport)
+
+/*!
+@method
+@abstract Create a JSValue from a CGPoint.
+@result A newly allocated JavaScript object containing properties
+ named x and y, with values from the CGPoint.
+*/
++ (JSValue *)valueWithPoint:(CGPoint)point inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a JSValue from a NSRange.
+@result A newly allocated JavaScript object containing properties
+ named location and length, with values from the NSRange.
+*/
++ (JSValue *)valueWithRange:(NSRange)range inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a JSValue from a CGRect.
+@result A newly allocated JavaScript object containing properties
+ named x, y, width, and height, with values from the CGRect.
+*/
++ (JSValue *)valueWithRect:(CGRect)rect inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Create a JSValue from a CGSize.
+@result A newly allocated JavaScript object containing properties
+ named width and height, with values from the CGSize.
+*/
++ (JSValue *)valueWithSize:(CGSize)size inContext:(JSContext *)context;
+
+/*!
+@method
+@abstract Convert a JSValue to a CGPoint.
+@discussion Reads the properties named x and y from
+ this JSValue, and converts the results to double.
+@result The new CGPoint.
+*/
+- (CGPoint)toPoint;
+
+/*!
+@method
+@abstract Convert a JSValue to an NSRange.
+@discussion Reads the properties named location and
+ length from this JSValue and converts the results to double.
+@result The new NSRange.
+*/
+- (NSRange)toRange;
+
+/*!
+@method
+@abstract Convert a JSValue to a CGRect.
+@discussion Reads the properties named x, y,
+ width, and height from this JSValue and converts the results to double.
+@result The new CGRect.
+*/
+- (CGRect)toRect;
+
+/*!
+@method
+@abstract Convert a JSValue to a CGSize.
+@discussion Reads the properties named width and
+ height from this JSValue and converts the results to double.
+@result The new CGSize.
+*/
+- (CGSize)toSize;
+
+@end
+
+/*!
+@category
+@discussion Instances of JSValue implement the following methods in order to enable
+ support for subscript access by key and index, for example:
+
+@textblock
+ JSValue *object;
+ JSValue *v1 = object[@"X"]; // Get value for property "X" from 'object'.
+ JSValue *v2 = object[42];   // Get value for index 42 from 'object'.
+ object[@"Y"] = v1;          // Assign 'v1' to property "Y" of 'object'.
+ object[101] = v2;           // Assign 'v2' to index 101 of 'object'.
+@/textblock
+
+ An object key passed as a subscript will be converted to a JavaScript value,
+ and then the value converted to a string used as a property name.
+*/
+@interface JSValue (SubscriptSupport)
+
+- (JSValue *)objectForKeyedSubscript:(id)key;
+- (JSValue *)objectAtIndexedSubscript:(NSUInteger)index;
+- (void)setObject:(id)object forKeyedSubscript:(NSObject <NSCopying> *)key;
+- (void)setObject:(id)object atIndexedSubscript:(NSUInteger)index;
+
+@end
+
+/*!
+@category
+@discussion These functions are for bridging between the C API and the Objective-C API.
+*/
+@interface JSValue (JSValueRefSupport)
+
+/*!
+@method
+@abstract Creates a JSValue, wrapping its C API counterpart.
+@result The Objective-C API equivalent of the specified JSValueRef.
+*/
++ (JSValue *)valueWithJSValueRef:(JSValueRef)value inContext:(JSContext *)context;
+
+/*!
+@property
+@abstract Returns the C API counterpart wrapped by a JSValue.
+@result The C API equivalent of this JSValue.
+*/
+@property (readonly) JSValueRef JSValueRef;
+@end
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+@group Property Descriptor Constants
+@discussion These keys may assist in creating a property descriptor for use with the
+ defineProperty method on JSValue.
+ Property descriptors must fit one of three descriptions:
+
+ Data Descriptor:
+  - A descriptor containing one or both of the keys value and writable,
+    and optionally containing one or both of the keys enumerable and
+    configurable. A data descriptor may not contain either the get or
+    set key.
+    A data descriptor may be used to create or modify the attributes of a
+    data property on an object (replacing any existing accessor property).
+
+ Accessor Descriptor:
+  - A descriptor containing one or both of the keys get and set, and
+    optionally containing one or both of the keys enumerable and
+    configurable. An accessor descriptor may not contain either the value
+    or writable key.
+    An accessor descriptor may be used to create or modify the attributes of
+    an accessor property on an object (replacing any existing data property).
+
+ Generic Descriptor:
+  - A descriptor containing one or both of the keys enumerable and
+    configurable. A generic descriptor may not contain any of the keys
+    value, writable, get, or set.
+    A generic descriptor may be used to modify the attributes of an existing
+    data or accessor property, or to create a new data property.
+*/
+/*!
+@const
+*/
+JS_EXPORT extern NSString * const JSPropertyDescriptorWritableKey;
+/*!
+@const
+*/
+JS_EXPORT extern NSString * const JSPropertyDescriptorEnumerableKey;
+/*!
+@const
+*/
+JS_EXPORT extern NSString * const JSPropertyDescriptorConfigurableKey;
+/*!
+@const
+*/
+JS_EXPORT extern NSString * const JSPropertyDescriptorValueKey;
+/*!
+@const
+*/
+JS_EXPORT extern NSString * const JSPropertyDescriptorGetKey;
+/*!
+@const
+*/
+JS_EXPORT extern NSString * const JSPropertyDescriptorSetKey;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
+
+#endif // JSValue_h
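Putting the descriptor keys together, one plausible use of defineProperty:descriptor: covering a data descriptor and an accessor descriptor (illustrative only):

    JSContext *context = [[JSContext alloc] init];
    JSValue *object = [JSValue valueWithNewObjectInContext:context];

    // Data descriptor: a read-only, non-enumerable constant.
    [object defineProperty:@"version" descriptor:@{
        JSPropertyDescriptorValueKey : @1,
        JSPropertyDescriptorWritableKey : @NO,
        JSPropertyDescriptorEnumerableKey : @NO,
        JSPropertyDescriptorConfigurableKey : @NO
    }];

    // Accessor descriptor: a getter backed by a JavaScript function.
    JSValue *getter = [context evaluateScript:@"(function() { return Date.now(); })"];
    [object defineProperty:@"now" descriptor:@{
        JSPropertyDescriptorGetKey : getter,
        JSPropertyDescriptorEnumerableKey : @YES
    }];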
diff --git a/Source/JavaScriptCore/API/JSValueInternal.h b/Source/JavaScriptCore/API/JSValueInternal.h
new file mode 100644
index 000000000..4f1a8f69c
--- /dev/null
+++ b/Source/JavaScriptCore/API/JSValueInternal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSValueInternal_h
+#define JSValueInternal_h
+
+#import
+#import
+
+#if JSC_OBJC_API_ENABLED
+
+@interface JSValue(Internal)
+
+JSValueRef valueInternalValue(JSValue *);
+
+- (JSValue *)initWithValue:(JSValueRef)value inContext:(JSContext *)context;
+
+JSValueRef objectToValue(JSContext *, id);
+id valueToObject(JSContext *, JSValueRef);
+id valueToNumber(JSGlobalContextRef, JSValueRef, JSValueRef* exception);
+id valueToString(JSGlobalContextRef, JSValueRef, JSValueRef* exception);
+id valueToDate(JSGlobalContextRef, JSValueRef, JSValueRef* exception);
+id valueToArray(JSGlobalContextRef, JSValueRef, JSValueRef* exception);
+id valueToDictionary(JSGlobalContextRef, JSValueRef, JSValueRef* exception);
+
++ (SEL)selectorForStructToValue:(const char *)structTag;
++ (SEL)selectorForValueToStruct:(const char *)structTag;
+
+@end
+
+NSInvocation *typeToValueInvocationFor(const char* encodedType);
+NSInvocation *valueToTypeInvocationFor(const char* encodedType);
+
+#endif
+
+#endif // JSValueInternal_h
diff --git a/Source/JavaScriptCore/API/JSValueRef.cpp b/Source/JavaScriptCore/API/JSValueRef.cpp
index 3e0cbbd7a..64ac6c324 100644
--- a/Source/JavaScriptCore/API/JSValueRef.cpp
+++ b/Source/JavaScriptCore/API/JSValueRef.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006, 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2006, 2007, 2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -10,44 +10,48 @@
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include "config.h" #include "JSValueRef.h" #include "APICast.h" -#include "APIShims.h" +#include "APIUtils.h" +#include "DateInstance.h" +#include "Exception.h" #include "JSAPIWrapperObject.h" +#include "JSCInlines.h" #include "JSCJSValue.h" #include "JSCallbackObject.h" #include "JSGlobalObject.h" #include "JSONObject.h" #include "JSString.h" #include "LiteralParser.h" -#include "Operations.h" #include "Protect.h" - +#include #include #include #include -#include // for std::min - #if PLATFORM(MAC) #include #endif +#if ENABLE(REMOTE_INSPECTOR) +#include "JSGlobalObjectInspectorController.h" +#endif + using namespace JSC; #if PLATFORM(MAC) @@ -68,7 +72,7 @@ static bool evernoteHackNeeded() return kJSTypeUndefined; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsValue = toJS(exec, value); @@ -93,10 +97,9 @@ bool JSValueIsUndefined(JSContextRef ctx, JSValueRef value) return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - JSValue jsValue = toJS(exec, value); - return jsValue.isUndefined(); + return toJS(exec, value).isUndefined(); } bool JSValueIsNull(JSContextRef ctx, JSValueRef value) @@ -106,10 +109,9 @@ bool JSValueIsNull(JSContextRef ctx, JSValueRef value) return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - JSValue jsValue = toJS(exec, value); - return jsValue.isNull(); + return toJS(exec, value).isNull(); } bool JSValueIsBoolean(JSContextRef ctx, JSValueRef value) @@ -119,10 +121,9 @@ bool JSValueIsBoolean(JSContextRef ctx, JSValueRef value) return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - JSValue jsValue = toJS(exec, value); - return jsValue.isBoolean(); + return toJS(exec, value).isBoolean(); } bool JSValueIsNumber(JSContextRef ctx, JSValueRef value) @@ -132,10 +133,9 @@ bool JSValueIsNumber(JSContextRef ctx, JSValueRef value) return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - JSValue jsValue = toJS(exec, value); - return jsValue.isNumber(); + return toJS(exec, value).isNumber(); } bool JSValueIsString(JSContextRef ctx, JSValueRef value) @@ -145,10 +145,9 @@ bool JSValueIsString(JSContextRef ctx, JSValueRef value) return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - JSValue jsValue = toJS(exec, value); - return jsValue.isString(); + return toJS(exec, value).isString(); } bool JSValueIsObject(JSContextRef ctx, JSValueRef value) @@ -158,10 +157,35 @@ bool JSValueIsObject(JSContextRef ctx, JSValueRef value) return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - JSValue jsValue = toJS(exec, value); - return jsValue.isObject(); + return toJS(exec, value).isObject(); +} + +bool JSValueIsArray(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(exec); + + return toJS(exec, value).inherits(vm, JSArray::info()); +} + +bool JSValueIsDate(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(exec); + + return toJS(exec, value).inherits(vm, DateInstance::info()); } bool JSValueIsObjectOfClass(JSContextRef ctx, JSValueRef value, JSClassRef jsClass) @@ -171,17 +195,21 @@ bool 
JSValueIsObjectOfClass(JSContextRef ctx, JSValueRef value, JSClassRef jsCla return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + VM& vm = exec->vm(); + JSLockHolder locker(exec); JSValue jsValue = toJS(exec, value); if (JSObject* o = jsValue.getObject()) { - if (o->inherits(JSCallbackObject::info())) + if (o->inherits(vm, JSProxy::info())) + o = jsCast(o)->target(); + + if (o->inherits(vm, JSCallbackObject::info())) return jsCast*>(o)->inherits(jsClass); - if (o->inherits(JSCallbackObject::info())) + if (o->inherits(vm, JSCallbackObject::info())) return jsCast*>(o)->inherits(jsClass); #if JSC_OBJC_API_ENABLED - if (o->inherits(JSCallbackObject::info())) + if (o->inherits(vm, JSCallbackObject::info())) return jsCast*>(o)->inherits(jsClass); #endif } @@ -195,17 +223,14 @@ bool JSValueIsEqual(JSContextRef ctx, JSValueRef a, JSValueRef b, JSValueRef* ex return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsA = toJS(exec, a); JSValue jsB = toJS(exec, b); bool result = JSValue::equal(exec, jsA, jsB); // false if an exception is thrown - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); - } + handleExceptionIfNeeded(exec, exception); + return result; } @@ -216,7 +241,7 @@ bool JSValueIsStrictEqual(JSContextRef ctx, JSValueRef a, JSValueRef b) return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsA = toJS(exec, a); JSValue jsB = toJS(exec, b); @@ -231,7 +256,7 @@ bool JSValueIsInstanceOfConstructor(JSContextRef ctx, JSValueRef value, JSObject return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsValue = toJS(exec, value); @@ -239,11 +264,7 @@ bool JSValueIsInstanceOfConstructor(JSContextRef ctx, JSValueRef value, JSObject if (!jsConstructor->structure()->typeInfo().implementsHasInstance()) return false; bool result = jsConstructor->hasInstance(exec, jsValue); // false if an exception is thrown - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); - } + handleExceptionIfNeeded(exec, exception); return result; } @@ -254,7 +275,7 @@ JSValueRef JSValueMakeUndefined(JSContextRef ctx) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); return toRef(exec, jsUndefined()); } @@ -266,7 +287,7 @@ JSValueRef JSValueMakeNull(JSContextRef ctx) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); return toRef(exec, jsNull()); } @@ -278,7 +299,7 @@ JSValueRef JSValueMakeBoolean(JSContextRef ctx, bool value) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); return toRef(exec, jsBoolean(value)); } @@ -290,15 +311,9 @@ JSValueRef JSValueMakeNumber(JSContextRef ctx, double value) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - // Our JSValue representation relies on a standard bit pattern for NaN. NaNs - // generated internally to JavaScriptCore naturally have that representation, - // but an external NaN might not. 
- if (std::isnan(value)) - value = QNaN; - - return toRef(exec, jsNumber(value)); + return toRef(exec, jsNumber(purifyNaN(value))); } JSValueRef JSValueMakeString(JSContextRef ctx, JSStringRef string) @@ -308,9 +323,9 @@ JSValueRef JSValueMakeString(JSContextRef ctx, JSStringRef string) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); - return toRef(exec, jsString(exec, string->string())); + return toRef(exec, jsString(exec, string ? string->string() : String())); } JSValueRef JSValueMakeFromJSONString(JSContextRef ctx, JSStringRef string) @@ -320,14 +335,14 @@ JSValueRef JSValueMakeFromJSONString(JSContextRef ctx, JSStringRef string) return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); String str = string->string(); unsigned length = str.length(); - if (length && str.is8Bit()) { + if (!length || str.is8Bit()) { LiteralParser parser(exec, str.characters8(), length, StrictJSON); return toRef(exec, parser.tryLiteralParse()); } - LiteralParser parser(exec, str.deprecatedCharacters(), length, StrictJSON); + LiteralParser parser(exec, str.characters16(), length, StrictJSON); return toRef(exec, parser.tryLiteralParse()); } @@ -338,17 +353,13 @@ JSStringRef JSValueCreateJSONString(JSContextRef ctx, JSValueRef apiValue, unsig return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue value = toJS(exec, apiValue); String result = JSONStringify(exec, value, indent); if (exception) *exception = 0; - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) return 0; - } return OpaqueJSString::create(result).leakRef(); } @@ -359,7 +370,7 @@ bool JSValueToBoolean(JSContextRef ctx, JSValueRef value) return false; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsValue = toJS(exec, value); return jsValue.toBoolean(exec); @@ -369,20 +380,16 @@ double JSValueToNumber(JSContextRef ctx, JSValueRef value, JSValueRef* exception { if (!ctx) { ASSERT_NOT_REACHED(); - return QNaN; + return PNaN; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsValue = toJS(exec, value); double number = jsValue.toNumber(exec); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); - number = QNaN; - } + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + number = PNaN; return number; } @@ -393,18 +400,14 @@ JSStringRef JSValueToStringCopy(JSContextRef ctx, JSValueRef value, JSValueRef* return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsValue = toJS(exec, value); - RefPtr stringRef(OpaqueJSString::create(jsValue.toString(exec)->value(exec))); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); - stringRef.clear(); - } - return stringRef.release().leakRef(); + auto stringRef(OpaqueJSString::create(jsValue.toWTFString(exec))); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + stringRef = nullptr; + return stringRef.leakRef(); } JSObjectRef JSValueToObject(JSContextRef ctx, JSValueRef value, JSValueRef* exception) @@ -414,19 +417,15 @@ JSObjectRef JSValueToObject(JSContextRef ctx, JSValueRef value, 
JSValueRef* exce return 0; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsValue = toJS(exec, value); JSObjectRef objectRef = toRef(jsValue.toObject(exec)); - if (exec->hadException()) { - if (exception) - *exception = toRef(exec, exec->exception()); - exec->clearException(); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) objectRef = 0; - } return objectRef; -} +} void JSValueProtect(JSContextRef ctx, JSValueRef value) { @@ -435,7 +434,7 @@ void JSValueProtect(JSContextRef ctx, JSValueRef value) return; } ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsValue = toJSForGC(exec, value); gcProtect(jsValue); @@ -449,7 +448,7 @@ void JSValueUnprotect(JSContextRef ctx, JSValueRef value) #endif ExecState* exec = toJS(ctx); - APIEntryShim entryShim(exec); + JSLockHolder locker(exec); JSValue jsValue = toJSForGC(exec, value); gcUnprotect(jsValue); diff --git a/Source/JavaScriptCore/API/JSValueRef.h b/Source/JavaScriptCore/API/JSValueRef.h index 97385c01e..9815de783 100644 --- a/Source/JavaScriptCore/API/JSValueRef.h +++ b/Source/JavaScriptCore/API/JSValueRef.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -52,6 +52,36 @@ typedef enum { kJSTypeObject } JSType; +/*! + @enum JSTypedArrayType + @abstract A constant identifying the Typed Array type of a JSObjectRef. 
+ @constant kJSTypedArrayTypeInt8Array Int8Array + @constant kJSTypedArrayTypeInt16Array Int16Array + @constant kJSTypedArrayTypeInt32Array Int32Array + @constant kJSTypedArrayTypeUint8Array Uint8Array + @constant kJSTypedArrayTypeUint8ClampedArray Uint8ClampedArray + @constant kJSTypedArrayTypeUint16Array Uint16Array + @constant kJSTypedArrayTypeUint32Array Uint32Array + @constant kJSTypedArrayTypeFloat32Array Float32Array + @constant kJSTypedArrayTypeFloat64Array Float64Array + @constant kJSTypedArrayTypeArrayBuffer ArrayBuffer + @constant kJSTypedArrayTypeNone Not a Typed Array + + */ +typedef enum { + kJSTypedArrayTypeInt8Array, + kJSTypedArrayTypeInt16Array, + kJSTypedArrayTypeInt32Array, + kJSTypedArrayTypeUint8Array, + kJSTypedArrayTypeUint8ClampedArray, + kJSTypedArrayTypeUint16Array, + kJSTypedArrayTypeUint32Array, + kJSTypedArrayTypeFloat32Array, + kJSTypedArrayTypeFloat64Array, + kJSTypedArrayTypeArrayBuffer, + kJSTypedArrayTypeNone, +} JSTypedArrayType CF_ENUM_AVAILABLE(10_12, 10_0); + #ifdef __cplusplus extern "C" { #endif @@ -63,7 +93,7 @@ extern "C" { @param value The JSValue whose type you want to obtain. @result A value of type JSType that identifies value's type. */ -JS_EXPORT JSType JSValueGetType(JSContextRef ctx, JSValueRef); +JS_EXPORT JSType JSValueGetType(JSContextRef ctx, JSValueRef value); /*! @function @@ -129,6 +159,34 @@ JS_EXPORT bool JSValueIsObject(JSContextRef ctx, JSValueRef value); */ JS_EXPORT bool JSValueIsObjectOfClass(JSContextRef ctx, JSValueRef value, JSClassRef jsClass); +/*! +@function +@abstract Tests whether a JavaScript value is an array. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value is an array, otherwise false. +*/ +JS_EXPORT bool JSValueIsArray(JSContextRef ctx, JSValueRef value) CF_AVAILABLE(10_11, 9_0); + +/*! +@function +@abstract Tests whether a JavaScript value is a date. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value is a date, otherwise false. +*/ +JS_EXPORT bool JSValueIsDate(JSContextRef ctx, JSValueRef value) CF_AVAILABLE(10_11, 9_0); + +/*! +@function +@abstract Returns a JavaScript value's Typed Array type. +@param ctx The execution context to use. +@param value The JSValue whose Typed Array type to return. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result A value of type JSTypedArrayType that identifies value's Typed Array type, or kJSTypedArrayTypeNone if the value is not a Typed Array object. + */ +JS_EXPORT JSTypedArrayType JSValueGetTypedArrayType(JSContextRef ctx, JSValueRef value, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + /* Comparing values */ /*! diff --git a/Source/JavaScriptCore/API/JSVirtualMachine.h b/Source/JavaScriptCore/API/JSVirtualMachine.h new file mode 100644 index 000000000..ccf9264d5 --- /dev/null +++ b/Source/JavaScriptCore/API/JSVirtualMachine.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import + +#if JSC_OBJC_API_ENABLED + +/*! +@interface +@discussion An instance of JSVirtualMachine represents a single JavaScript "object space" + or set of execution resources. Thread safety is supported by locking the + virtual machine, with concurrent JavaScript execution supported by allocating + separate instances of JSVirtualMachine. +*/ +NS_CLASS_AVAILABLE(10_9, 7_0) +@interface JSVirtualMachine : NSObject + +/*! +@methodgroup Creating New Virtual Machines +*/ +/*! +@method +@abstract Create a new JSVirtualMachine. +*/ +- (instancetype)init; + +/*! +@methodgroup Memory Management +*/ +/*! +@method +@abstract Notify the JSVirtualMachine of an external object relationship. +@discussion Allows clients of JSVirtualMachine to make the JavaScript runtime aware of + arbitrary external Objective-C object graphs. The runtime can then use + this information to retain any JavaScript values that are referenced + from somewhere in said object graph. + + For correct behavior clients must make their external object graphs + reachable from within the JavaScript runtime. If an Objective-C object is + reachable from within the JavaScript runtime, all managed references + transitively reachable from it as recorded using + -addManagedReference:withOwner: will be scanned by the garbage collector. +@param object The object that the owner points to. +@param owner The object that owns the pointed to object. +*/ +- (void)addManagedReference:(id)object withOwner:(id)owner; + +/*! +@method +@abstract Notify the JSVirtualMachine that a previous object relationship no longer exists. +@discussion The JavaScript runtime will continue to scan any references that were + reported to it by -addManagedReference:withOwner: until those references are removed. +@param object The object that was formerly owned. +@param owner The former owner. +*/ +- (void)removeManagedReference:(id)object withOwner:(id)owner; + +@end + +#endif diff --git a/Source/JavaScriptCore/API/JSVirtualMachineInternal.h b/Source/JavaScriptCore/API/JSVirtualMachineInternal.h new file mode 100644 index 000000000..5ca9a7f4a --- /dev/null +++ b/Source/JavaScriptCore/API/JSVirtualMachineInternal.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2013, 2017 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSVirtualMachineInternal_h +#define JSVirtualMachineInternal_h + +#if JSC_OBJC_API_ENABLED + +#import + +namespace JSC { +class VM; +class SlotVisitor; +} + +#if defined(__OBJC__) +@class NSMapTable; + +@interface JSVirtualMachine(Internal) + +JSContextGroupRef getGroupFromVirtualMachine(JSVirtualMachine *); + ++ (JSVirtualMachine *)virtualMachineWithContextGroupRef:(JSContextGroupRef)group; + +- (JSContext *)contextForGlobalContextRef:(JSGlobalContextRef)globalContext; +- (void)addContext:(JSContext *)wrapper forGlobalContextRef:(JSGlobalContextRef)globalContext; + +@end +#endif // defined(__OBJC__) + +void scanExternalObjectGraph(JSC::VM&, JSC::SlotVisitor&, void* root); +void scanExternalRememberedSet(JSC::VM&, JSC::SlotVisitor&); + +#endif // JSC_OBJC_API_ENABLED + +#endif // JSVirtualMachineInternal_h diff --git a/Source/JavaScriptCore/API/JSVirtualMachinePrivate.h b/Source/JavaScriptCore/API/JSVirtualMachinePrivate.h new file mode 100644 index 000000000..3e5fd42a0 --- /dev/null +++ b/Source/JavaScriptCore/API/JSVirtualMachinePrivate.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2017 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef JSVirtualMachinePrivate_h +#define JSVirtualMachinePrivate_h + +#if JSC_OBJC_API_ENABLED + +@interface JSVirtualMachine(Private) + +/*! + @method + @abstract Enables SIGILL crash analysis for all JSVirtualMachines. + @discussion Installs a SIGILL crash handler that will collect additional + non-user identifying information about the crash site via os_log_info. + */ +- (void)enableSigillCrashAnalyzer; + +@end + +#endif + +#endif // JSVirtualMachinePrivate_h diff --git a/Source/JavaScriptCore/API/JSWeakObjectMapRefInternal.h b/Source/JavaScriptCore/API/JSWeakObjectMapRefInternal.h index f7b91da51..9037947d7 100644 --- a/Source/JavaScriptCore/API/JSWeakObjectMapRefInternal.h +++ b/Source/JavaScriptCore/API/JSWeakObjectMapRefInternal.h @@ -41,9 +41,9 @@ typedef JSC::WeakGCMap WeakMapType; struct OpaqueJSWeakObjectMap : public RefCounted { public: - static PassRefPtr create(void* data, JSWeakMapDestroyedCallback callback) + static Ref create(JSC::VM& vm, void* data, JSWeakMapDestroyedCallback callback) { - return adoptRef(new OpaqueJSWeakObjectMap(data, callback)); + return adoptRef(*new OpaqueJSWeakObjectMap(vm, data, callback)); } WeakMapType& map() { return m_map; } @@ -54,8 +54,9 @@ public: } private: - OpaqueJSWeakObjectMap(void* data, JSWeakMapDestroyedCallback callback) - : m_data(data) + OpaqueJSWeakObjectMap(JSC::VM& vm, void* data, JSWeakMapDestroyedCallback callback) + : m_map(vm) + , m_data(data) , m_callback(callback) { } diff --git a/Source/JavaScriptCore/API/JSWeakObjectMapRefPrivate.cpp b/Source/JavaScriptCore/API/JSWeakObjectMapRefPrivate.cpp new file mode 100644 index 000000000..28cf24491 --- /dev/null +++ b/Source/JavaScriptCore/API/JSWeakObjectMapRefPrivate.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2010 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "JSWeakObjectMapRefPrivate.h" + +#include "APICast.h" +#include "JSCJSValue.h" +#include "JSCallbackObject.h" +#include "JSWeakObjectMapRefInternal.h" +#include "JSCInlines.h" +#include "Weak.h" +#include "WeakGCMapInlines.h" + +using namespace WTF; +using namespace JSC; + +#ifdef __cplusplus +extern "C" { +#endif + +JSWeakObjectMapRef JSWeakObjectMapCreate(JSContextRef context, void* privateData, JSWeakMapDestroyedCallback callback) +{ + ExecState* exec = toJS(context); + JSLockHolder locker(exec); + RefPtr map = OpaqueJSWeakObjectMap::create(exec->vm(), privateData, callback); + exec->lexicalGlobalObject()->registerWeakMap(map.get()); + return map.get(); +} + +void JSWeakObjectMapSet(JSContextRef ctx, JSWeakObjectMapRef map, void* key, JSObjectRef object) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSObject* obj = toJS(object); + if (!obj) + return; + ASSERT(obj->inherits(exec->vm(), JSProxy::info()) + || obj->inherits(exec->vm(), JSCallbackObject::info()) + || obj->inherits(exec->vm(), JSCallbackObject::info())); + map->map().set(key, obj); +} + +JSObjectRef JSWeakObjectMapGet(JSContextRef ctx, JSWeakObjectMapRef map, void* key) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + return toRef(jsCast(map->map().get(key))); +} + +void JSWeakObjectMapRemove(JSContextRef ctx, JSWeakObjectMapRef map, void* key) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + map->map().remove(key); +} + +// We need to keep this function in the build to keep the nightlies running. +JS_EXPORT bool JSWeakObjectMapClear(JSContextRef, JSWeakObjectMapRef, void*, JSObjectRef); +bool JSWeakObjectMapClear(JSContextRef, JSWeakObjectMapRef, void*, JSObjectRef) +{ + return true; +} + +#ifdef __cplusplus +} +#endif diff --git a/Source/JavaScriptCore/API/JSWeakObjectMapRefPrivate.h b/Source/JavaScriptCore/API/JSWeakObjectMapRefPrivate.h new file mode 100644 index 000000000..a335e23c9 --- /dev/null +++ b/Source/JavaScriptCore/API/JSWeakObjectMapRefPrivate.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2010 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSWeakObjectMapRefPrivate_h
+#define JSWeakObjectMapRefPrivate_h
+
+#include
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*! @typedef JSWeakObjectMapRef A weak map for storing JSObjectRefs */
+typedef struct OpaqueJSWeakObjectMap* JSWeakObjectMapRef;
+
+/*!
+ @typedef JSWeakMapDestroyedCallback
+ @abstract The callback invoked when a JSWeakObjectMapRef is being destroyed.
+ @param map The map that is being destroyed.
+ @param data The private data (if any) that was associated with the map instance.
+ */
+typedef void (*JSWeakMapDestroyedCallback)(JSWeakObjectMapRef map, void* data);
+
+/*!
+ @function
+ @abstract Creates a weak value map that can be used to reference user defined objects without preventing them from being collected.
+ @param ctx The execution context to use.
+ @param data A void* to set as the map's private data. Pass NULL to specify no private data.
+ @param destructor A function to call when the weak map is destroyed.
+ @result A JSWeakObjectMapRef bound to the given context, data and destructor.
+ @discussion The JSWeakObjectMapRef can be used as a storage mechanism to hold custom JS objects without forcing those objects to
+ remain live as JSValueProtect would.
+ */
+JS_EXPORT JSWeakObjectMapRef JSWeakObjectMapCreate(JSContextRef ctx, void* data, JSWeakMapDestroyedCallback destructor);
+
+/*!
+ @function
+ @abstract Associates a JSObjectRef with the given key in a JSWeakObjectMap.
+ @param ctx The execution context to use.
+ @param map The map to operate on.
+ @param key The key to associate a weak reference with.
+ @param object The user defined object to associate with the key.
+ */
+JS_EXPORT void JSWeakObjectMapSet(JSContextRef ctx, JSWeakObjectMapRef map, void* key, JSObjectRef object);
+
+/*!
+ @function
+ @abstract Retrieves the JSObjectRef associated with a key.
+ @param ctx The execution context to use.
+ @param map The map to query.
+ @param key The key to search for.
+ @result Either the live object associated with the provided key, or NULL.
+ */
+JS_EXPORT JSObjectRef JSWeakObjectMapGet(JSContextRef ctx, JSWeakObjectMapRef map, void* key);
+
+/*!
+ @function
+ @abstract Removes the entry for the given key if the key is present, otherwise it has no effect.
+ @param ctx The execution context to use.
+ @param map The map to use.
+ @param key The key to remove.
+ */
+JS_EXPORT void JSWeakObjectMapRemove(JSContextRef ctx, JSWeakObjectMapRef map, void* key);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // JSWeakObjectMapRefPrivate_h
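A sketch of the intended usage (the helper names and the integer key are hypothetical; note that the JSWeakObjectMapSet implementation shown earlier in this patch asserts that the stored object is a callback object or window proxy):

    static void mapDestroyed(JSWeakObjectMapRef map, void* data)
    {
        // Invoked when the owning global object goes away; `data` is the
        // private pointer passed to JSWeakObjectMapCreate.
    }

    static int key; // any stable address can serve as a key

    void rememberWrapper(JSContextRef ctx, JSObjectRef wrapper)
    {
        JSWeakObjectMapRef map = JSWeakObjectMapCreate(ctx, NULL, mapDestroyed);

        // The map references `wrapper` weakly: it does not keep it alive.
        JSWeakObjectMapSet(ctx, map, &key, wrapper);

        // Later: returns NULL once the object has been collected.
        JSObjectRef stillAlive = JSWeakObjectMapGet(ctx, map, &key);
        if (!stillAlive)
            JSWeakObjectMapRemove(ctx, map, &key);
    }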
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import +#import + +#if JSC_OBJC_API_ENABLED + +@interface JSWrapperMap : NSObject + +- (id)initWithContext:(JSContext *)context; + +- (JSValue *)jsWrapperForObject:(id)object; + +- (JSValue *)objcWrapperForJSValueRef:(JSValueRef)value; + +@end + +id tryUnwrapObjcObject(JSGlobalContextRef, JSValueRef); + +bool supportsInitMethodConstructors(); +Protocol *getJSExportProtocol(); +Class getNSBlockClass(); + +#endif diff --git a/Source/JavaScriptCore/API/JavaScript.h b/Source/JavaScriptCore/API/JavaScript.h index f8d92d8f9..251e3937b 100644 --- a/Source/JavaScriptCore/API/JavaScript.h +++ b/Source/JavaScriptCore/API/JavaScript.h @@ -11,10 +11,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -31,6 +31,7 @@ #include #include #include +#include #include #endif /* JavaScript_h */ diff --git a/Source/JavaScriptCore/API/JavaScriptCore.h b/Source/JavaScriptCore/API/JavaScriptCore.h new file mode 100644 index 000000000..b2fde1dbe --- /dev/null +++ b/Source/JavaScriptCore/API/JavaScriptCore.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JavaScriptCore_h
+#define JavaScriptCore_h
+
+#include <JavaScriptCore/JavaScript.h>
+#include <JavaScriptCore/JSStringRefCF.h>
+
+#if defined(__OBJC__) && JSC_OBJC_API_ENABLED
+
+#import "JSContext.h"
+#import "JSValue.h"
+#import "JSManagedValue.h"
+#import "JSVirtualMachine.h"
+#import "JSExport.h"
+
+#endif
+
+#endif /* JavaScriptCore_h */
diff --git a/Source/JavaScriptCore/API/ObjCCallbackFunction.h b/Source/JavaScriptCore/API/ObjCCallbackFunction.h
index 046bf650d..4d5b7368d 100644
--- a/Source/JavaScriptCore/API/ObjCCallbackFunction.h
+++ b/Source/JavaScriptCore/API/ObjCCallbackFunction.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -36,7 +36,7 @@ JSObjectRef objCCallbackFunctionForMethod(JSContext *, Class, Protocol *, BOOL i
 JSObjectRef objCCallbackFunctionForBlock(JSContext *, id);
 JSObjectRef objCCallbackFunctionForInit(JSContext *, Class, Protocol *, SEL, const char* types);
 
-id tryUnwrapConstructor(JSObjectRef);
+id tryUnwrapConstructor(JSC::VM*, JSObjectRef);
 
 #endif
 
 namespace JSC {
@@ -48,7 +48,7 @@ class ObjCCallbackFunction : public InternalFunction {
 public:
     typedef InternalFunction Base;
 
-    static ObjCCallbackFunction* create(VM&, JSGlobalObject*, const String& name, PassOwnPtr<ObjCCallbackFunctionImpl>);
+    static ObjCCallbackFunction* create(VM&, JSGlobalObject*, const String& name, std::unique_ptr<ObjCCallbackFunctionImpl>);
 
     static void destroy(JSCell*);
 
     static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
@@ -62,7 +62,7 @@ public:
 
     ObjCCallbackFunctionImpl* impl() const { return m_impl.get(); }
 
 protected:
-    ObjCCallbackFunction(VM&, JSGlobalObject*, JSObjectCallAsFunctionCallback, JSObjectCallAsConstructorCallback, PassOwnPtr<ObjCCallbackFunctionImpl>);
+    ObjCCallbackFunction(VM&, Structure*, JSObjectCallAsFunctionCallback, JSObjectCallAsConstructorCallback, std::unique_ptr<ObjCCallbackFunctionImpl>);
 
 private:
     static CallType getCallData(JSCell*, CallData&);
@@ -73,7 +73,7 @@ private:
 
     JSObjectCallAsFunctionCallback m_functionCallback;
     JSObjectCallAsConstructorCallback m_constructCallback;
 
-    OwnPtr<ObjCCallbackFunctionImpl> m_impl;
+    std::unique_ptr<ObjCCallbackFunctionImpl> m_impl;
 };
 
 } // namespace JSC
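The ObjCCallbackFunction changes above are part of the wider PassOwnPtr/OwnPtr to std::unique_ptr migration; the call-site shift it implies is sketched here for illustration only (argument lists elided, not taken from this patch):

    // Before: ownership handed off through WTF's PassOwnPtr.
    //     ObjCCallbackFunction::create(vm, globalObject, name,
    //         adoptPtr(new ObjCCallbackFunctionImpl(/* ... */)));
    // After: ownership is explicit in the signature via std::unique_ptr.
    //     ObjCCallbackFunction::create(vm, globalObject, name,
    //         std::make_unique<ObjCCallbackFunctionImpl>(/* ... */));

diff --git a/Source/JavaScriptCore/API/ObjcRuntimeExtras.h b/Source/JavaScriptCore/API/ObjcRuntimeExtras.h
new file mode 100644
index 000000000..128df5c90
--- /dev/null
+++ b/Source/JavaScriptCore/API/ObjcRuntimeExtras.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright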
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <objc/Protocol.h>
+#import <objc/runtime.h>
+#import <wtf/HashSet.h>
+#import <wtf/Vector.h>
+
+inline bool protocolImplementsProtocol(Protocol *candidate, Protocol *target)
+{
+    unsigned protocolProtocolsCount;
+    Protocol ** protocolProtocols = protocol_copyProtocolList(candidate, &protocolProtocolsCount);
+    for (unsigned i = 0; i < protocolProtocolsCount; ++i) {
+        if (protocol_isEqual(protocolProtocols[i], target)) {
+            free(protocolProtocols);
+            return true;
+        }
+    }
+    free(protocolProtocols);
+    return false;
+}
+
+inline void forEachProtocolImplementingProtocol(Class cls, Protocol *target, void (^callback)(Protocol *))
+{
+    ASSERT(cls);
+    ASSERT(target);
+
+    Vector<Protocol *> worklist;
+    HashSet<void*> visited;
+
+    // Initially fill the worklist with the Class's protocols.
+    unsigned protocolsCount;
+    Protocol ** protocols = class_copyProtocolList(cls, &protocolsCount);
+    worklist.append(protocols, protocolsCount);
+    free(protocols);
+
+    while (!worklist.isEmpty()) {
+        Protocol *protocol = worklist.last();
+        worklist.removeLast();
+
+        // Are we encountering this Protocol for the first time?
+        if (!visited.add(protocol).isNewEntry)
+            continue;
+
+        // If it implements the protocol, make the callback.
+        if (protocolImplementsProtocol(protocol, target))
+            callback(protocol);
+
+        // Add incorporated protocols to the worklist.
+        protocols = protocol_copyProtocolList(protocol, &protocolsCount);
+        worklist.append(protocols, protocolsCount);
+        free(protocols);
+    }
+}
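+
+// Illustrative use of the walker above (an addition for this edit, not part of
+// the original patch): enumerate every protocol on a class that incorporates
+// JSExport, which is the kind of query the Obj-C API's wrapper generation makes.
+//
+//     forEachProtocolImplementingProtocol([obj class], @protocol(JSExport), ^(Protocol *exported) {
+//         NSLog(@"%s is exported to JavaScript", protocol_getName(exported));
+//     });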
+
+inline void forEachMethodInClass(Class cls, void (^callback)(Method))
+{
+    unsigned count;
+    Method* methods = class_copyMethodList(cls, &count);
+    for (unsigned i = 0; i < count; ++i)
+        callback(methods[i]);
+    free(methods);
+}
+
+inline void forEachMethodInProtocol(Protocol *protocol, BOOL isRequiredMethod, BOOL isInstanceMethod, void (^callback)(SEL, const char*))
+{
+    unsigned count;
+    struct objc_method_description* methods = protocol_copyMethodDescriptionList(protocol, isRequiredMethod, isInstanceMethod, &count);
+    for (unsigned i = 0; i < count; ++i)
+        callback(methods[i].name, methods[i].types);
+    free(methods);
+}
+
+inline void forEachPropertyInProtocol(Protocol *protocol, void (^callback)(objc_property_t))
+{
+    unsigned count;
+    objc_property_t* properties = protocol_copyPropertyList(protocol, &count);
+    for (unsigned i = 0; i < count; ++i)
+        callback(properties[i]);
+    free(properties);
+}
+
+template <char open, char close>
+void skipPair(const char*& position)
+{
+    size_t count = 1;
+    do {
+        char c = *position++;
+        if (!c)
+            @throw [NSException exceptionWithName:NSInternalInconsistencyException reason:@"Malformed type encoding" userInfo:nil];
+        if (c == open)
+            ++count;
+        else if (c == close)
+            --count;
+    } while (count);
+}
+
+class StringRange {
+    WTF_MAKE_NONCOPYABLE(StringRange);
+public:
+    StringRange(const char* begin, const char* end) : m_ptr(strndup(begin, end - begin)) { }
+    ~StringRange() { free(m_ptr); }
+    operator const char*() const { return m_ptr; }
+    const char* get() const { return m_ptr; }
+
+private:
+    char* m_ptr;
+};
+
+class StructBuffer {
+    WTF_MAKE_NONCOPYABLE(StructBuffer);
+public:
+    StructBuffer(const char* encodedType)
+    {
+        NSUInteger size, alignment;
+        NSGetSizeAndAlignment(encodedType, &size, &alignment);
+        --alignment;
+        m_allocation = static_cast<char*>(malloc(size + alignment));
+        m_buffer = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(m_allocation) + alignment) & ~alignment);
+    }
+
+    ~StructBuffer() { free(m_allocation); }
+    operator void*() const { return m_buffer; }
+
+private:
+    void* m_allocation;
+    void* m_buffer;
+};
+
+template <typename DelegateType>
+typename DelegateType::ResultType parseObjCType(const char*& position)
+{
+    ASSERT(*position);
+
+    switch (*position++) {
+    case 'c':
+        return DelegateType::template typeInteger<char>();
+    case 'i':
+        return DelegateType::template typeInteger<int>();
+    case 's':
+        return DelegateType::template typeInteger<short>();
+    case 'l':
+        return DelegateType::template typeInteger<long>();
+    case 'q':
+        return DelegateType::template typeDouble<long long>();
+    case 'C':
+        return DelegateType::template typeInteger<unsigned char>();
+    case 'I':
+        return DelegateType::template typeInteger<unsigned>();
+    case 'S':
+        return DelegateType::template typeInteger<unsigned short>();
+    case 'L':
+        return DelegateType::template typeInteger<unsigned long>();
+    case 'Q':
+        return DelegateType::template typeDouble<unsigned long long>();
+    case 'f':
+        return DelegateType::template typeDouble<float>();
+    case 'd':
+        return DelegateType::template typeDouble<double>();
+    case 'B':
+        return DelegateType::typeBool();
+    case 'v':
+        return DelegateType::typeVoid();
+
+    case '@': { // An object (whether statically typed or typed id)
+        if (position[0] == '?'
&& position[1] == '<') { + position += 2; + const char* begin = position; + skipPair<'<','>'>(position); + return DelegateType::typeBlock(begin, position - 1); + } + + if (*position == '"') { + const char* begin = position + 1; + const char* protocolPosition = strchr(begin, '<'); + const char* endOfType = strchr(begin, '"'); + position = endOfType + 1; + + // There's no protocol involved in this type, so just handle the class name. + if (!protocolPosition || protocolPosition > endOfType) + return DelegateType::typeOfClass(begin, endOfType); + // We skipped the class name and went straight to the protocol, so this is an id type. + if (begin == protocolPosition) + return DelegateType::typeId(); + // We have a class name with a protocol. For now, ignore the protocol. + return DelegateType::typeOfClass(begin, protocolPosition); + } + + return DelegateType::typeId(); + } + + case '{': { // {name=type...} A structure + const char* begin = position - 1; + skipPair<'{','}'>(position); + return DelegateType::typeStruct(begin, position); + } + + // NOT supporting C strings, arrays, pointers, unions, bitfields, function pointers. + case '*': // A character string (char *) + case '[': // [array type] An array + case '(': // (name=type...) A union + case 'b': // bnum A bit field of num bits + case '^': // ^type A pointer to type + case '?': // An unknown type (among other things, this code is used for function pointers) + // NOT supporting Objective-C Class, SEL + case '#': // A class object (Class) + case ':': // A method selector (SEL) + default: + return nil; + } +} + +extern "C" { + // Forward declare some Objective-C runtime internal methods that are not API. + const char *_protocol_getMethodTypeEncoding(Protocol *, SEL, BOOL isRequiredMethod, BOOL isInstanceMethod); + id objc_initWeak(id *, id); + void objc_destroyWeak(id *); + bool _Block_has_signature(void *); + const char * _Block_signature(void *); +} diff --git a/Source/JavaScriptCore/API/OpaqueJSString.cpp b/Source/JavaScriptCore/API/OpaqueJSString.cpp index 5cc2e0ab8..07a79ad99 100644 --- a/Source/JavaScriptCore/API/OpaqueJSString.cpp +++ b/Source/JavaScriptCore/API/OpaqueJSString.cpp @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -28,11 +28,13 @@
 
 #include "CallFrame.h"
 #include "Identifier.h"
+#include "IdentifierInlines.h"
 #include "JSGlobalObject.h"
+#include <wtf/text/StringView.h>
 
 using namespace JSC;
 
-PassRefPtr<OpaqueJSString> OpaqueJSString::create(const String& string)
+RefPtr<OpaqueJSString> OpaqueJSString::create(const String& string)
 {
     if (string.isNull())
         return nullptr;
@@ -47,7 +49,7 @@ OpaqueJSString::~OpaqueJSString()
     if (!characters)
         return;
 
-    if (!m_string.is8Bit() && m_string.deprecatedCharacters() == characters)
+    if (!m_string.is8Bit() && m_string.characters16() == characters)
         return;
 
     fastFree(characters);
@@ -55,32 +57,26 @@
 
 String OpaqueJSString::string() const
 {
-    if (!this)
-        return String();
-
     // Return a copy of the wrapped string, because the caller may make it an Identifier.
     return m_string.isolatedCopy();
 }
 
 Identifier OpaqueJSString::identifier(VM* vm) const
 {
-    if (!this || m_string.isNull())
+    if (m_string.isNull())
         return Identifier();
 
     if (m_string.isEmpty())
         return Identifier(Identifier::EmptyIdentifier);
 
     if (m_string.is8Bit())
-        return Identifier(vm, m_string.characters8(), m_string.length());
+        return Identifier::fromString(vm, m_string.characters8(), m_string.length());
 
-    return Identifier(vm, m_string.characters16(), m_string.length());
+    return Identifier::fromString(vm, m_string.characters16(), m_string.length());
 }
 
 const UChar* OpaqueJSString::characters()
 {
-    if (!this)
-        return nullptr;
-
     // m_characters is put in a local here to avoid an extra atomic load.
     UChar* characters = m_characters;
     if (characters)
@@ -91,12 +87,7 @@
 
     unsigned length = m_string.length();
     UChar* newCharacters = static_cast<UChar*>(fastMalloc(length * sizeof(UChar)));
-
-    if (m_string.is8Bit()) {
-        for (size_t i = 0; i < length; ++i)
-            newCharacters[i] = m_string.characters8()[i];
-    } else
-        memcpy(newCharacters, m_string.characters16(), length * sizeof(UChar));
+    StringView(m_string).getCharactersWithUpconvert(newCharacters);
 
     if (!m_characters.compare_exchange_strong(characters, newCharacters)) {
         fastFree(newCharacters);
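The characters() hunk above is a lock-free publish: the first thread to install its freshly upconverted buffer wins, and any loser frees its own copy. A minimal sketch of that idiom, simplified from the code above (illustrative only):

    UChar* expected = nullptr; // publish only into an empty slot
    if (!m_characters.compare_exchange_strong(expected, newCharacters))
        fastFree(newCharacters); // another thread already published; discard ours

diff --git a/Source/JavaScriptCore/API/OpaqueJSString.h b/Source/JavaScriptCore/API/OpaqueJSString.h
index f1dd6a43d..208131b3b 100644
--- a/Source/JavaScriptCore/API/OpaqueJSString.h
+++ b/Source/JavaScriptCore/API/OpaqueJSString.h
@@ -10,10 +10,10 @@
  * notice, this list of conditions and the following disclaimer in the
  * documentation and/or other materials provided with the distribution.
  *
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR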
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -36,29 +36,29 @@ namespace JSC {
 }
 
 struct OpaqueJSString : public ThreadSafeRefCounted<OpaqueJSString> {
-    static PassRefPtr<OpaqueJSString> create()
+    static Ref<OpaqueJSString> create()
     {
-        return adoptRef(new OpaqueJSString);
+        return adoptRef(*new OpaqueJSString);
     }
 
-    static PassRefPtr<OpaqueJSString> create(const LChar* characters, unsigned length)
+    static Ref<OpaqueJSString> create(const LChar* characters, unsigned length)
     {
-        return adoptRef(new OpaqueJSString(characters, length));
+        return adoptRef(*new OpaqueJSString(characters, length));
     }
 
-    static PassRefPtr<OpaqueJSString> create(const UChar* characters, unsigned length)
+    static Ref<OpaqueJSString> create(const UChar* characters, unsigned length)
     {
-        return adoptRef(new OpaqueJSString(characters, length));
+        return adoptRef(*new OpaqueJSString(characters, length));
     }
 
-    JS_EXPORT_PRIVATE static PassRefPtr<OpaqueJSString> create(const String&);
+    JS_EXPORT_PRIVATE static RefPtr<OpaqueJSString> create(const String&);
 
     JS_EXPORT_PRIVATE ~OpaqueJSString();
 
-    bool is8Bit() { return this ? m_string.is8Bit() : false; }
-    const LChar* characters8() { return this ? m_string.characters8() : nullptr; }
-    const UChar* characters16() { return this ? m_string.characters16() : nullptr; }
-    unsigned length() { return this ? m_string.length() : 0; }
+    bool is8Bit() { return m_string.is8Bit(); }
+    const LChar* characters8() { return m_string.characters8(); }
+    const UChar* characters16() { return m_string.characters16(); }
+    unsigned length() { return m_string.length(); }
 
     const UChar* characters();
diff --git a/Source/JavaScriptCore/API/WebKitAvailability.h b/Source/JavaScriptCore/API/WebKitAvailability.h
index 6af619825..ab53183dc 100644
--- a/Source/JavaScriptCore/API/WebKitAvailability.h
+++ b/Source/JavaScriptCore/API/WebKitAvailability.h
@@ -26,11 +26,58 @@
 #ifndef __WebKitAvailability__
 #define __WebKitAvailability__
 
-#if defined(__APPLE__) && !defined(BUILDING_GTK__)
+#if defined(__APPLE__)
+
 #include <AvailabilityMacros.h>
 #include <CoreFoundation/CoreFoundation.h>
+
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100
+/* To support availability macros that mention newer OS X versions when building on older OS X versions,
+   we provide our own definitions of the underlying macros that the availability macros expand to. We're
+   free to expand the macros as no-ops since frameworks built on older OS X versions only ship bundled with
+   an application rather than as part of the system.
+*/
+
+#ifndef __NSi_10_10 // Building from trunk rather than SDK.
+#define __NSi_10_10 introduced=10.0 // Use 10.0 to indicate that everything is available.
+#endif
+
+#ifndef __NSi_10_11 // Building from trunk rather than SDK.
+#define __NSi_10_11 introduced=10.0 // Use 10.0 to indicate that everything is available.
+#endif
+
+#ifndef __NSi_10_12 // Building from trunk rather than SDK.
+#define __NSi_10_12 introduced=10.0 // Use 10.0 to indicate that everything is available.
+#endif + +#ifndef __AVAILABILITY_INTERNAL__MAC_10_9 +#define __AVAILABILITY_INTERNAL__MAC_10_9 +#endif + +#ifndef __AVAILABILITY_INTERNAL__MAC_10_10 +#define __AVAILABILITY_INTERNAL__MAC_10_10 +#endif + +#ifndef AVAILABLE_MAC_OS_X_VERSION_10_9_AND_LATER +#define AVAILABLE_MAC_OS_X_VERSION_10_9_AND_LATER +#endif + +#ifndef AVAILABLE_MAC_OS_X_VERSION_10_10_AND_LATER +#define AVAILABLE_MAC_OS_X_VERSION_10_10_AND_LATER +#endif + +#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED <= 101100 */ + +#if defined(BUILDING_GTK__) +#undef CF_AVAILABLE +#define CF_AVAILABLE(_mac, _ios) +#undef CF_ENUM_AVAILABLE +#define CF_ENUM_AVAILABLE(_mac, _ios) +#endif + #else #define CF_AVAILABLE(_mac, _ios) +#define CF_ENUM_AVAILABLE(_mac, _ios) #endif #endif /* __WebKitAvailability__ */ diff --git a/Source/JavaScriptCore/API/tests/CompareAndSwapTest.cpp b/Source/JavaScriptCore/API/tests/CompareAndSwapTest.cpp new file mode 100644 index 000000000..e78086c3a --- /dev/null +++ b/Source/JavaScriptCore/API/tests/CompareAndSwapTest.cpp @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "CompareAndSwapTest.h" + +#include +#include +#include + +class Bitmap { +public: + Bitmap() { clearAll(); } + + inline void clearAll(); + inline bool concurrentTestAndSet(size_t n); + inline size_t numBits() const { return words * wordSize; } + +private: + static const size_t Size = 4096*10; + + static const unsigned wordSize = sizeof(uint8_t) * 8; + static const unsigned words = (Size + wordSize - 1) / wordSize; + static const uint8_t one = 1; + + uint8_t bits[words]; +}; + +inline void Bitmap::clearAll() +{ + memset(&bits, 0, sizeof(bits)); +} + +inline bool Bitmap::concurrentTestAndSet(size_t n) +{ + uint8_t mask = one << (n % wordSize); + size_t index = n / wordSize; + uint8_t* wordPtr = &bits[index]; + uint8_t oldValue; + do { + oldValue = *wordPtr; + if (oldValue & mask) + return true; + } while (!WTF::atomicCompareExchangeWeakRelaxed(wordPtr, oldValue, static_cast(oldValue | mask))); + return false; +} + +struct Data { + Bitmap* bitmap; + int id; + int numThreads; +}; + +static void setBitThreadFunc(void* p) +{ + Data* data = reinterpret_cast(p); + Bitmap* bitmap = data->bitmap; + size_t numBits = bitmap->numBits(); + + // The computed start index here is heuristic that seems to maximize (anecdotally) + // the chance for the CAS issue to manifest. + size_t start = (numBits * (data->numThreads - data->id)) / data->numThreads; + + printf(" started Thread %d\n", data->id); + for (size_t i = start; i < numBits; i++) + while (!bitmap->concurrentTestAndSet(i)) { } + for (size_t i = 0; i < start; i++) + while (!bitmap->concurrentTestAndSet(i)) { } + + printf(" finished Thread %d\n", data->id); +} + +void testCompareAndSwap() +{ + Bitmap bitmap; + const int numThreads = 5; + ThreadIdentifier threadIDs[numThreads]; + Data data[numThreads]; + + WTF::initializeThreading(); + + printf("Starting %d threads for CompareAndSwap test. Test should complete without hanging.\n", numThreads); + for (int i = 0; i < numThreads; i++) { + data[i].bitmap = &bitmap; + data[i].id = i; + data[i].numThreads = numThreads; + std::function threadFunc = std::bind(setBitThreadFunc, &data[i]); + threadIDs[i] = createThread("setBitThreadFunc", threadFunc); + } + + printf("Waiting for %d threads to join\n", numThreads); + for (int i = 0; i < numThreads; i++) + waitForThreadCompletion(threadIDs[i]); + + printf("PASS: CompareAndSwap test completed without a hang\n"); +} diff --git a/Source/JavaScriptCore/API/tests/CompareAndSwapTest.h b/Source/JavaScriptCore/API/tests/CompareAndSwapTest.h new file mode 100644 index 000000000..4a1fc59c6 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/CompareAndSwapTest.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +/* Regression test for webkit.org/b/142513 */ +void testCompareAndSwap(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/Source/JavaScriptCore/API/tests/CurrentThisInsideBlockGetterTest.h b/Source/JavaScriptCore/API/tests/CurrentThisInsideBlockGetterTest.h new file mode 100644 index 000000000..ab68f8057 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/CurrentThisInsideBlockGetterTest.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include + +#if JSC_OBJC_API_ENABLED + +void currentThisInsideBlockGetterTest(); + +#endif // JSC_OBJC_API_ENABLED diff --git a/Source/JavaScriptCore/API/tests/CustomGlobalObjectClassTest.c b/Source/JavaScriptCore/API/tests/CustomGlobalObjectClassTest.c new file mode 100644 index 000000000..62e63978e --- /dev/null +++ b/Source/JavaScriptCore/API/tests/CustomGlobalObjectClassTest.c @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "CustomGlobalObjectClassTest.h" + +#include +#include +#include + +extern bool assertTrue(bool value, const char* message); + +static bool executedCallback = false; + +static JSValueRef jsDoSomething(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argc, const JSValueRef args[], JSValueRef* exception) +{ + (void)function; + (void)thisObject; + (void)argc; + (void)args; + (void)exception; + executedCallback = true; + return JSValueMakeNull(ctx); +} + +static JSStaticFunction bridgedFunctions[] = { + {"doSomething", jsDoSomething, kJSPropertyAttributeDontDelete}, + {0, 0, 0}, +}; + +static JSClassRef bridgedObjectClass = NULL; +static JSClassDefinition bridgedClassDef; + +static JSClassRef jsClassRef() +{ + if (!bridgedObjectClass) { + bridgedClassDef = kJSClassDefinitionEmpty; + bridgedClassDef.className = "BridgedObject"; + bridgedClassDef.staticFunctions = bridgedFunctions; + bridgedObjectClass = JSClassCreate(&bridgedClassDef); + } + return bridgedObjectClass; +} + +void customGlobalObjectClassTest() +{ + JSClassRef bridgedObjectJsClassRef = jsClassRef(); + JSGlobalContextRef globalContext = JSGlobalContextCreate(bridgedObjectJsClassRef); + + JSObjectRef globalObj = JSContextGetGlobalObject(globalContext); + + JSPropertyNameArrayRef propertyNames = JSObjectCopyPropertyNames(globalContext, globalObj); + size_t propertyCount = JSPropertyNameArrayGetCount(propertyNames); + assertTrue(propertyCount == 1, "Property count == 1"); + + JSStringRef propertyNameRef = JSPropertyNameArrayGetNameAtIndex(propertyNames, 0); + size_t propertyNameLength = JSStringGetLength(propertyNameRef); + size_t bufferSize = sizeof(char) * (propertyNameLength + 1); + char* buffer = (char*)malloc(bufferSize); + JSStringGetUTF8CString(propertyNameRef, buffer, bufferSize); + buffer[propertyNameLength] = '\0'; + assertTrue(!strncmp(buffer, "doSomething", propertyNameLength), "First property name is doSomething"); + free(buffer); + + bool hasMethod = JSObjectHasProperty(globalContext, globalObj, propertyNameRef); + assertTrue(hasMethod, "Property found by name"); + + JSValueRef doSomethingProperty = + JSObjectGetProperty(globalContext, globalObj, propertyNameRef, NULL); + assertTrue(!JSValueIsUndefined(globalContext, doSomethingProperty), "Property is defined"); + + bool globalObjectClassMatchesClassRef = JSValueIsObjectOfClass(globalContext, globalObj, bridgedObjectJsClassRef); + assertTrue(globalObjectClassMatchesClassRef, "Global object is the right class"); + + JSStringRef script = JSStringCreateWithUTF8CString("doSomething();"); + JSEvaluateScript(globalContext, script, NULL, NULL, 1, NULL); + JSStringRelease(script); + + assertTrue(executedCallback, "Executed custom global object callback"); +} + +void globalObjectSetPrototypeTest() +{ + JSClassDefinition definition = 
kJSClassDefinitionEmpty; + definition.className = "Global"; + JSClassRef global = JSClassCreate(&definition); + JSGlobalContextRef context = JSGlobalContextCreate(global); + JSObjectRef object = JSContextGetGlobalObject(context); + + JSObjectRef above = JSObjectMake(context, 0, 0); + JSStringRef test = JSStringCreateWithUTF8CString("test"); + JSValueRef value = JSValueMakeString(context, test); + JSObjectSetProperty(context, above, test, value, kJSPropertyAttributeDontEnum, 0); + + JSObjectSetPrototype(context, object, above); + JSStringRef script = JSStringCreateWithUTF8CString("test === \"test\""); + JSValueRef result = JSEvaluateScript(context, script, 0, 0, 0, 0); + + assertTrue(JSValueToBoolean(context, result), "test === \"test\""); + + JSStringRelease(test); + JSStringRelease(script); +} + +void globalObjectPrivatePropertyTest() +{ + JSClassDefinition definition = kJSClassDefinitionEmpty; + definition.className = "Global"; + JSClassRef global = JSClassCreate(&definition); + JSGlobalContextRef context = JSGlobalContextCreate(global); + JSObjectRef globalObject = JSContextGetGlobalObject(context); + + JSStringRef privateName = JSStringCreateWithUTF8CString("private"); + JSValueRef privateValue = JSValueMakeString(context, privateName); + assertTrue(JSObjectSetPrivateProperty(context, globalObject, privateName, privateValue), "JSObjectSetPrivateProperty succeeded"); + JSValueRef result = JSObjectGetPrivateProperty(context, globalObject, privateName); + assertTrue(JSValueIsStrictEqual(context, privateValue, result), "privateValue === \"private\""); + + assertTrue(JSObjectDeletePrivateProperty(context, globalObject, privateName), "JSObjectDeletePrivateProperty succeeded"); + result = JSObjectGetPrivateProperty(context, globalObject, privateName); + assertTrue(JSValueIsNull(context, result), "Deleted private property is indeed no longer present"); + + JSStringRelease(privateName); +} diff --git a/Source/JavaScriptCore/API/tests/CustomGlobalObjectClassTest.h b/Source/JavaScriptCore/API/tests/CustomGlobalObjectClassTest.h new file mode 100644 index 000000000..3d2a520a7 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/CustomGlobalObjectClassTest.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#pragma once + +void customGlobalObjectClassTest(void); +void globalObjectSetPrototypeTest(void); +void globalObjectPrivatePropertyTest(void); diff --git a/Source/JavaScriptCore/API/tests/DateTests.h b/Source/JavaScriptCore/API/tests/DateTests.h new file mode 100644 index 000000000..eeb47a165 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/DateTests.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import + +#if JSC_OBJC_API_ENABLED + +void runDateTests(); + +#endif // JSC_OBJC_API_ENABLED diff --git a/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp b/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp new file mode 100644 index 000000000..d5e53243b --- /dev/null +++ b/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp @@ -0,0 +1,374 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "ExecutionTimeLimitTest.h" + +#include "InitializeThreading.h" +#include "JSContextRefPrivate.h" +#include "JavaScriptCore.h" +#include "Options.h" +#include +#include +#include + +using namespace std::chrono; +using JSC::Options; + +static JSGlobalContextRef context = nullptr; + +static JSValueRef currentCPUTimeAsJSFunctionCallback(JSContextRef ctx, JSObjectRef functionObject, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + UNUSED_PARAM(functionObject); + UNUSED_PARAM(thisObject); + UNUSED_PARAM(argumentCount); + UNUSED_PARAM(arguments); + UNUSED_PARAM(exception); + + ASSERT(JSContextGetGlobalContext(ctx) == context); + return JSValueMakeNumber(ctx, currentCPUTime().count() / 1000000.); +} + +bool shouldTerminateCallbackWasCalled = false; +static bool shouldTerminateCallback(JSContextRef, void*) +{ + shouldTerminateCallbackWasCalled = true; + return true; +} + +bool cancelTerminateCallbackWasCalled = false; +static bool cancelTerminateCallback(JSContextRef, void*) +{ + cancelTerminateCallbackWasCalled = true; + return false; +} + +int extendTerminateCallbackCalled = 0; +static bool extendTerminateCallback(JSContextRef ctx, void*) +{ + extendTerminateCallbackCalled++; + if (extendTerminateCallbackCalled == 1) { + JSContextGroupRef contextGroup = JSContextGetGroup(ctx); + JSContextGroupSetExecutionTimeLimit(contextGroup, .200f, extendTerminateCallback, 0); + return false; + } + return true; +} + +struct TierOptions { + const char* tier; + unsigned timeLimitAdjustmentMillis; + const char* optionsStr; +}; + +static void testResetAfterTimeout(bool& failed) +{ + JSValueRef v = nullptr; + JSValueRef exception = nullptr; + const char* reentryScript = "100"; + JSStringRef script = JSStringCreateWithUTF8CString(reentryScript); + v = JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + if (exception) { + printf("FAIL: Watchdog timeout was not reset.\n"); + failed = true; + } else if (!JSValueIsNumber(context, v) || JSValueToNumber(context, v, nullptr) != 100) { + printf("FAIL: Script result is not as expected.\n"); + failed = true; + } +} + +int testExecutionTimeLimit() +{ + static const TierOptions tierOptionsList[] = { + { "LLINT", 0, "--useConcurrentJIT=false --useLLInt=true --useJIT=false" }, + { "Baseline", 0, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=false" }, + { "DFG", 0, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=true --useFTLJIT=false" }, + { "FTL", 200, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=true --useFTLJIT=true" }, + }; + + bool failed = false; + + JSC::initializeThreading(); + Options::initialize(); // Ensure options is initialized first. 
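+
+    // Shape of the watchdog API under test (an illustrative sketch, not part of
+    // this patch): the limit is given in seconds, and the callback may return
+    // true to terminate the script, return false to cancel termination, or
+    // rearm the limit and return false to extend the deadline, exactly as the
+    // callbacks above do.
+    //
+    //     JSContextGroupRef group = JSContextGetGroup(context);
+    //     JSContextGroupSetExecutionTimeLimit(group, 0.1 /* seconds */, shouldTerminateCallback, nullptr);
+    //     /* ...evaluate scripts... */
+    //     JSContextGroupClearExecutionTimeLimit(group);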
+ + for (auto tierOptions : tierOptionsList) { + StringBuilder savedOptionsBuilder; + Options::dumpAllOptionsInALine(savedOptionsBuilder); + + Options::setOptions(tierOptions.optionsStr); + + unsigned tierAdjustmentMillis = tierOptions.timeLimitAdjustmentMillis; + double timeLimit; + + context = JSGlobalContextCreateInGroup(nullptr, nullptr); + + JSContextGroupRef contextGroup = JSContextGetGroup(context); + JSObjectRef globalObject = JSContextGetGlobalObject(context); + ASSERT(JSValueIsObject(context, globalObject)); + + JSValueRef exception = nullptr; + + JSStringRef currentCPUTimeStr = JSStringCreateWithUTF8CString("currentCPUTime"); + JSObjectRef currentCPUTimeFunction = JSObjectMakeFunctionWithCallback(context, currentCPUTimeStr, currentCPUTimeAsJSFunctionCallback); + JSObjectSetProperty(context, globalObject, currentCPUTimeStr, currentCPUTimeFunction, kJSPropertyAttributeNone, nullptr); + JSStringRelease(currentCPUTimeStr); + + /* Test script timeout: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, shouldTerminateCallback, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(") break; } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + shouldTerminateCallbackWasCalled = false; + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + if (((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired)) && shouldTerminateCallbackWasCalled) + printf("PASS: %s script timed out as expected.\n", tierOptions.tier); + else { + if ((endTime - startTime) >= milliseconds(timeAfterWatchdogShouldHaveFired)) + printf("FAIL: %s script did not time out as expected.\n", tierOptions.tier); + if (!shouldTerminateCallbackWasCalled) + printf("FAIL: %s script timeout callback was not called.\n", tierOptions.tier); + failed = true; + } + + if (!exception) { + printf("FAIL: %s TerminatedExecutionException was not thrown.\n", tierOptions.tier); + failed = true; + } + + testResetAfterTimeout(failed); + } + + /* Test script timeout with tail calls: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, shouldTerminateCallback, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("var startTime = currentCPUTime();" + "function recurse(i) {" + "'use strict';" + "if (i % 1000 === 0) {" + "if (currentCPUTime() - startTime >"); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(" ) { return; }"); + scriptBuilder.appendLiteral(" }"); + scriptBuilder.appendLiteral(" return recurse(i + 1); }"); + scriptBuilder.appendLiteral("recurse(0);"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + shouldTerminateCallbackWasCalled = false; + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + 
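+        // Pass criteria, as in the first test: with a roughly 100ms limit the
+        // watchdog must fire well before the script's own roughly 300ms
+        // clock-based escape (the tail-call loop only polls currentCPUTime()
+        // every 1000 frames), and the termination callback must be observed.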
if (((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired)) && shouldTerminateCallbackWasCalled) + printf("PASS: %s script with infinite tail calls timed out as expected .\n", tierOptions.tier); + else { + if ((endTime - startTime) >= milliseconds(timeAfterWatchdogShouldHaveFired)) + printf("FAIL: %s script with infinite tail calls did not time out as expected.\n", tierOptions.tier); + if (!shouldTerminateCallbackWasCalled) + printf("FAIL: %s script with infinite tail calls' timeout callback was not called.\n", tierOptions.tier); + failed = true; + } + + if (!exception) { + printf("FAIL: %s TerminatedExecutionException was not thrown.\n", tierOptions.tier); + failed = true; + } + + testResetAfterTimeout(failed); + } + + /* Test the script timeout's TerminatedExecutionException should NOT be catchable: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, shouldTerminateCallback, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); try { while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(") break; } } catch(e) { } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + shouldTerminateCallbackWasCalled = false; + + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + if (((endTime - startTime) >= milliseconds(timeAfterWatchdogShouldHaveFired)) || !shouldTerminateCallbackWasCalled) { + if (!((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired))) + printf("FAIL: %s script did not time out as expected.\n", tierOptions.tier); + if (!shouldTerminateCallbackWasCalled) + printf("FAIL: %s script timeout callback was not called.\n", tierOptions.tier); + failed = true; + } + + if (exception) + printf("PASS: %s TerminatedExecutionException was not catchable as expected.\n", tierOptions.tier); + else { + printf("FAIL: %s TerminatedExecutionException was caught.\n", tierOptions.tier); + failed = true; + } + + testResetAfterTimeout(failed); + } + + /* Test script timeout with no callback: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, 0, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(") break; } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + shouldTerminateCallbackWasCalled = false; + + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + if (((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired)) && !shouldTerminateCallbackWasCalled) + printf("PASS: %s script timed out as expected when no callback is specified.\n", tierOptions.tier); + else { + if ((endTime - startTime) 
>= milliseconds(timeAfterWatchdogShouldHaveFired)) + printf("FAIL: %s script did not time out as expected when no callback is specified.\n", tierOptions.tier); + else + printf("FAIL: %s script called stale callback function.\n", tierOptions.tier); + failed = true; + } + + if (!exception) { + printf("FAIL: %s TerminatedExecutionException was not thrown.\n", tierOptions.tier); + failed = true; + } + + testResetAfterTimeout(failed); + } + + /* Test script timeout cancellation: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, cancelTerminateCallback, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(") break; } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + cancelTerminateCallbackWasCalled = false; + + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + if (((endTime - startTime) >= milliseconds(timeAfterWatchdogShouldHaveFired)) && cancelTerminateCallbackWasCalled && !exception) + printf("PASS: %s script timeout was cancelled as expected.\n", tierOptions.tier); + else { + if (((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired)) || exception) + printf("FAIL: %s script timeout was not cancelled.\n", tierOptions.tier); + if (!cancelTerminateCallbackWasCalled) + printf("FAIL: %s script timeout callback was not called.\n", tierOptions.tier); + failed = true; + } + + if (exception) { + printf("FAIL: %s Unexpected TerminatedExecutionException thrown.\n", tierOptions.tier); + failed = true; + } + } + + /* Test script timeout extension: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, extendTerminateCallback, 0); + { + unsigned timeBeforeExtendedDeadline = 250 + tierAdjustmentMillis; + unsigned timeAfterExtendedDeadline = 600 + tierAdjustmentMillis; + unsigned maxBusyLoopTime = 750 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(maxBusyLoopTime / 1000.0); // in seconds. 
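+        // Expected timeline (approximate): the initial ~100ms limit fires once;
+        // the callback rearms the watchdog to ~200ms and returns false, so
+        // termination should land between timeBeforeExtendedDeadline (~250ms)
+        // and timeAfterExtendedDeadline (~600ms), while the script on its own
+        // would spin for up to maxBusyLoopTime (~750ms).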
+ scriptBuilder.appendLiteral(") break; } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + extendTerminateCallbackCalled = 0; + + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + auto deltaTime = endTime - startTime; + + if ((deltaTime >= milliseconds(timeBeforeExtendedDeadline)) && (deltaTime < milliseconds(timeAfterExtendedDeadline)) && (extendTerminateCallbackCalled == 2) && exception) + printf("PASS: %s script timeout was extended as expected.\n", tierOptions.tier); + else { + if (deltaTime < milliseconds(timeBeforeExtendedDeadline)) + printf("FAIL: %s script timeout was not extended as expected.\n", tierOptions.tier); + else if (deltaTime >= milliseconds(timeAfterExtendedDeadline)) + printf("FAIL: %s script did not timeout.\n", tierOptions.tier); + + if (extendTerminateCallbackCalled < 1) + printf("FAIL: %s script timeout callback was not called.\n", tierOptions.tier); + if (extendTerminateCallbackCalled < 2) + printf("FAIL: %s script timeout callback was not called after timeout extension.\n", tierOptions.tier); + + if (!exception) + printf("FAIL: %s TerminatedExecutionException was not thrown during timeout extension test.\n", tierOptions.tier); + + failed = true; + } + } + + JSGlobalContextRelease(context); + + Options::setOptions(savedOptionsBuilder.toString().ascii().data()); + } + + return failed; +} diff --git a/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.h b/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.h new file mode 100644 index 000000000..2c937d08d --- /dev/null +++ b/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +/* Returns 1 if failures were encountered. Else, returns 0. 
*/ +int testExecutionTimeLimit(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/Source/JavaScriptCore/API/tests/FunctionOverridesTest.cpp b/Source/JavaScriptCore/API/tests/FunctionOverridesTest.cpp new file mode 100644 index 000000000..a325f8393 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/FunctionOverridesTest.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FunctionOverridesTest.h" + +#include "FunctionOverrides.h" +#include "InitializeThreading.h" +#include "JSContextRefPrivate.h" +#include "JavaScriptCore.h" +#include "Options.h" +#include + +using JSC::Options; + +int testFunctionOverrides() +{ + bool failed = false; + + JSC::initializeThreading(); + Options::initialize(); // Ensure options is initialized first. 
+ + const char* oldFunctionOverrides = Options::functionOverrides(); + + Options::functionOverrides() = "testapi-function-overrides.js"; + JSC::FunctionOverrides::reinstallOverrides(); + + JSGlobalContextRef context = JSGlobalContextCreateInGroup(nullptr, nullptr); + + JSObjectRef globalObject = JSContextGetGlobalObject(context); + ASSERT_UNUSED(globalObject, JSValueIsObject(context, globalObject)); + + const char* scriptString = + "var str = '';" "\n" + "function f1() { /* Original f1 */ }" "\n" + "str += f1 + '\\n';" "\n" + "var f2 = function() {" "\n" + " // Original f2" "\n" + "}" "\n" + "str += f2 + '\\n';" "\n" + "str += (function() { /* Original f3 */ }) + '\\n';" "\n" + "var f4Source = '/* Original f4 */'" "\n" + "var f4 = new Function(f4Source);" "\n" + "str += f4 + '\\n';" "\n" + "\n" + "var expectedStr =" "\n" + "'function f1() { /* Overridden f1 */ }\\n" + "function () { /* Overridden f2 */ }\\n" + "function () { /* Overridden f3 */ }\\n" + "function anonymous() { /* Overridden f4 */ }\\n';" + "var result = (str == expectedStr);" "\n" + "result"; + + JSStringRef script = JSStringCreateWithUTF8CString(scriptString); + JSValueRef exception = nullptr; + JSValueRef resultRef = JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + + if (!JSValueIsBoolean(context, resultRef) || !JSValueToBoolean(context, resultRef)) + failed = true; + + JSGlobalContextRelease(context); + + JSC::Options::functionOverrides() = oldFunctionOverrides; + JSC::FunctionOverrides::reinstallOverrides(); + + printf("%s: function override tests.\n", failed ? "FAIL" : "PASS"); + + return failed; +} diff --git a/Source/JavaScriptCore/API/tests/FunctionOverridesTest.h b/Source/JavaScriptCore/API/tests/FunctionOverridesTest.h new file mode 100644 index 000000000..16237e5d2 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/FunctionOverridesTest.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +/* Returns 1 if failures were encountered. Else, returns 0. 
*/ +int testFunctionOverrides(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/Source/JavaScriptCore/API/tests/GlobalContextWithFinalizerTest.cpp b/Source/JavaScriptCore/API/tests/GlobalContextWithFinalizerTest.cpp new file mode 100644 index 000000000..0486a26c0 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/GlobalContextWithFinalizerTest.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "GlobalContextWithFinalizerTest.h" + +#include "JavaScriptCore.h" + +static bool failed = true; + +static void finalize(JSObjectRef) +{ + failed = false; +} + +int testGlobalContextWithFinalizer() +{ + JSClassDefinition def = kJSClassDefinitionEmpty; + def.className = "testClass"; + def.finalize = finalize; + JSClassRef classRef = JSClassCreate(&def); + + JSGlobalContextRef ref = JSGlobalContextCreateInGroup(nullptr, classRef); + JSGlobalContextRelease(ref); + JSClassRelease(classRef); + + if (failed) + printf("FAIL: JSGlobalContextRef did not call its JSClassRef finalizer.\n"); + else + printf("PASS: JSGlobalContextRef called its JSClassRef finalizer as expected.\n"); + + return failed; +} diff --git a/Source/JavaScriptCore/API/tests/GlobalContextWithFinalizerTest.h b/Source/JavaScriptCore/API/tests/GlobalContextWithFinalizerTest.h new file mode 100644 index 000000000..1961350e0 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/GlobalContextWithFinalizerTest.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "JSContextRefPrivate.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Returns 1 if failures were encountered. Else, returns 0. */ +int testGlobalContextWithFinalizer(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/Source/JavaScriptCore/API/tests/JSExportTests.h b/Source/JavaScriptCore/API/tests/JSExportTests.h new file mode 100644 index 000000000..9d501ee7e --- /dev/null +++ b/Source/JavaScriptCore/API/tests/JSExportTests.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import + +#if JSC_OBJC_API_ENABLED + +void runJSExportTests(); + +#endif // JSC_OBJC_API_ENABLED + diff --git a/Source/JavaScriptCore/API/tests/JSNode.c b/Source/JavaScriptCore/API/tests/JSNode.c index d9a40bea6..d0a0dc3ec 100644 --- a/Source/JavaScriptCore/API/tests/JSNode.c +++ b/Source/JavaScriptCore/API/tests/JSNode.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -23,6 +23,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include + #include "JSNode.h" #include "JSNodeList.h" #include "JSObjectRef.h" diff --git a/Source/JavaScriptCore/API/tests/JSNode.h b/Source/JavaScriptCore/API/tests/JSNode.h index 7725733ca..dc3e1caa2 100644 --- a/Source/JavaScriptCore/API/tests/JSNode.h +++ b/Source/JavaScriptCore/API/tests/JSNode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -23,8 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef JSNode_h -#define JSNode_h +#pragma once #include "JSBase.h" #include "Node.h" @@ -33,5 +32,3 @@ extern JSObjectRef JSNode_new(JSContextRef context, Node* node); extern JSClassRef JSNode_class(JSContextRef context); extern JSObjectRef JSNode_construct(JSContextRef context, JSObjectRef object, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); - -#endif /* JSNode_h */ diff --git a/Source/JavaScriptCore/API/tests/JSNodeList.c b/Source/JavaScriptCore/API/tests/JSNodeList.c index 61d7041a4..f037e094a 100644 --- a/Source/JavaScriptCore/API/tests/JSNodeList.c +++ b/Source/JavaScriptCore/API/tests/JSNodeList.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -23,6 +23,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include + #include "JSNode.h" #include "JSNodeList.h" #include "JSObjectRef.h" diff --git a/Source/JavaScriptCore/API/tests/JSNodeList.h b/Source/JavaScriptCore/API/tests/JSNodeList.h index f9309142e..c2d2bb9a0 100644 --- a/Source/JavaScriptCore/API/tests/JSNodeList.h +++ b/Source/JavaScriptCore/API/tests/JSNodeList.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -23,12 +23,9 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef JSNodeList_h -#define JSNodeList_h +#pragma once #include "JSBase.h" #include "NodeList.h" extern JSObjectRef JSNodeList_new(JSContextRef, NodeList*); - -#endif /* JSNodeList_h */ diff --git a/Source/JavaScriptCore/API/tests/JSONParseTest.cpp b/Source/JavaScriptCore/API/tests/JSONParseTest.cpp new file mode 100644 index 000000000..d7e2bca21 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/JSONParseTest.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSONParseTest.h" + +#include "JSCInlines.h" +#include "JSGlobalObject.h" +#include "JSONObject.h" +#include "VM.h" +#include + +using namespace JSC; + +int testJSONParse() +{ + bool failed = false; + + RefPtr<VM> vm = VM::create(); + + JSLockHolder locker(vm.get()); + JSGlobalObject* globalObject = JSGlobalObject::create(*vm, JSGlobalObject::createStructure(*vm, jsNull())); + + ExecState* exec = globalObject->globalExec(); + JSValue v0 = JSONParse(exec, ""); + JSValue v1 = JSONParse(exec, "#$%^"); + JSValue v2 = JSONParse(exec, String()); + UChar emptyUCharArray[1] = { '\0' }; + JSValue v3 = JSONParse(exec, String(emptyUCharArray, 0)); + JSValue v4; + JSValue v5 = JSONParse(exec, "123"); + + failed = failed || (v0 != v1); + failed = failed || (v1 != v2); + failed = failed || (v2 != v3); + failed = failed || (v3 != v4); + failed = failed || (v4 == v5); + + vm = nullptr; + + if (failed) + printf("FAIL: JSONParse String test.\n"); + else + printf("PASS: JSONParse String test.\n"); + + return failed; +} diff --git a/Source/JavaScriptCore/API/tests/JSONParseTest.h b/Source/JavaScriptCore/API/tests/JSONParseTest.h new file mode 100644 index 000000000..13842f92e --- /dev/null +++ b/Source/JavaScriptCore/API/tests/JSONParseTest.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +int testJSONParse(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/Source/JavaScriptCore/API/tests/Node.c b/Source/JavaScriptCore/API/tests/Node.c index 913da0a2a..db687e952 100644 --- a/Source/JavaScriptCore/API/tests/Node.c +++ b/Source/JavaScriptCore/API/tests/Node.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR diff --git a/Source/JavaScriptCore/API/tests/Node.h b/Source/JavaScriptCore/API/tests/Node.h index e9250b3ae..bdb1f2c55 100644 --- a/Source/JavaScriptCore/API/tests/Node.h +++ b/Source/JavaScriptCore/API/tests/Node.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -23,8 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef Node_h -#define Node_h +#pragma once typedef struct __Node Node; typedef struct __NodeLink NodeLink; @@ -46,5 +45,3 @@ extern void Node_deref(Node* node); extern void Node_appendChild(Node* node, Node* child); extern void Node_removeChild(Node* node, Node* child); extern void Node_replaceChild(Node* node, Node* newChild, Node* oldChild); - -#endif /* Node_h */ diff --git a/Source/JavaScriptCore/API/tests/NodeList.c b/Source/JavaScriptCore/API/tests/NodeList.c index ae4c17062..69f4cd5c4 100644 --- a/Source/JavaScriptCore/API/tests/NodeList.c +++ b/Source/JavaScriptCore/API/tests/NodeList.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR diff --git a/Source/JavaScriptCore/API/tests/NodeList.h b/Source/JavaScriptCore/API/tests/NodeList.h index 25b95bf4d..51163c2be 100644 --- a/Source/JavaScriptCore/API/tests/NodeList.h +++ b/Source/JavaScriptCore/API/tests/NodeList.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -23,8 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef NodeList_h -#define NodeList_h +#pragma once #include "Node.h" @@ -38,5 +37,3 @@ extern unsigned NodeList_length(NodeList*); extern Node* NodeList_item(NodeList*, unsigned); extern void NodeList_ref(NodeList*); extern void NodeList_deref(NodeList*); - -#endif /* NodeList_h */ diff --git a/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.cpp b/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.cpp new file mode 100644 index 000000000..ef4b914c1 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.cpp @@ -0,0 +1,182 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PingPongStackOverflowTest.h" + +#include "InitializeThreading.h" +#include "JSContextRefPrivate.h" +#include "JavaScriptCore.h" +#include "Options.h" +#include + +using JSC::Options; + +static JSGlobalContextRef context = nullptr; +static int nativeRecursionCount = 0; + +static bool PingPongStackOverflowObject_hasInstance(JSContextRef context, JSObjectRef constructor, JSValueRef possibleValue, JSValueRef* exception) +{ + UNUSED_PARAM(context); + UNUSED_PARAM(constructor); + + JSStringRef hasInstanceName = JSStringCreateWithUTF8CString("hasInstance"); + JSValueRef hasInstance = JSObjectGetProperty(context, constructor, hasInstanceName, exception); + JSStringRelease(hasInstanceName); + if (!hasInstance) + return false; + + int countAtEntry = nativeRecursionCount++; + + JSValueRef result = 0; + if (nativeRecursionCount < 100) { + JSObjectRef function = JSValueToObject(context, hasInstance, exception); + result = JSObjectCallAsFunction(context, function, constructor, 1, &possibleValue, exception); + } else { + StringBuilder builder; + builder.appendLiteral("dummy.valueOf([0]"); + for (int i = 1; i < 35000; i++) { + builder.appendLiteral(", ["); + builder.appendNumber(i); + builder.appendLiteral("]"); + } + builder.appendLiteral(");"); + + JSStringRef script = JSStringCreateWithUTF8CString(builder.toString().utf8().data()); + result = JSEvaluateScript(context, script, NULL, NULL, 1, exception); + JSStringRelease(script); + } + + --nativeRecursionCount; + if (nativeRecursionCount != countAtEntry) + printf(" ERROR: PingPongStackOverflow test saw a recursion count mismatch\n"); + + return result && JSValueToBoolean(context, result); +} + +JSClassDefinition PingPongStackOverflowObject_definition = { + 0, + kJSClassAttributeNone, + + "PingPongStackOverflowObject", + NULL, + + NULL, + NULL, + + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + PingPongStackOverflowObject_hasInstance, + NULL, +}; + +static JSClassRef PingPongStackOverflowObject_class(JSContextRef context) +{ + UNUSED_PARAM(context); + + static JSClassRef jsClass; + if (!jsClass) + jsClass = JSClassCreate(&PingPongStackOverflowObject_definition); + + return jsClass; +} + +// This test tests a stack overflow on VM reentry into a JS function from a native function +// after ping-pong'ing back and forth between JS and native functions multiple times. +// This test should not hang or crash. +int testPingPongStackOverflow() +{ + bool failed = false; + + JSC::initializeThreading(); + Options::initialize(); // Ensure options is initialized first.
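/* A note on the stack arithmetic coming up: maxPerThreadStackUsage is set to 32 KB plus the 128 KB softReservedZoneSize, so once the VM's soft reserved zone is subtracted only about 32 KB remains for script frames. That small budget makes the JS/native ping-pong recursion trip JSC's stack check quickly instead of exhausting the real thread stack. */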
+ + auto origSoftReservedZoneSize = Options::softReservedZoneSize(); + auto origReservedZoneSize = Options::reservedZoneSize(); + auto origUseLLInt = Options::useLLInt(); + auto origMaxPerThreadStackUsage = Options::maxPerThreadStackUsage(); + + Options::softReservedZoneSize() = 128 * KB; + Options::reservedZoneSize() = 64 * KB; +#if ENABLE(JIT) + // Normally, we want to disable the LLINT to force the use of JITted code which is necessary for + // reproducing the regression in https://bugs.webkit.org/show_bug.cgi?id=148749. However, we only + // want to do this if the LLINT isn't the only available execution engine. + Options::useLLInt() = false; +#endif + + const char* scriptString = + "var count = 0;" \ + "PingPongStackOverflowObject.hasInstance = function f() {" \ + " return (undefined instanceof PingPongStackOverflowObject);" \ + "};" \ + "PingPongStackOverflowObject.__proto__ = undefined;" \ + "undefined instanceof PingPongStackOverflowObject;"; + + JSValueRef scriptResult = nullptr; + JSValueRef exception = nullptr; + JSStringRef script = JSStringCreateWithUTF8CString(scriptString); + + nativeRecursionCount = 0; + context = JSGlobalContextCreateInGroup(nullptr, nullptr); + + JSObjectRef globalObject = JSContextGetGlobalObject(context); + ASSERT(JSValueIsObject(context, globalObject)); + + JSObjectRef PingPongStackOverflowObject = JSObjectMake(context, PingPongStackOverflowObject_class(context), NULL); + JSStringRef PingPongStackOverflowObjectString = JSStringCreateWithUTF8CString("PingPongStackOverflowObject"); + JSObjectSetProperty(context, globalObject, PingPongStackOverflowObjectString, PingPongStackOverflowObject, kJSPropertyAttributeNone, NULL); + JSStringRelease(PingPongStackOverflowObjectString); + + unsigned stackSize = 32 * KB; + Options::maxPerThreadStackUsage() = stackSize + Options::softReservedZoneSize(); + + exception = nullptr; + scriptResult = JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + + if (!exception) { + printf("FAIL: PingPongStackOverflowError not thrown in PingPongStackOverflow test\n"); + failed = true; + } else if (nativeRecursionCount) { + printf("FAIL: Unbalanced native recursion count: %d in PingPongStackOverflow test\n", nativeRecursionCount); + failed = true; + } else { + printf("PASS: PingPongStackOverflow test.\n"); + } + + Options::softReservedZoneSize() = origSoftReservedZoneSize; + Options::reservedZoneSize() = origReservedZoneSize; + Options::useLLInt() = origUseLLInt; + Options::maxPerThreadStackUsage() = origMaxPerThreadStackUsage; + + return failed; +} diff --git a/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.h b/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.h new file mode 100644 index 000000000..a2046693f --- /dev/null +++ b/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +int testPingPongStackOverflow(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/Source/JavaScriptCore/API/tests/Regress141275.h b/Source/JavaScriptCore/API/tests/Regress141275.h new file mode 100644 index 000000000..bf3492afa --- /dev/null +++ b/Source/JavaScriptCore/API/tests/Regress141275.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import + +#if JSC_OBJC_API_ENABLED + +void runRegress141275(); + +#endif // JSC_OBJC_API_ENABLED + diff --git a/Source/JavaScriptCore/API/tests/Regress141809.h b/Source/JavaScriptCore/API/tests/Regress141809.h new file mode 100644 index 000000000..43b099c94 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/Regress141809.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import + +#if JSC_OBJC_API_ENABLED + +void runRegress141809(); + +#endif // JSC_OBJC_API_ENABLED + diff --git a/Source/JavaScriptCore/API/tests/TypedArrayCTest.cpp b/Source/JavaScriptCore/API/tests/TypedArrayCTest.cpp new file mode 100644 index 000000000..8ec8cdd5d --- /dev/null +++ b/Source/JavaScriptCore/API/tests/TypedArrayCTest.cpp @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "TypedArrayCTest.h" + +#include "JavaScriptCore.h" +#include + +extern "C" void JSSynchronousGarbageCollectForDebugging(JSContextRef); + +static void id(void*, void*) { } +static void freePtr(void* ptr, void*) +{ + free(ptr); +} + +static const unsigned numLengths = 3; + +static const unsigned lengths[numLengths] = +{ + 0, + 1, + 10, +}; + +static const unsigned byteSizes[kJSTypedArrayTypeArrayBuffer] = +{ + 1, // kJSTypedArrayTypeInt8Array + 2, // kJSTypedArrayTypeInt16Array + 4, // kJSTypedArrayTypeInt32Array + 1, // kJSTypedArrayTypeUint8Array + 1, // kJSTypedArrayTypeUint8ClampedArray + 2, // kJSTypedArrayTypeUint16Array + 4, // kJSTypedArrayTypeUint32Array + 4, // kJSTypedArrayTypeFloat32Array + 8, // kJSTypedArrayTypeFloat64Array +}; + +static const char* typeToString[kJSTypedArrayTypeArrayBuffer] = +{ + "kJSTypedArrayTypeInt8Array", + "kJSTypedArrayTypeInt16Array", + "kJSTypedArrayTypeInt32Array", + "kJSTypedArrayTypeUint8Array", + "kJSTypedArrayTypeUint8ClampedArray", + "kJSTypedArrayTypeUint16Array", + "kJSTypedArrayTypeUint32Array", + "kJSTypedArrayTypeFloat32Array", + "kJSTypedArrayTypeFloat64Array", +}; + +inline int unexpectedException(const char* name) +{ + fprintf(stderr, "%s FAILED: unexpected exception\n", name); + return 1; +} + +static int assertEqualsAsNumber(JSGlobalContextRef context, JSValueRef value, double expectedValue) +{ + double number = JSValueToNumber(context, value, nullptr); + if (number != expectedValue && !(isnan(number) && isnan(expectedValue))) { + fprintf(stderr, "assertEqualsAsNumber FAILED: %p, %lf\n", value, expectedValue); + return 1; + } + return 0; +} + +static int testAccess(JSGlobalContextRef context, JSObjectRef typedArray, JSTypedArrayType type, unsigned elementLength, void* expectedPtr = nullptr, JSObjectRef expectedBuffer = nullptr, unsigned expectedOffset = 0) +{ + JSValueRef exception = nullptr; + // Test typedArray basic functions. 
+ JSTypedArrayType actualType = JSValueGetTypedArrayType(context, typedArray, &exception); + if (type != actualType || exception) { + fprintf(stderr, "TypedArray type FAILED: %p, got: %s, expected: %s\n", typedArray, typeToString[actualType], typeToString[type]); + return 1; + } + + unsigned length = JSObjectGetTypedArrayLength(context, typedArray, &exception); + if (elementLength != length || exception) { + fprintf(stderr, "TypedArray length FAILED: %p (%s), got: %d, expected: %d\n", typedArray, typeToString[type], length, elementLength); + return 1; + } + + unsigned byteLength = JSObjectGetTypedArrayByteLength(context, typedArray, &exception); + unsigned expectedLength = byteSizes[type] * elementLength; + if (byteLength != expectedLength || exception) { + fprintf(stderr, "TypedArray byteLength FAILED: %p (%s), got: %d, expected: %d\n", typedArray, typeToString[type], byteLength, expectedLength); + return 1; + } + + unsigned offset = JSObjectGetTypedArrayByteOffset(context, typedArray, &exception); + if (expectedOffset != offset || exception) { + fprintf(stderr, "TypedArray byteOffset FAILED: %p (%s), got: %d, expected: %d\n", typedArray, typeToString[type], offset, expectedOffset); + return 1; + } + + void* ptr = JSObjectGetTypedArrayBytesPtr(context, typedArray, &exception); + if (exception) + return unexpectedException("TypedArray get bytes ptr"); + + JSObjectRef buffer = JSObjectGetTypedArrayBuffer(context, typedArray, &exception); + if (exception) + return unexpectedException("TypedArray get buffer"); + + void* bufferPtr = JSObjectGetArrayBufferBytesPtr(context, buffer, &exception); + if (exception) + return unexpectedException("ArrayBuffer get bytes ptr"); + + if (bufferPtr != ptr) { + fprintf(stderr, "FAIL: TypedArray bytes ptr and ArrayBuffer byte ptr were not the same: %p (%s) TypedArray: %p, ArrayBuffer: %p\n", typedArray, typeToString[type], ptr, bufferPtr); + return 1; + } + + if (expectedPtr && ptr != expectedPtr) { + fprintf(stderr, "FAIL: TypedArray bytes ptr and the ptr used to construct the array were not the same: %p (%s) TypedArray: %p, bytes ptr: %p\n", typedArray, typeToString[type], ptr, expectedPtr); + return 1; + } + + if (expectedBuffer && expectedBuffer != buffer) { + fprintf(stderr, "FAIL: TypedArray buffer and the ArrayBuffer buffer used to construct the array were not the same: %p (%s) TypedArray buffer: %p, data: %p\n", typedArray, typeToString[type], buffer, expectedBuffer); + return 1; + } + + return 0; +} + +static int testConstructors(JSGlobalContextRef context, JSTypedArrayType type, unsigned length) +{ + int failed = 0; + JSValueRef exception = nullptr; + JSObjectRef typedArray; + + // Test create with length. + typedArray = JSObjectMakeTypedArray(context, type, length, &exception); + failed = failed || exception || testAccess(context, typedArray, type, length); + + void* ptr = calloc(length, byteSizes[type]); // This is to be freed by data + JSObjectRef data = JSObjectMakeArrayBufferWithBytesNoCopy(context, ptr, length * byteSizes[type], freePtr, nullptr, &exception); + failed = failed || exception; + + // Test create with existing ptr. + typedArray = JSObjectMakeTypedArrayWithBytesNoCopy(context, type, ptr, length * byteSizes[type], id, nullptr, &exception); + failed = failed || exception || testAccess(context, typedArray, type, length, ptr); + + // Test create with existing ArrayBuffer. 
+ typedArray = JSObjectMakeTypedArrayWithArrayBuffer(context, type, data, &exception); + failed = failed || exception || testAccess(context, typedArray, type, length, ptr, data); + + // Test create with existing ArrayBuffer and offset. + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, 0, length, &exception); + failed = failed || exception || testAccess(context, typedArray, type, length, ptr, data); + + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, byteSizes[type], length-1, &exception); + if (!length) + failed = failed || !exception; + else + failed = failed || testAccess(context, typedArray, type, length-1, ptr, data, byteSizes[type]) || exception; + + exception = nullptr; + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, byteSizes[type], 3, &exception); + if (length < 2) + failed = failed || !exception; + else + failed = failed || testAccess(context, typedArray, type, 3, ptr, data, byteSizes[type]) || exception; + + if (byteSizes[type] > 1) { + exception = nullptr; + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, 1, length-1, &exception); + failed = failed || !exception; + } + + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, byteSizes[type], length, &exception); + failed = failed || !exception; + + exception = nullptr; + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, byteSizes[type], 0, &exception); + if (!length) + failed = failed || !exception; + else + failed = failed || testAccess(context, typedArray, type, 0, ptr, data, byteSizes[type]) || exception; + + return failed; +} + +template<typename Functor> +static int forEachTypedArrayType(const Functor& functor) +{ + int failed = 0; + for (unsigned i = 0; i < kJSTypedArrayTypeArrayBuffer; i++) + failed = failed || functor(static_cast<JSTypedArrayType>(i)); + return failed; +} + +int testTypedArrayCAPI() +{ + int failed = 0; + JSGlobalContextRef context = JSGlobalContextCreate(nullptr); + + failed = failed || forEachTypedArrayType([&](JSTypedArrayType type) { + int failed = 0; + for (unsigned i = 0; i < numLengths; i++) + failed = failed || testConstructors(context, type, lengths[i]); + return failed; + }); + + // Test making a typedArray from scratch with a given length. + volatile JSObjectRef typedArray = JSObjectMakeTypedArray(context, kJSTypedArrayTypeUint32Array, 10, nullptr); + JSObjectRef data = JSObjectGetTypedArrayBuffer(context, typedArray, nullptr); + unsigned* buffer = static_cast<unsigned*>(JSObjectGetArrayBufferBytesPtr(context, data, nullptr)); + + ASSERT(JSObjectGetTypedArrayLength(context, typedArray, nullptr) == 10); + + // Test buffer is connected to typedArray. + buffer[1] = 1; + JSValueRef v = JSObjectGetPropertyAtIndex(context, typedArray, 1, nullptr); + failed = failed || assertEqualsAsNumber(context, v, 1); + + // Test passing a buffer from a new array to an old array. + typedArray = JSObjectMakeTypedArrayWithBytesNoCopy(context, kJSTypedArrayTypeUint32Array, buffer, 40, id, nullptr, nullptr); + buffer = static_cast<unsigned*>(JSObjectGetTypedArrayBytesPtr(context, typedArray, nullptr)); + ASSERT(buffer[1] == 1); + buffer[1] = 20; + ASSERT(((unsigned*)JSObjectGetArrayBufferBytesPtr(context, data, nullptr))[1] == 20); + + // Test that an array constructed over existing data returns that same data even with an offset.
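// A units note for the calls below: byteOffset is given in bytes while length is in elements, so byteOffset 4 with length 9 views bytes 4..40 of the 40-byte buffer, and the view's element 0 aliases element 1 of the original array (the value 20 asserted next).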
+ typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, kJSTypedArrayTypeUint32Array, data, 4, 9, nullptr); + failed = failed || assertEqualsAsNumber(context, JSObjectGetPropertyAtIndex(context, typedArray, 0, nullptr), 20); + ASSERT(data == JSObjectGetTypedArrayBuffer(context, typedArray, nullptr)); + + // Test attempting to allocate an array too big for memory. + forEachTypedArrayType([&](JSTypedArrayType type) { + JSValueRef exception = nullptr; + JSObjectMakeTypedArray(context, type, UINT_MAX, &exception); + return !exception; + }); + + JSGlobalContextRelease(context); + + if (!failed) + printf("PASS: Typed Array C API Tests.\n"); + else + printf("FAIL: Some Typed Array C API Tests failed.\n"); + + return failed; +} diff --git a/Source/JavaScriptCore/API/tests/TypedArrayCTest.h b/Source/JavaScriptCore/API/tests/TypedArrayCTest.h new file mode 100644 index 000000000..dc66881e8 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/TypedArrayCTest.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +int testTypedArrayCAPI(void); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/Source/JavaScriptCore/API/tests/minidom.c b/Source/JavaScriptCore/API/tests/minidom.c index f4ccf91e4..02b41a9c7 100644 --- a/Source/JavaScriptCore/API/tests/minidom.c +++ b/Source/JavaScriptCore/API/tests/minidom.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (C) 2006 Apple Inc. All rights reserved. * Copyright (C) 2007 Alp Toker * * Redistribution and use in source and binary forms, with or without @@ -11,10 +11,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -24,6 +24,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include + #include "JSContextRef.h" #include "JSNode.h" #include "JSObjectRef.h" diff --git a/Source/JavaScriptCore/API/tests/minidom.html b/Source/JavaScriptCore/API/tests/minidom.html new file mode 100644 index 000000000..7ea474752 --- /dev/null +++ b/Source/JavaScriptCore/API/tests/minidom.html @@ -0,0 +1,9 @@ + + + + + + +

+
+
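minidom is a small standalone harness: minidom.c sets up a global context, installs the globals the script expects (print, the Node constructor), and then evaluates minidom.js. A minimal sketch of that driving pattern using only the public C API follows; the helper name and error reporting are illustrative assumptions, not code from this patch.

    #include <JavaScriptCore/JavaScript.h>
    #include <stdio.h>

    /* Evaluate a UTF-8 script in a fresh global context and report any
       uncaught exception. Returns 1 on failure, 0 on success. */
    static int runScript(const char* source)
    {
        JSGlobalContextRef context = JSGlobalContextCreate(NULL);
        JSStringRef script = JSStringCreateWithUTF8CString(source);
        JSValueRef exception = NULL;
        JSEvaluateScript(context, script, NULL, NULL, 1, &exception);
        JSStringRelease(script);

        int failed = 0;
        if (exception) {
            JSStringRef message = JSValueToStringCopy(context, exception, NULL);
            char buffer[256];
            JSStringGetUTF8CString(message, buffer, sizeof(buffer));
            fprintf(stderr, "uncaught exception: %s\n", buffer);
            JSStringRelease(message);
            failed = 1;
        }
        JSGlobalContextRelease(context);
        return failed;
    }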
diff --git a/Source/JavaScriptCore/API/tests/minidom.js b/Source/JavaScriptCore/API/tests/minidom.js
new file mode 100644
index 000000000..85134d7cb
--- /dev/null
+++ b/Source/JavaScriptCore/API/tests/minidom.js
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2006 Apple Inc.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+function shouldBe(a, b)
+{
+    var evalA;
+    try {
+        evalA = eval(a);
+    } catch(e) {
+        evalA = e;
+    }
+    
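+    // NaN never compares equal to anything, itself included, so two NaN numbers are deliberately treated as equal here.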
+    if (evalA == b || isNaN(evalA) && typeof evalA == 'number' && isNaN(b) && typeof b == 'number')
+        print("PASS: " + a + " should be " + b + " and is.", "green");
+    else
+        print("__FAIL__: " + a + " should be " + b + " but instead is " + evalA + ".", "red");
+}
+
+function test()
+{
+    print("Node is " + Node);
+    for (var p in Node)
+        print(p + ": " + Node[p]);
+    
+    node = new Node();
+    print("node is " + node);
+    for (var p in node)
+        print(p + ": " + node[p]);
+
+    child1 = new Node();
+    child2 = new Node();
+    child3 = new Node();
+    
+    node.appendChild(child1);
+    node.appendChild(child2);
+
+    var childNodes = node.childNodes;
+    
+    for (var i = 0; i < childNodes.length + 1; i++) {
+        print("item " + i + ": " + childNodes.item(i));
+    }
+    
+    for (var i = 0; i < childNodes.length + 1; i++) {
+        print(i + ": " + childNodes[i]);
+    }
+
+    node.removeChild(child1);
+    node.replaceChild(child3, child2);
+    
+    for (var i = 0; i < childNodes.length + 1; i++) {
+        print("item " + i + ": " + childNodes.item(i));
+    }
+
+    for (var i = 0; i < childNodes.length + 1; i++) {
+        print(i + ": " + childNodes[i]);
+    }
+
+    try {
+        node.appendChild(null);
+    } catch(e) {
+        print("caught: " + e);
+    }
+    
+    try {
+        var o = new Object();
+        o.appendChild = node.appendChild;
+        o.appendChild(node);
+    } catch(e) {
+        print("caught: " + e);
+    }
+    
+    try {
+        node.appendChild();
+    } catch(e) {
+        print("caught: " + e);
+    }
+    
+    oldNodeType = node.nodeType;
+    node.nodeType = 1;
+    shouldBe("node.nodeType", oldNodeType);
+    
+    shouldBe("node instanceof Node", true);
+    shouldBe("new Object() instanceof Node", false);
+    
+    print(Node);
+}
+
+test();
diff --git a/Source/JavaScriptCore/API/tests/testapi-function-overrides.js b/Source/JavaScriptCore/API/tests/testapi-function-overrides.js
new file mode 100644
index 000000000..363cced04
--- /dev/null
+++ b/Source/JavaScriptCore/API/tests/testapi-function-overrides.js
@@ -0,0 +1,16 @@
+// testapi function overrides for testing.
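+// Each entry pairs an original function body with its replacement. Bodies are
+// wrapped in matched delimiter pairs, and the varying styles below (%%%{ }%%%,
+// #$%{ }#$%, $$${ }$$$) suggest the delimiter characters themselves are arbitrary.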
+override %%%{ /* Original f1 */ }%%%
+with %%%{ /* Overridden f1 */ }%%%
+
+override #$%{
+    // Original f2
+}#$%
+with $$${ /* Overridden f2 */ }$$$
+
+override %%%{ /* Original f3 */ }%%%
+with %%%{ /* Overridden f3 */ }%%%
+
+override %%%{
+/* Original f4 */
+}%%%
+with %%%{ /* Overridden f4 */ }%%%
diff --git a/Source/JavaScriptCore/API/tests/testapi.c b/Source/JavaScriptCore/API/tests/testapi.c
new file mode 100644
index 000000000..b6c24518b
--- /dev/null
+++ b/Source/JavaScriptCore/API/tests/testapi.c
@@ -0,0 +1,1989 @@
+/*
+ * Copyright (C) 2006, 2015-2016 Apple Inc.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+
+#include "JavaScriptCore.h"
+#include "JSBasePrivate.h"
+#include "JSContextRefPrivate.h"
+#include "JSObjectRefPrivate.h"
+#include "JSScriptRefPrivate.h"
+#include "JSStringRefPrivate.h"
+#include 
+#define ASSERT_DISABLED 0
+#include 
+
+#if OS(WINDOWS)
+#include 
+#endif
+
+#include "CompareAndSwapTest.h"
+#include "CustomGlobalObjectClassTest.h"
+#include "ExecutionTimeLimitTest.h"
+#include "FunctionOverridesTest.h"
+#include "GlobalContextWithFinalizerTest.h"
+#include "JSONParseTest.h"
+#include "PingPongStackOverflowTest.h"
+#include "TypedArrayCTest.h"
+
+#if JSC_OBJC_API_ENABLED
+void testObjectiveCAPI(void);
+#endif
+
+bool assertTrue(bool value, const char* message);
+
+static JSGlobalContextRef context;
+int failed;
+static void assertEqualsAsBoolean(JSValueRef value, bool expectedValue)
+{
+    if (JSValueToBoolean(context, value) != expectedValue) {
+        fprintf(stderr, "assertEqualsAsBoolean failed: %p, %d\n", value, expectedValue);
+        failed = 1;
+    }
+}
+
+static void assertEqualsAsNumber(JSValueRef value, double expectedValue)
+{
+    double number = JSValueToNumber(context, value, NULL);
+
+    // FIXME  - On i386 the isnan(double) macro tries to map to the isnan(float) function,
+    // causing a build break with -Wshorten-64-to-32 enabled.  The issue is known by the appropriate team.
+    // After that's resolved, we can remove these casts
+    if (number != expectedValue && !(isnan((float)number) && isnan((float)expectedValue))) {
+        fprintf(stderr, "assertEqualsAsNumber failed: %p, %lf\n", value, expectedValue);
+        failed = 1;
+    }
+}
+
+static void assertEqualsAsUTF8String(JSValueRef value, const char* expectedValue)
+{
+    JSStringRef valueAsString = JSValueToStringCopy(context, value, NULL);
+
+    size_t jsSize = JSStringGetMaximumUTF8CStringSize(valueAsString);
+    char* jsBuffer = (char*)malloc(jsSize);
+    JSStringGetUTF8CString(valueAsString, jsBuffer, jsSize);
+
+    unsigned i;
+    for (i = 0; jsBuffer[i]; i++) {
+        if (jsBuffer[i] != expectedValue[i]) {
+            fprintf(stderr, "assertEqualsAsUTF8String failed at character %d: %c(%d) != %c(%d)\n", i, jsBuffer[i], jsBuffer[i], expectedValue[i], expectedValue[i]);
+            fprintf(stderr, "value: %s\n", jsBuffer);
+            fprintf(stderr, "expectedValue: %s\n", expectedValue);
+            failed = 1;
+        }
+    }
+
+    if (jsSize < strlen(jsBuffer) + 1) {
+        fprintf(stderr, "assertEqualsAsUTF8String failed: jsSize was too small\n");
+        failed = 1;
+    }
+
+    free(jsBuffer);
+    JSStringRelease(valueAsString);
+}
+
+static void assertEqualsAsCharactersPtr(JSValueRef value, const char* expectedValue)
+{
+    JSStringRef valueAsString = JSValueToStringCopy(context, value, NULL);
+
+    size_t jsLength = JSStringGetLength(valueAsString);
+    const JSChar* jsBuffer = JSStringGetCharactersPtr(valueAsString);
+
+    CFStringRef expectedValueAsCFString = CFStringCreateWithCString(kCFAllocatorDefault, 
+                                                                    expectedValue,
+                                                                    kCFStringEncodingUTF8);    
+    CFIndex cfLength = CFStringGetLength(expectedValueAsCFString);
+    UniChar* cfBuffer = (UniChar*)malloc(cfLength * sizeof(UniChar));
+    CFStringGetCharacters(expectedValueAsCFString, CFRangeMake(0, cfLength), cfBuffer);
+    CFRelease(expectedValueAsCFString);
+
+    if (memcmp(jsBuffer, cfBuffer, cfLength * sizeof(UniChar)) != 0) {
+        fprintf(stderr, "assertEqualsAsCharactersPtr failed: jsBuffer != cfBuffer\n");
+        failed = 1;
+    }
+    
+    if (jsLength != (size_t)cfLength) {
+#if OS(WINDOWS)
+        fprintf(stderr, "assertEqualsAsCharactersPtr failed: jsLength(%Iu) != cfLength(%Iu)\n", jsLength, (size_t)cfLength);
+#else
+        fprintf(stderr, "assertEqualsAsCharactersPtr failed: jsLength(%zu) != cfLength(%zu)\n", jsLength, (size_t)cfLength);
+#endif
+        failed = 1;
+    }
+
+    free(cfBuffer);
+    JSStringRelease(valueAsString);
+}
+
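+// Format a zeroed struct tm with "%Z" to obtain the host's current time zone
+// abbreviation, so callers can skip checks that only hold in PST.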
+static bool timeZoneIsPST()
+{
+    char timeZoneName[70];
+    struct tm gtm;
+    memset(&gtm, 0, sizeof(gtm));
+    strftime(timeZoneName, sizeof(timeZoneName), "%Z", &gtm);
+
+    return 0 == strcmp("PST", timeZoneName);
+}
+
+static JSValueRef jsGlobalValue; // non-stack value for testing JSValueProtect()
+
+/* MyObject pseudo-class */
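+/* MyObject implements every class callback, and several of them deliberately
+ * misbehave (for example, "hasPropertyLie" claims to exist in hasProperty but
+ * getProperty returns NULL), exercising how the API copes with inconsistent
+ * or throwing callbacks. */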
+
+static bool MyObject_hasProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "alwaysOne")
+        || JSStringIsEqualToUTF8CString(propertyName, "cantFind")
+        || JSStringIsEqualToUTF8CString(propertyName, "throwOnGet")
+        || JSStringIsEqualToUTF8CString(propertyName, "myPropertyName")
+        || JSStringIsEqualToUTF8CString(propertyName, "hasPropertyLie")
+        || JSStringIsEqualToUTF8CString(propertyName, "0")) {
+        return true;
+    }
+    
+    return false;
+}
+
+static JSValueRef MyObject_getProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "alwaysOne")) {
+        return JSValueMakeNumber(context, 1);
+    }
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "myPropertyName")) {
+        return JSValueMakeNumber(context, 1);
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "cantFind")) {
+        return JSValueMakeUndefined(context);
+    }
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "hasPropertyLie")) {
+        return 0;
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "throwOnGet")) {
+        return JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "0")) {
+        *exception = JSValueMakeNumber(context, 1);
+        return JSValueMakeNumber(context, 1);
+    }
+    
+    return JSValueMakeNull(context);
+}
+
+static bool MyObject_setProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(exception);
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "cantSet"))
+        return true; // pretend we set the property in order to swallow it
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "throwOnSet")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+    }
+    
+    return false;
+}
+
+static bool MyObject_deleteProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "cantDelete"))
+        return true;
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "throwOnDelete")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+        return false;
+    }
+
+    return false;
+}
+
+static void MyObject_getPropertyNames(JSContextRef context, JSObjectRef object, JSPropertyNameAccumulatorRef propertyNames)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    
+    JSStringRef propertyName;
+    
+    propertyName = JSStringCreateWithUTF8CString("alwaysOne");
+    JSPropertyNameAccumulatorAddName(propertyNames, propertyName);
+    JSStringRelease(propertyName);
+    
+    propertyName = JSStringCreateWithUTF8CString("myPropertyName");
+    JSPropertyNameAccumulatorAddName(propertyNames, propertyName);
+    JSStringRelease(propertyName);
+}
+
+static JSValueRef MyObject_callAsFunction(JSContextRef context, JSObjectRef object, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(exception);
+
+    if (argumentCount > 0 && JSValueIsString(context, arguments[0]) && JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, arguments[0], 0), "throwOnCall")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+        return JSValueMakeUndefined(context);
+    }
+
+    if (argumentCount > 0 && JSValueIsStrictEqual(context, arguments[0], JSValueMakeNumber(context, 0)))
+        return JSValueMakeNumber(context, 1);
+    
+    return JSValueMakeUndefined(context);
+}
+
+static JSObjectRef MyObject_callAsConstructor(JSContextRef context, JSObjectRef object, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+
+    if (argumentCount > 0 && JSValueIsString(context, arguments[0]) && JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, arguments[0], 0), "throwOnConstruct")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+        return object;
+    }
+
+    if (argumentCount > 0 && JSValueIsStrictEqual(context, arguments[0], JSValueMakeNumber(context, 0)))
+        return JSValueToObject(context, JSValueMakeNumber(context, 1), exception);
+    
+    return JSValueToObject(context, JSValueMakeNumber(context, 0), exception);
+}
+
+static bool MyObject_hasInstance(JSContextRef context, JSObjectRef constructor, JSValueRef possibleValue, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(constructor);
+
+    if (JSValueIsString(context, possibleValue) && JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, possibleValue, 0), "throwOnHasInstance")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), constructor, JSStringCreateWithUTF8CString("test script"), 1, exception);
+        return false;
+    }
+
+    JSStringRef numberString = JSStringCreateWithUTF8CString("Number");
+    JSObjectRef numberConstructor = JSValueToObject(context, JSObjectGetProperty(context, JSContextGetGlobalObject(context), numberString, exception), exception);
+    JSStringRelease(numberString);
+
+    return JSValueIsInstanceOfConstructor(context, possibleValue, numberConstructor, exception);
+}
+
+static JSValueRef MyObject_convertToType(JSContextRef context, JSObjectRef object, JSType type, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(exception);
+    
+    switch (type) {
+    case kJSTypeNumber:
+        return JSValueMakeNumber(context, 1);
+    case kJSTypeString:
+        {
+            JSStringRef string = JSStringCreateWithUTF8CString("MyObjectAsString");
+            JSValueRef result = JSValueMakeString(context, string);
+            JSStringRelease(string);
+            return result;
+        }
+    default:
+        break;
+    }
+
+    // string conversion -- forward to default object class
+    return JSValueMakeNull(context);
+}
+
+static JSValueRef MyObject_convertToTypeWrapper(JSContextRef context, JSObjectRef object, JSType type, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(type);
+    UNUSED_PARAM(exception);
+    // Forward to default object class
+    return 0;
+}
+
+static bool MyObject_set_nullGetForwardSet(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(ctx);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(exception);
+    return false; // Forward to parent class.
+}
+
+static JSStaticValue evilStaticValues[] = {
+    { "nullGetSet", 0, 0, kJSPropertyAttributeNone },
+    { "nullGetForwardSet", 0, MyObject_set_nullGetForwardSet, kJSPropertyAttributeNone },
+    { 0, 0, 0, 0 }
+};
+
+static JSStaticFunction evilStaticFunctions[] = {
+    { "nullCall", 0, kJSPropertyAttributeNone },
+    { 0, 0, 0 }
+};
+
+JSClassDefinition MyObject_definition = {
+    0,
+    kJSClassAttributeNone,
+    
+    "MyObject",
+    NULL,
+    
+    evilStaticValues,
+    evilStaticFunctions,
+    
+    NULL,
+    NULL,
+    MyObject_hasProperty,
+    MyObject_getProperty,
+    MyObject_setProperty,
+    MyObject_deleteProperty,
+    MyObject_getPropertyNames,
+    MyObject_callAsFunction,
+    MyObject_callAsConstructor,
+    MyObject_hasInstance,
+    MyObject_convertToType,
+};
+
+JSClassDefinition MyObject_convertToTypeWrapperDefinition = {
+    0,
+    kJSClassAttributeNone,
+    
+    "MyObject",
+    NULL,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    MyObject_convertToTypeWrapper,
+};
+
+JSClassDefinition MyObject_nullWrapperDefinition = {
+    0,
+    kJSClassAttributeNone,
+    
+    "MyObject",
+    NULL,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+};
+
+static JSClassRef MyObject_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+
+    static JSClassRef jsClass;
+    if (!jsClass) {
+        JSClassRef baseClass = JSClassCreate(&MyObject_definition);
+        MyObject_convertToTypeWrapperDefinition.parentClass = baseClass;
+        JSClassRef wrapperClass = JSClassCreate(&MyObject_convertToTypeWrapperDefinition);
+        MyObject_nullWrapperDefinition.parentClass = wrapperClass;
+        jsClass = JSClassCreate(&MyObject_nullWrapperDefinition);
+    }
+
+    return jsClass;
+}
+
+static JSValueRef PropertyCatchalls_getProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(exception);
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "x")) {
+        static size_t count;
+        if (count++ < 5)
+            return NULL;
+
+        // Swallow all .x gets after 5, returning null.
+        return JSValueMakeNull(context);
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "y")) {
+        static size_t count;
+        if (count++ < 5)
+            return NULL;
+
+        // Swallow all .y gets after 5, returning null.
+        return JSValueMakeNull(context);
+    }
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "z")) {
+        static size_t count;
+        if (count++ < 5)
+            return NULL;
+
+        // Swallow all .z gets after 5, returning null.
+        return JSValueMakeNull(context);
+    }
+
+    return NULL;
+}
+
+static bool PropertyCatchalls_setProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(exception);
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "x")) {
+        static size_t count;
+        if (count++ < 5)
+            return false;
+
+        // Swallow all .x sets after 4.
+        return true;
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "make_throw") || JSStringIsEqualToUTF8CString(propertyName, "0")) {
+        *exception = JSValueMakeNumber(context, 5);
+        return true;
+    }
+
+    return false;
+}
+
+static void PropertyCatchalls_getPropertyNames(JSContextRef context, JSObjectRef object, JSPropertyNameAccumulatorRef propertyNames)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+
+    static size_t count;
+    static const char* numbers[] = { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" };
+    
+    // Provide a property of a different name every time.
+    JSStringRef propertyName = JSStringCreateWithUTF8CString(numbers[count++ % 10]);
+    JSPropertyNameAccumulatorAddName(propertyNames, propertyName);
+    JSStringRelease(propertyName);
+}
+
+JSClassDefinition PropertyCatchalls_definition = {
+    0,
+    kJSClassAttributeNone,
+    
+    "PropertyCatchalls",
+    NULL,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    NULL,
+    PropertyCatchalls_getProperty,
+    PropertyCatchalls_setProperty,
+    NULL,
+    PropertyCatchalls_getPropertyNames,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+};
+
+static JSClassRef PropertyCatchalls_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+
+    static JSClassRef jsClass;
+    if (!jsClass)
+        jsClass = JSClassCreate(&PropertyCatchalls_definition);
+    
+    return jsClass;
+}
+
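+// EvilExceptionObject routes hasInstance and type conversion back into
+// script-visible "hasInstance", "toNumber", and "toStringExplicit" properties,
+// so scripts can make those hooks throw.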
+static bool EvilExceptionObject_hasInstance(JSContextRef context, JSObjectRef constructor, JSValueRef possibleValue, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(constructor);
+    
+    JSStringRef hasInstanceName = JSStringCreateWithUTF8CString("hasInstance");
+    JSValueRef hasInstance = JSObjectGetProperty(context, constructor, hasInstanceName, exception);
+    JSStringRelease(hasInstanceName);
+    if (!hasInstance)
+        return false;
+    JSObjectRef function = JSValueToObject(context, hasInstance, exception);
+    JSValueRef result = JSObjectCallAsFunction(context, function, constructor, 1, &possibleValue, exception);
+    return result && JSValueToBoolean(context, result);
+}
+
+static JSValueRef EvilExceptionObject_convertToType(JSContextRef context, JSObjectRef object, JSType type, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(exception);
+    JSStringRef funcName;
+    switch (type) {
+    case kJSTypeNumber:
+        funcName = JSStringCreateWithUTF8CString("toNumber");
+        break;
+    case kJSTypeString:
+        funcName = JSStringCreateWithUTF8CString("toStringExplicit");
+        break;
+    default:
+        return JSValueMakeNull(context);
+    }
+    
+    JSValueRef func = JSObjectGetProperty(context, object, funcName, exception);
+    JSStringRelease(funcName);    
+    JSObjectRef function = JSValueToObject(context, func, exception);
+    if (!function)
+        return JSValueMakeNull(context);
+    JSValueRef value = JSObjectCallAsFunction(context, function, object, 0, NULL, exception);
+    if (!value) {
+        JSStringRef errorString = JSStringCreateWithUTF8CString("convertToType failed"); 
+        JSValueRef errorStringRef = JSValueMakeString(context, errorString);
+        JSStringRelease(errorString);
+        return errorStringRef;
+    }
+    return value;
+}
+
+JSClassDefinition EvilExceptionObject_definition = {
+    0,
+    kJSClassAttributeNone,
+
+    "EvilExceptionObject",
+    NULL,
+
+    NULL,
+    NULL,
+
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    EvilExceptionObject_hasInstance,
+    EvilExceptionObject_convertToType,
+};
+
+static JSClassRef EvilExceptionObject_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+    
+    static JSClassRef jsClass;
+    if (!jsClass)
+        jsClass = JSClassCreate(&EvilExceptionObject_definition);
+    
+    return jsClass;
+}
+
+JSClassDefinition EmptyObject_definition = {
+    0,
+    kJSClassAttributeNone,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+};
+
+static JSClassRef EmptyObject_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+    
+    static JSClassRef jsClass;
+    if (!jsClass)
+        jsClass = JSClassCreate(&EmptyObject_definition);
+    
+    return jsClass;
+}
+
+
+static JSValueRef Base_get(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(exception);
+
+    return JSValueMakeNumber(ctx, 1); // distinguish base get from derived get
+}
+
+static bool Base_set(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+
+    *exception = JSValueMakeNumber(ctx, 1); // distinguish base set from derived set
+    return true;
+}
+
+static JSValueRef Base_callAsFunction(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    
+    return JSValueMakeNumber(ctx, 1); // distinguish base call from derived call
+}
+
+static JSValueRef Base_returnHardNull(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(ctx);
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    
+    return 0; // should convert to undefined!
+}
+
+static JSStaticFunction Base_staticFunctions[] = {
+    { "baseProtoDup", NULL, kJSPropertyAttributeNone },
+    { "baseProto", Base_callAsFunction, kJSPropertyAttributeNone },
+    { "baseHardNull", Base_returnHardNull, kJSPropertyAttributeNone },
+    { 0, 0, 0 }
+};
+
+static JSStaticValue Base_staticValues[] = {
+    { "baseDup", Base_get, Base_set, kJSPropertyAttributeNone },
+    { "baseOnly", Base_get, Base_set, kJSPropertyAttributeNone },
+    { 0, 0, 0, 0 }
+};
+
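+// When TestInitializeFinalize is set, objects are created with private data
+// (void*)1; Base_initialize advances it to 2, Derived_initialize to 3, and
+// Derived_finalize to 4, verifying that initializers run base-first and
+// finalizers run derived-first.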
+static bool TestInitializeFinalize;
+static void Base_initialize(JSContextRef context, JSObjectRef object)
+{
+    UNUSED_PARAM(context);
+
+    if (TestInitializeFinalize) {
+        ASSERT((void*)1 == JSObjectGetPrivate(object));
+        JSObjectSetPrivate(object, (void*)2);
+    }
+}
+
+static unsigned Base_didFinalize;
+static void Base_finalize(JSObjectRef object)
+{
+    UNUSED_PARAM(object);
+    if (TestInitializeFinalize) {
+        ASSERT((void*)4 == JSObjectGetPrivate(object));
+        Base_didFinalize = true;
+    }
+}
+
+static JSClassRef Base_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+
+    static JSClassRef jsClass;
+    if (!jsClass) {
+        JSClassDefinition definition = kJSClassDefinitionEmpty;
+        definition.staticValues = Base_staticValues;
+        definition.staticFunctions = Base_staticFunctions;
+        definition.initialize = Base_initialize;
+        definition.finalize = Base_finalize;
+        jsClass = JSClassCreate(&definition);
+    }
+    return jsClass;
+}
+
+static JSValueRef Derived_get(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(exception);
+
+    return JSValueMakeNumber(ctx, 2); // distinguish base get from derived get
+}
+
+static bool Derived_set(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(ctx);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+
+    *exception = JSValueMakeNumber(ctx, 2); // distinguish base set from derived set
+    return true;
+}
+
+static JSValueRef Derived_callAsFunction(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    
+    return JSValueMakeNumber(ctx, 2); // distinguish base call from derived call
+}
+
+static JSStaticFunction Derived_staticFunctions[] = {
+    { "protoOnly", Derived_callAsFunction, kJSPropertyAttributeNone },
+    { "protoDup", NULL, kJSPropertyAttributeNone },
+    { "baseProtoDup", Derived_callAsFunction, kJSPropertyAttributeNone },
+    { 0, 0, 0 }
+};
+
+static JSStaticValue Derived_staticValues[] = {
+    { "derivedOnly", Derived_get, Derived_set, kJSPropertyAttributeNone },
+    { "protoDup", Derived_get, Derived_set, kJSPropertyAttributeNone },
+    { "baseDup", Derived_get, Derived_set, kJSPropertyAttributeNone },
+    { 0, 0, 0, 0 }
+};
+
+static void Derived_initialize(JSContextRef context, JSObjectRef object)
+{
+    UNUSED_PARAM(context);
+
+    if (TestInitializeFinalize) {
+        ASSERT((void*)2 == JSObjectGetPrivate(object));
+        JSObjectSetPrivate(object, (void*)3);
+    }
+}
+
+static void Derived_finalize(JSObjectRef object)
+{
+    if (TestInitializeFinalize) {
+        ASSERT((void*)3 == JSObjectGetPrivate(object));
+        JSObjectSetPrivate(object, (void*)4);
+    }
+}
+
+static JSClassRef Derived_class(JSContextRef context)
+{
+    static JSClassRef jsClass;
+    if (!jsClass) {
+        JSClassDefinition definition = kJSClassDefinitionEmpty;
+        definition.parentClass = Base_class(context);
+        definition.staticValues = Derived_staticValues;
+        definition.staticFunctions = Derived_staticFunctions;
+        definition.initialize = Derived_initialize;
+        definition.finalize = Derived_finalize;
+        jsClass = JSClassCreate(&definition);
+    }
+    return jsClass;
+}
+
+static JSClassRef Derived2_class(JSContextRef context)
+{
+    static JSClassRef jsClass;
+    if (!jsClass) {
+        JSClassDefinition definition = kJSClassDefinitionEmpty;
+        definition.parentClass = Derived_class(context);
+        jsClass = JSClassCreate(&definition);
+    }
+    return jsClass;
+}
+
+static JSValueRef print_callAsFunction(JSContextRef ctx, JSObjectRef functionObject, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(functionObject);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(exception);
+
+    ASSERT(JSContextGetGlobalContext(ctx) == context);
+    
+    if (argumentCount > 0) {
+        JSStringRef string = JSValueToStringCopy(ctx, arguments[0], NULL);
+        size_t sizeUTF8 = JSStringGetMaximumUTF8CStringSize(string);
+        char* stringUTF8 = (char*)malloc(sizeUTF8);
+        JSStringGetUTF8CString(string, stringUTF8, sizeUTF8);
+        printf("%s\n", stringUTF8);
+        free(stringUTF8);
+        JSStringRelease(string);
+    }
+    
+    return JSValueMakeUndefined(ctx);
+}
+
+static JSObjectRef myConstructor_callAsConstructor(JSContextRef context, JSObjectRef constructorObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(constructorObject);
+    UNUSED_PARAM(exception);
+    
+    JSObjectRef result = JSObjectMake(context, NULL, NULL);
+    if (argumentCount > 0) {
+        JSStringRef value = JSStringCreateWithUTF8CString("value");
+        JSObjectSetProperty(context, result, value, arguments[0], kJSPropertyAttributeNone, NULL);
+        JSStringRelease(value);
+    }
+    
+    return result;
+}
+
+static JSObjectRef myBadConstructor_callAsConstructor(JSContextRef context, JSObjectRef constructorObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(constructorObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    
+    return 0;
+}
+
+
+static void globalObject_initialize(JSContextRef context, JSObjectRef object)
+{
+    UNUSED_PARAM(object);
+    // Ensure that an execution context is passed in
+    ASSERT(context);
+
+    JSObjectRef globalObject = JSContextGetGlobalObject(context);
+    ASSERT(globalObject);
+
+    // Ensure that the standard global properties have been set on the global object
+    JSStringRef array = JSStringCreateWithUTF8CString("Array");
+    JSObjectRef arrayConstructor = JSValueToObject(context, JSObjectGetProperty(context, globalObject, array, NULL), NULL);
+    JSStringRelease(array);
+
+    UNUSED_PARAM(arrayConstructor);
+    ASSERT(arrayConstructor);
+}
+
+static JSValueRef globalObject_get(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(exception);
+
+    return JSValueMakeNumber(ctx, 3);
+}
+
+static bool globalObject_set(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+
+    *exception = JSValueMakeNumber(ctx, 3);
+    return true;
+}
+
+static JSValueRef globalObject_call(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+
+    return JSValueMakeNumber(ctx, 3);
+}
+
+static JSValueRef functionGC(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    JSGarbageCollect(context);
+    return JSValueMakeUndefined(context);
+}
+
+static JSStaticValue globalObject_staticValues[] = {
+    { "globalStaticValue", globalObject_get, globalObject_set, kJSPropertyAttributeNone },
+    { 0, 0, 0, 0 }
+};
+
+static JSStaticFunction globalObject_staticFunctions[] = {
+    { "globalStaticFunction", globalObject_call, kJSPropertyAttributeNone },
+    { "globalStaticFunction2", globalObject_call, kJSPropertyAttributeNone },
+    { "gc", functionGC, kJSPropertyAttributeNone },
+    { 0, 0, 0 }
+};
+
+static char* createStringWithContentsOfFile(const char* fileName);
+
+static void testInitializeFinalize()
+{
+    JSObjectRef o = JSObjectMake(context, Derived_class(context), (void*)1);
+    UNUSED_PARAM(o);
+    ASSERT(JSObjectGetPrivate(o) == (void*)3);
+}
+
+static JSValueRef jsNumberValue =  NULL;
+
+static JSObjectRef aHeapRef = NULL;
+
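+// Stash a protected number in a global so a later JSGarbageCollect() cannot
+// reclaim it; main() unprotects it after the collection has run.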
+static void makeGlobalNumberValue(JSContextRef context) {
+    JSValueRef v = JSValueMakeNumber(context, 420);
+    JSValueProtect(context, v);
+    jsNumberValue = v;
+    v = NULL;
+}
+
+bool assertTrue(bool value, const char* message)
+{
+    if (!value) {
+        if (message)
+            fprintf(stderr, "assertTrue failed: '%s'\n", message);
+        else
+            fprintf(stderr, "assertTrue failed.\n");
+        failed = 1;
+    }
+    return value;
+}
+
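+// JSObjectSetPrototype() must refuse to create a cycle: an assignment that
+// would close a loop in the prototype chain is ignored, and the equivalent
+// __proto__ assignment from script must throw.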
+static bool checkForCycleInPrototypeChain()
+{
+    bool result = true;
+    JSGlobalContextRef context = JSGlobalContextCreate(0);
+    JSObjectRef object1 = JSObjectMake(context, /* jsClass */ 0, /* data */ 0);
+    JSObjectRef object2 = JSObjectMake(context, /* jsClass */ 0, /* data */ 0);
+    JSObjectRef object3 = JSObjectMake(context, /* jsClass */ 0, /* data */ 0);
+
+    JSObjectSetPrototype(context, object1, JSValueMakeNull(context));
+    ASSERT(JSValueIsNull(context, JSObjectGetPrototype(context, object1)));
+
+    // object1 -> object1
+    JSObjectSetPrototype(context, object1, object1);
+    result &= assertTrue(JSValueIsNull(context, JSObjectGetPrototype(context, object1)), "It is possible to assign self as a prototype");
+
+    // object1 -> object2 -> object1
+    JSObjectSetPrototype(context, object2, object1);
+    ASSERT(JSValueIsStrictEqual(context, JSObjectGetPrototype(context, object2), object1));
+    JSObjectSetPrototype(context, object1, object2);
+    result &= assertTrue(JSValueIsNull(context, JSObjectGetPrototype(context, object1)), "It is possible to close a prototype chain cycle");
+
+    // object1 -> object2 -> object3 -> object1
+    JSObjectSetPrototype(context, object2, object3);
+    ASSERT(JSValueIsStrictEqual(context, JSObjectGetPrototype(context, object2), object3));
+    JSObjectSetPrototype(context, object1, object2);
+    ASSERT(JSValueIsStrictEqual(context, JSObjectGetPrototype(context, object1), object2));
+    JSObjectSetPrototype(context, object3, object1);
+    result &= assertTrue(!JSValueIsStrictEqual(context, JSObjectGetPrototype(context, object3), object1), "It is possible to close a prototype chain cycle");
+
+    JSValueRef exception;
+    JSStringRef code = JSStringCreateWithUTF8CString("o = { }; p = { }; o.__proto__ = p; p.__proto__ = o");
+    JSStringRef file = JSStringCreateWithUTF8CString("");
+    result &= assertTrue(!JSEvaluateScript(context, code, /* thisObject*/ 0, file, 1, &exception)
+                         , "An exception should be thrown");
+
+    JSStringRelease(code);
+    JSStringRelease(file);
+    JSGlobalContextRelease(context);
+    return result;
+}
+
+static JSValueRef valueToObjectExceptionCallAsFunction(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    JSValueRef jsUndefined = JSValueMakeUndefined(JSContextGetGlobalContext(ctx));
+    JSValueToObject(JSContextGetGlobalContext(ctx), jsUndefined, exception);
+    
+    return JSValueMakeUndefined(ctx);
+}
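+// Converting undefined to an object from inside a callback must fail by
+// setting the exception out-parameter rather than crashing.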
+static bool valueToObjectExceptionTest()
+{
+    JSGlobalContextRef testContext;
+    JSClassDefinition globalObjectClassDefinition = kJSClassDefinitionEmpty;
+    globalObjectClassDefinition.initialize = globalObject_initialize;
+    globalObjectClassDefinition.staticValues = globalObject_staticValues;
+    globalObjectClassDefinition.staticFunctions = globalObject_staticFunctions;
+    globalObjectClassDefinition.attributes = kJSClassAttributeNoAutomaticPrototype;
+    JSClassRef globalObjectClass = JSClassCreate(&globalObjectClassDefinition);
+    testContext = JSGlobalContextCreateInGroup(NULL, globalObjectClass);
+    JSObjectRef globalObject = JSContextGetGlobalObject(testContext);
+
+    JSStringRef valueToObject = JSStringCreateWithUTF8CString("valueToObject");
+    JSObjectRef valueToObjectFunction = JSObjectMakeFunctionWithCallback(testContext, valueToObject, valueToObjectExceptionCallAsFunction);
+    JSObjectSetProperty(testContext, globalObject, valueToObject, valueToObjectFunction, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(valueToObject);
+
+    JSStringRef test = JSStringCreateWithUTF8CString("valueToObject();");
+    JSEvaluateScript(testContext, test, NULL, NULL, 1, NULL);
+    
+    JSStringRelease(test);
+    JSClassRelease(globalObjectClass);
+    JSGlobalContextRelease(testContext);
+    
+    return true;
+}
+
+static bool globalContextNameTest()
+{
+    bool result = true;
+    JSGlobalContextRef context = JSGlobalContextCreate(0);
+
+    JSStringRef str = JSGlobalContextCopyName(context);
+    result &= assertTrue(!str, "Default context name is NULL");
+
+    JSStringRef name1 = JSStringCreateWithUTF8CString("name1");
+    JSStringRef name2 = JSStringCreateWithUTF8CString("name2");
+
+    JSGlobalContextSetName(context, name1);
+    JSStringRef fetchName1 = JSGlobalContextCopyName(context);
+    JSGlobalContextSetName(context, name2);
+    JSStringRef fetchName2 = JSGlobalContextCopyName(context);
+    JSGlobalContextSetName(context, NULL);
+    JSStringRef fetchName3 = JSGlobalContextCopyName(context);
+
+    result &= assertTrue(JSStringIsEqual(name1, fetchName1), "Unexpected Context name");
+    result &= assertTrue(JSStringIsEqual(name2, fetchName2), "Unexpected Context name");
+    result &= assertTrue(!JSStringIsEqual(fetchName1, fetchName2), "Unexpected Context name");
+    result &= assertTrue(!fetchName3, "Unexpected Context name");
+
+    JSStringRelease(name1);
+    JSStringRelease(name2);
+    JSStringRelease(fetchName1);
+    JSStringRelease(fetchName2);
+
+    return result;
+}
+
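+// Compile-time check only: the name fields of JSStaticFunction and
+// JSStaticValue must accept a string literal without a const-ness warning.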
+static void checkConstnessInJSObjectNames()
+{
+    JSStaticFunction fun;
+    fun.name = "something";
+    JSStaticValue val;
+    val.name = "something";
+}
+
+int main(int argc, char* argv[])
+{
+#if OS(WINDOWS)
+    // Cygwin calls ::SetErrorMode(SEM_FAILCRITICALERRORS), which we will inherit. This is bad for
+    // testing/debugging, as it causes the post-mortem debugger not to be invoked. We reset the
+    // error mode here to work around Cygwin's behavior. See .
+    ::SetErrorMode(0);
+#endif
+
+    testCompareAndSwap();
+
+#if JSC_OBJC_API_ENABLED
+    testObjectiveCAPI();
+#endif
+
+    const char *scriptPath = "testapi.js";
+    if (argc > 1) {
+        scriptPath = argv[1];
+    }
+    
+    // Test garbage collection with a fresh context
+    context = JSGlobalContextCreateInGroup(NULL, NULL);
+    TestInitializeFinalize = true;
+    testInitializeFinalize();
+    JSGlobalContextRelease(context);
+    TestInitializeFinalize = false;
+
+    ASSERT(Base_didFinalize);
+
+    JSClassDefinition globalObjectClassDefinition = kJSClassDefinitionEmpty;
+    globalObjectClassDefinition.initialize = globalObject_initialize;
+    globalObjectClassDefinition.staticValues = globalObject_staticValues;
+    globalObjectClassDefinition.staticFunctions = globalObject_staticFunctions;
+    globalObjectClassDefinition.attributes = kJSClassAttributeNoAutomaticPrototype;
+    JSClassRef globalObjectClass = JSClassCreate(&globalObjectClassDefinition);
+    context = JSGlobalContextCreateInGroup(NULL, globalObjectClass);
+
+    JSContextGroupRef contextGroup = JSContextGetGroup(context);
+    
+    JSGlobalContextRetain(context);
+    JSGlobalContextRelease(context);
+    ASSERT(JSContextGetGlobalContext(context) == context);
+    
+    JSReportExtraMemoryCost(context, 0);
+    JSReportExtraMemoryCost(context, 1);
+    JSReportExtraMemoryCost(context, 1024);
+
+    JSObjectRef globalObject = JSContextGetGlobalObject(context);
+    ASSERT(JSValueIsObject(context, globalObject));
+    
+    JSValueRef jsUndefined = JSValueMakeUndefined(context);
+    JSValueRef jsNull = JSValueMakeNull(context);
+    JSValueRef jsTrue = JSValueMakeBoolean(context, true);
+    JSValueRef jsFalse = JSValueMakeBoolean(context, false);
+    JSValueRef jsZero = JSValueMakeNumber(context, 0);
+    JSValueRef jsOne = JSValueMakeNumber(context, 1);
+    JSValueRef jsOneThird = JSValueMakeNumber(context, 1.0 / 3.0);
+    JSObjectRef jsObjectNoProto = JSObjectMake(context, NULL, NULL);
+    JSObjectSetPrototype(context, jsObjectNoProto, JSValueMakeNull(context));
+
+    JSObjectSetPrivate(globalObject, (void*)123);
+    if (JSObjectGetPrivate(globalObject) != (void*)123) {
+        printf("FAIL: Didn't return private data when set by JSObjectSetPrivate().\n");
+        failed = 1;
+    } else
+        printf("PASS: returned private data when set by JSObjectSetPrivate().\n");
+
+    // FIXME: test funny utf8 characters
+    JSStringRef jsEmptyIString = JSStringCreateWithUTF8CString("");
+    JSValueRef jsEmptyString = JSValueMakeString(context, jsEmptyIString);
+    
+    JSStringRef jsOneIString = JSStringCreateWithUTF8CString("1");
+    JSValueRef jsOneString = JSValueMakeString(context, jsOneIString);
+
+    UniChar singleUniChar = 65; // Capital A
+    CFMutableStringRef cfString = 
+        CFStringCreateMutableWithExternalCharactersNoCopy(kCFAllocatorDefault,
+                                                          &singleUniChar,
+                                                          1,
+                                                          1,
+                                                          kCFAllocatorNull);
+
+    JSStringRef jsCFIString = JSStringCreateWithCFString(cfString);
+    JSValueRef jsCFString = JSValueMakeString(context, jsCFIString);
+    
+    CFStringRef cfEmptyString = CFStringCreateWithCString(kCFAllocatorDefault, "", kCFStringEncodingUTF8);
+    
+    JSStringRef jsCFEmptyIString = JSStringCreateWithCFString(cfEmptyString);
+    JSValueRef jsCFEmptyString = JSValueMakeString(context, jsCFEmptyIString);
+
+    CFIndex cfStringLength = CFStringGetLength(cfString);
+    UniChar* buffer = (UniChar*)malloc(cfStringLength * sizeof(UniChar));
+    CFStringGetCharacters(cfString, 
+                          CFRangeMake(0, cfStringLength), 
+                          buffer);
+    JSStringRef jsCFIStringWithCharacters = JSStringCreateWithCharacters((JSChar*)buffer, cfStringLength);
+    JSValueRef jsCFStringWithCharacters = JSValueMakeString(context, jsCFIStringWithCharacters);
+    
+    JSStringRef jsCFEmptyIStringWithCharacters = JSStringCreateWithCharacters((JSChar*)buffer, CFStringGetLength(cfEmptyString));
+    free(buffer);
+    JSValueRef jsCFEmptyStringWithCharacters = JSValueMakeString(context, jsCFEmptyIStringWithCharacters);
+
+    JSChar constantString[] = { 'H', 'e', 'l', 'l', 'o', };
+    JSStringRef constantStringRef = JSStringCreateWithCharactersNoCopy(constantString, sizeof(constantString) / sizeof(constantString[0]));
+    ASSERT(JSStringGetCharactersPtr(constantStringRef) == constantString);
+    JSStringRelease(constantStringRef);
+
+    ASSERT(JSValueGetType(context, NULL) == kJSTypeNull);
+    ASSERT(JSValueGetType(context, jsUndefined) == kJSTypeUndefined);
+    ASSERT(JSValueGetType(context, jsNull) == kJSTypeNull);
+    ASSERT(JSValueGetType(context, jsTrue) == kJSTypeBoolean);
+    ASSERT(JSValueGetType(context, jsFalse) == kJSTypeBoolean);
+    ASSERT(JSValueGetType(context, jsZero) == kJSTypeNumber);
+    ASSERT(JSValueGetType(context, jsOne) == kJSTypeNumber);
+    ASSERT(JSValueGetType(context, jsOneThird) == kJSTypeNumber);
+    ASSERT(JSValueGetType(context, jsEmptyString) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsOneString) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsCFString) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsCFStringWithCharacters) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsCFEmptyString) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsCFEmptyStringWithCharacters) == kJSTypeString);
+
+    ASSERT(!JSValueIsBoolean(context, NULL));
+    ASSERT(!JSValueIsObject(context, NULL));
+    ASSERT(!JSValueIsArray(context, NULL));
+    ASSERT(!JSValueIsDate(context, NULL));
+    ASSERT(!JSValueIsString(context, NULL));
+    ASSERT(!JSValueIsNumber(context, NULL));
+    ASSERT(!JSValueIsUndefined(context, NULL));
+    ASSERT(JSValueIsNull(context, NULL));
+    ASSERT(!JSObjectCallAsFunction(context, NULL, NULL, 0, NULL, NULL));
+    ASSERT(!JSObjectCallAsConstructor(context, NULL, 0, NULL, NULL));
+    ASSERT(!JSObjectIsConstructor(context, NULL));
+    ASSERT(!JSObjectIsFunction(context, NULL));
+
+    JSStringRef nullString = JSStringCreateWithUTF8CString(0);
+    const JSChar* characters = JSStringGetCharactersPtr(nullString);
+    if (characters) {
+        printf("FAIL: Didn't return null when accessing character pointer of a null String.\n");
+        failed = 1;
+    } else
+        printf("PASS: returned null when accessing character pointer of a null String.\n");
+
+    JSStringRef emptyString = JSStringCreateWithCFString(CFSTR(""));
+    characters = JSStringGetCharactersPtr(emptyString);
+    if (!characters) {
+        printf("FAIL: Returned null when accessing character pointer of an empty String.\n");
+        failed = 1;
+    } else
+        printf("PASS: returned empty when accessing character pointer of an empty String.\n");
+
+    size_t length = JSStringGetLength(nullString);
+    if (length) {
+        printf("FAIL: Didn't return 0 length for null String.\n");
+        failed = 1;
+    } else
+        printf("PASS: returned 0 length for null String.\n");
+    JSStringRelease(nullString);
+
+    length = JSStringGetLength(emptyString);
+    if (length) {
+        printf("FAIL: Didn't return 0 length for empty String.\n");
+        failed = 1;
+    } else
+        printf("PASS: returned 0 length for empty String.\n");
+    JSStringRelease(emptyString);
+
+    JSObjectRef propertyCatchalls = JSObjectMake(context, PropertyCatchalls_class(context), NULL);
+    JSStringRef propertyCatchallsString = JSStringCreateWithUTF8CString("PropertyCatchalls");
+    JSObjectSetProperty(context, globalObject, propertyCatchallsString, propertyCatchalls, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(propertyCatchallsString);
+
+    JSObjectRef myObject = JSObjectMake(context, MyObject_class(context), NULL);
+    JSStringRef myObjectIString = JSStringCreateWithUTF8CString("MyObject");
+    JSObjectSetProperty(context, globalObject, myObjectIString, myObject, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(myObjectIString);
+    
+    JSObjectRef EvilExceptionObject = JSObjectMake(context, EvilExceptionObject_class(context), NULL);
+    JSStringRef EvilExceptionObjectIString = JSStringCreateWithUTF8CString("EvilExceptionObject");
+    JSObjectSetProperty(context, globalObject, EvilExceptionObjectIString, EvilExceptionObject, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(EvilExceptionObjectIString);
+    
+    JSObjectRef EmptyObject = JSObjectMake(context, EmptyObject_class(context), NULL);
+    JSStringRef EmptyObjectIString = JSStringCreateWithUTF8CString("EmptyObject");
+    JSObjectSetProperty(context, globalObject, EmptyObjectIString, EmptyObject, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(EmptyObjectIString);
+    
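+    // Private properties should act as GC roots: stash an array in one, drop
+    // every other reference, force a collection, and check that the array and
+    // its properties survive.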
+    JSStringRef lengthStr = JSStringCreateWithUTF8CString("length");
+    JSObjectRef aStackRef = JSObjectMakeArray(context, 0, 0, 0);
+    aHeapRef = aStackRef;
+    JSObjectSetProperty(context, aHeapRef, lengthStr, JSValueMakeNumber(context, 10), 0, 0);
+    JSStringRef privatePropertyName = JSStringCreateWithUTF8CString("privateProperty");
+    if (!JSObjectSetPrivateProperty(context, myObject, privatePropertyName, aHeapRef)) {
+        printf("FAIL: Could not set private property.\n");
+        failed = 1;
+    } else
+        printf("PASS: Set private property.\n");
+    aStackRef = 0;
+    if (JSObjectSetPrivateProperty(context, aHeapRef, privatePropertyName, aHeapRef)) {
+        printf("FAIL: JSObjectSetPrivateProperty should fail on non-API objects.\n");
+        failed = 1;
+    } else
+        printf("PASS: Did not allow JSObjectSetPrivateProperty on a non-API object.\n");
+    if (JSObjectGetPrivateProperty(context, myObject, privatePropertyName) != aHeapRef) {
+        printf("FAIL: Could not retrieve private property.\n");
+        failed = 1;
+    } else
+        printf("PASS: Retrieved private property.\n");
+    if (JSObjectGetPrivateProperty(context, aHeapRef, privatePropertyName)) {
+        printf("FAIL: JSObjectGetPrivateProperty should return NULL when called on a non-API object.\n");
+        failed = 1;
+    } else
+        printf("PASS: JSObjectGetPrivateProperty return NULL.\n");
+
+    if (JSObjectGetProperty(context, myObject, privatePropertyName, 0) == aHeapRef) {
+        printf("FAIL: Accessed private property through ordinary property lookup.\n");
+        failed = 1;
+    } else
+        printf("PASS: Cannot access private property through ordinary property lookup.\n");
+
+    JSGarbageCollect(context);
+
+    for (int i = 0; i < 10000; i++)
+        JSObjectMake(context, 0, 0);
+
+    aHeapRef = JSValueToObject(context, JSObjectGetPrivateProperty(context, myObject, privatePropertyName), 0);
+    if (JSValueToNumber(context, JSObjectGetProperty(context, aHeapRef, lengthStr, 0), 0) != 10) {
+        printf("FAIL: Private property has been collected.\n");
+        failed = 1;
+    } else
+        printf("PASS: Private property does not appear to have been collected.\n");
+    JSStringRelease(lengthStr);
+
+    if (!JSObjectSetPrivateProperty(context, myObject, privatePropertyName, 0)) {
+        printf("FAIL: Could not set private property to NULL.\n");
+        failed = 1;
+    } else
+        printf("PASS: Set private property to NULL.\n");
+    if (JSObjectGetPrivateProperty(context, myObject, privatePropertyName)) {
+        printf("FAIL: Private property was not cleared by setting it to NULL.\n");
+        failed = 1;
+    } else
+        printf("PASS: Private property was cleared by setting it to NULL.\n");
+
+    JSStringRef nullJSON = JSStringCreateWithUTF8CString(0);
+    JSValueRef nullJSONObject = JSValueMakeFromJSONString(context, nullJSON);
+    if (nullJSONObject) {
+        printf("FAIL: Did not parse null String as JSON correctly\n");
+        failed = 1;
+    } else
+        printf("PASS: Parsed null String as JSON correctly.\n");
+    JSStringRelease(nullJSON);
+
+    JSStringRef validJSON = JSStringCreateWithUTF8CString("{\"aProperty\":true}");
+    JSValueRef jsonObject = JSValueMakeFromJSONString(context, validJSON);
+    JSStringRelease(validJSON);
+    if (!JSValueIsObject(context, jsonObject)) {
+        printf("FAIL: Did not parse valid JSON correctly\n");
+        failed = 1;
+    } else
+        printf("PASS: Parsed valid JSON string.\n");
+    JSStringRef propertyName = JSStringCreateWithUTF8CString("aProperty");
+    assertEqualsAsBoolean(JSObjectGetProperty(context, JSValueToObject(context, jsonObject, 0), propertyName, 0), true);
+    JSStringRelease(propertyName);
+    JSStringRef invalidJSON = JSStringCreateWithUTF8CString("fail!");
+    if (JSValueMakeFromJSONString(context, invalidJSON)) {
+        printf("FAIL: Should return null for invalid JSON data\n");
+        failed = 1;
+    } else
+        printf("PASS: Correctly returned null for invalid JSON data.\n");
+    JSValueRef exception;
+    JSStringRef str = JSValueCreateJSONString(context, jsonObject, 0, 0);
+    if (!JSStringIsEqualToUTF8CString(str, "{\"aProperty\":true}")) {
+        printf("FAIL: Did not correctly serialise with indent of 0.\n");
+        failed = 1;
+    } else
+        printf("PASS: Correctly serialised with indent of 0.\n");
+    JSStringRelease(str);
+
+    str = JSValueCreateJSONString(context, jsonObject, 4, 0);
+    if (!JSStringIsEqualToUTF8CString(str, "{\n    \"aProperty\": true\n}")) {
+        printf("FAIL: Did not correctly serialise with indent of 4.\n");
+        failed = 1;
+    } else
+        printf("PASS: Correctly serialised with indent of 4.\n");
+    JSStringRelease(str);
+
+    str = JSStringCreateWithUTF8CString("({get a(){ throw '';}})");
+    JSValueRef unstringifiableObj = JSEvaluateScript(context, str, NULL, NULL, 1, NULL);
+    JSStringRelease(str);
+    
+    str = JSValueCreateJSONString(context, unstringifiableObj, 4, 0);
+    if (str) {
+        printf("FAIL: Didn't return null when attempting to serialize unserializable value.\n");
+        JSStringRelease(str);
+        failed = 1;
+    } else
+        printf("PASS: returned null when attempting to serialize unserializable value.\n");
+    
+    str = JSValueCreateJSONString(context, unstringifiableObj, 4, &exception);
+    if (str) {
+        printf("FAIL: Didn't return null when attempting to serialize unserializable value.\n");
+        JSStringRelease(str);
+        failed = 1;
+    } else
+        printf("PASS: returned null when attempting to serialize unserializable value.\n");
+    if (!exception) {
+        printf("FAIL: Did not set exception on serialisation error\n");
+        failed = 1;
+    } else
+        printf("PASS: set exception on serialisation error\n");
+    // Conversions that throw exceptions
+    exception = NULL;
+    ASSERT(NULL == JSValueToObject(context, jsNull, &exception));
+    ASSERT(exception);
+    
+    exception = NULL;
+    // FIXME  - On i386 the isnan(double) macro tries to map to the isnan(float) function,
+    // causing a build break with -Wshorten-64-to-32 enabled.  The issue is known by the appropriate team.
+    // After that's resolved, we can remove these casts
+    ASSERT(isnan((float)JSValueToNumber(context, jsObjectNoProto, &exception)));
+    ASSERT(exception);
+
+    exception = NULL;
+    ASSERT(!JSValueToStringCopy(context, jsObjectNoProto, &exception));
+    ASSERT(exception);
+    
+    ASSERT(JSValueToBoolean(context, myObject));
+    
+    exception = NULL;
+    ASSERT(!JSValueIsEqual(context, jsObjectNoProto, JSValueMakeNumber(context, 1), &exception));
+    ASSERT(exception);
+    
+    exception = NULL;
+    JSObjectGetPropertyAtIndex(context, myObject, 0, &exception);
+    ASSERT(1 == JSValueToNumber(context, exception, NULL));
+
+    assertEqualsAsBoolean(jsUndefined, false);
+    assertEqualsAsBoolean(jsNull, false);
+    assertEqualsAsBoolean(jsTrue, true);
+    assertEqualsAsBoolean(jsFalse, false);
+    assertEqualsAsBoolean(jsZero, false);
+    assertEqualsAsBoolean(jsOne, true);
+    assertEqualsAsBoolean(jsOneThird, true);
+    assertEqualsAsBoolean(jsEmptyString, false);
+    assertEqualsAsBoolean(jsOneString, true);
+    assertEqualsAsBoolean(jsCFString, true);
+    assertEqualsAsBoolean(jsCFStringWithCharacters, true);
+    assertEqualsAsBoolean(jsCFEmptyString, false);
+    assertEqualsAsBoolean(jsCFEmptyStringWithCharacters, false);
+    
+    assertEqualsAsNumber(jsUndefined, nan(""));
+    assertEqualsAsNumber(jsNull, 0);
+    assertEqualsAsNumber(jsTrue, 1);
+    assertEqualsAsNumber(jsFalse, 0);
+    assertEqualsAsNumber(jsZero, 0);
+    assertEqualsAsNumber(jsOne, 1);
+    assertEqualsAsNumber(jsOneThird, 1.0 / 3.0);
+    assertEqualsAsNumber(jsEmptyString, 0);
+    assertEqualsAsNumber(jsOneString, 1);
+    assertEqualsAsNumber(jsCFString, nan(""));
+    assertEqualsAsNumber(jsCFStringWithCharacters, nan(""));
+    assertEqualsAsNumber(jsCFEmptyString, 0);
+    assertEqualsAsNumber(jsCFEmptyStringWithCharacters, 0);
+    ASSERT(sizeof(JSChar) == sizeof(UniChar));
+    
+    assertEqualsAsCharactersPtr(jsUndefined, "undefined");
+    assertEqualsAsCharactersPtr(jsNull, "null");
+    assertEqualsAsCharactersPtr(jsTrue, "true");
+    assertEqualsAsCharactersPtr(jsFalse, "false");
+    assertEqualsAsCharactersPtr(jsZero, "0");
+    assertEqualsAsCharactersPtr(jsOne, "1");
+    assertEqualsAsCharactersPtr(jsOneThird, "0.3333333333333333");
+    assertEqualsAsCharactersPtr(jsEmptyString, "");
+    assertEqualsAsCharactersPtr(jsOneString, "1");
+    assertEqualsAsCharactersPtr(jsCFString, "A");
+    assertEqualsAsCharactersPtr(jsCFStringWithCharacters, "A");
+    assertEqualsAsCharactersPtr(jsCFEmptyString, "");
+    assertEqualsAsCharactersPtr(jsCFEmptyStringWithCharacters, "");
+    
+    assertEqualsAsUTF8String(jsUndefined, "undefined");
+    assertEqualsAsUTF8String(jsNull, "null");
+    assertEqualsAsUTF8String(jsTrue, "true");
+    assertEqualsAsUTF8String(jsFalse, "false");
+    assertEqualsAsUTF8String(jsZero, "0");
+    assertEqualsAsUTF8String(jsOne, "1");
+    assertEqualsAsUTF8String(jsOneThird, "0.3333333333333333");
+    assertEqualsAsUTF8String(jsEmptyString, "");
+    assertEqualsAsUTF8String(jsOneString, "1");
+    assertEqualsAsUTF8String(jsCFString, "A");
+    assertEqualsAsUTF8String(jsCFStringWithCharacters, "A");
+    assertEqualsAsUTF8String(jsCFEmptyString, "");
+    assertEqualsAsUTF8String(jsCFEmptyStringWithCharacters, "");
+    
+    checkConstnessInJSObjectNames();
+    
+    ASSERT(JSValueIsStrictEqual(context, jsTrue, jsTrue));
+    ASSERT(!JSValueIsStrictEqual(context, jsOne, jsOneString));
+
+    ASSERT(JSValueIsEqual(context, jsOne, jsOneString, NULL));
+    ASSERT(!JSValueIsEqual(context, jsTrue, jsFalse, NULL));
+    
+    CFStringRef cfJSString = JSStringCopyCFString(kCFAllocatorDefault, jsCFIString);
+    CFStringRef cfJSEmptyString = JSStringCopyCFString(kCFAllocatorDefault, jsCFEmptyIString);
+    ASSERT(CFEqual(cfJSString, cfString));
+    ASSERT(CFEqual(cfJSEmptyString, cfEmptyString));
+    CFRelease(cfJSString);
+    CFRelease(cfJSEmptyString);
+
+    CFRelease(cfString);
+    CFRelease(cfEmptyString);
+    
+    jsGlobalValue = JSObjectMake(context, NULL, NULL);
+    makeGlobalNumberValue(context);
+    JSValueProtect(context, jsGlobalValue);
+    JSGarbageCollect(context);
+    ASSERT(JSValueIsObject(context, jsGlobalValue));
+    JSValueUnprotect(context, jsGlobalValue);
+    JSValueUnprotect(context, jsNumberValue);
+
+    JSStringRef goodSyntax = JSStringCreateWithUTF8CString("x = 1;");
+    const char* badSyntaxConstant = "x := 1;";
+    JSStringRef badSyntax = JSStringCreateWithUTF8CString(badSyntaxConstant);
+    ASSERT(JSCheckScriptSyntax(context, goodSyntax, NULL, 0, NULL));
+    ASSERT(!JSCheckScriptSyntax(context, badSyntax, NULL, 0, NULL));
+    ASSERT(!JSScriptCreateFromString(contextGroup, 0, 0, badSyntax, 0, 0));
+    ASSERT(!JSScriptCreateReferencingImmortalASCIIText(contextGroup, 0, 0, badSyntaxConstant, strlen(badSyntaxConstant), 0, 0));
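+    // Both JSScript creation APIs are expected to reject the syntax error up
+    // front, returning NULL rather than deferring the failure to evaluation.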
+
+    JSValueRef result;
+    JSValueRef v;
+    JSObjectRef o;
+    JSStringRef string;
+
+    result = JSEvaluateScript(context, goodSyntax, NULL, NULL, 1, NULL);
+    ASSERT(result);
+    ASSERT(JSValueIsEqual(context, result, jsOne, NULL));
+
+    exception = NULL;
+    result = JSEvaluateScript(context, badSyntax, NULL, NULL, 1, &exception);
+    ASSERT(!result);
+    ASSERT(JSValueIsObject(context, exception));
+    
+    JSStringRef array = JSStringCreateWithUTF8CString("Array");
+    JSObjectRef arrayConstructor = JSValueToObject(context, JSObjectGetProperty(context, globalObject, array, NULL), NULL);
+    JSStringRelease(array);
+    result = JSObjectCallAsConstructor(context, arrayConstructor, 0, NULL, NULL);
+    ASSERT(result);
+    ASSERT(JSValueIsObject(context, result));
+    ASSERT(JSValueIsInstanceOfConstructor(context, result, arrayConstructor, NULL));
+    ASSERT(!JSValueIsInstanceOfConstructor(context, JSValueMakeNull(context), arrayConstructor, NULL));
+
+    o = JSValueToObject(context, result, NULL);
+    exception = NULL;
+    ASSERT(JSValueIsUndefined(context, JSObjectGetPropertyAtIndex(context, o, 0, &exception)));
+    ASSERT(!exception);
+    
+    JSObjectSetPropertyAtIndex(context, o, 0, JSValueMakeNumber(context, 1), &exception);
+    ASSERT(!exception);
+    
+    exception = NULL;
+    ASSERT(1 == JSValueToNumber(context, JSObjectGetPropertyAtIndex(context, o, 0, &exception), &exception));
+    ASSERT(!exception);
+
+    JSStringRef functionBody;
+    JSObjectRef function;
+    
+    exception = NULL;
+    functionBody = JSStringCreateWithUTF8CString("rreturn Array;");
+    JSStringRef line = JSStringCreateWithUTF8CString("line");
+    ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, &exception));
+    ASSERT(JSValueIsObject(context, exception));
+    v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL);
+    assertEqualsAsNumber(v, 2);
+    JSStringRelease(functionBody);
+    JSStringRelease(line);
+
+    exception = NULL;
+    functionBody = JSStringCreateWithUTF8CString("rreturn Array;");
+    line = JSStringCreateWithUTF8CString("line");
+    ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, -42, &exception));
+    ASSERT(JSValueIsObject(context, exception));
+    v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL);
+    assertEqualsAsNumber(v, 2);
+    JSStringRelease(functionBody);
+    JSStringRelease(line);
+
+    exception = NULL;
+    functionBody = JSStringCreateWithUTF8CString("// Line one.\nrreturn Array;");
+    line = JSStringCreateWithUTF8CString("line");
+    ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, &exception));
+    ASSERT(JSValueIsObject(context, exception));
+    v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL);
+    assertEqualsAsNumber(v, 3);
+    JSStringRelease(functionBody);
+    JSStringRelease(line);
+
+    exception = NULL;
+    functionBody = JSStringCreateWithUTF8CString("return Array;");
+    function = JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, &exception);
+    JSStringRelease(functionBody);
+    ASSERT(!exception);
+    ASSERT(JSObjectIsFunction(context, function));
+    v = JSObjectCallAsFunction(context, function, NULL, 0, NULL, NULL);
+    ASSERT(v);
+    ASSERT(JSValueIsEqual(context, v, arrayConstructor, NULL));
+    
+    exception = NULL;
+    function = JSObjectMakeFunction(context, NULL, 0, NULL, jsEmptyIString, NULL, 0, &exception);
+    ASSERT(!exception);
+    v = JSObjectCallAsFunction(context, function, NULL, 0, NULL, &exception);
+    ASSERT(v && !exception);
+    ASSERT(JSValueIsUndefined(context, v));
+    
+    exception = NULL;
+    v = NULL;
+    JSStringRef foo = JSStringCreateWithUTF8CString("foo");
+    JSStringRef argumentNames[] = { foo };
+    functionBody = JSStringCreateWithUTF8CString("return foo;");
+    function = JSObjectMakeFunction(context, foo, 1, argumentNames, functionBody, NULL, 1, &exception);
+    ASSERT(function && !exception);
+    JSValueRef arguments[] = { JSValueMakeNumber(context, 2) };
+    JSObjectCallAsFunction(context, function, NULL, 1, arguments, &exception);
+    JSStringRelease(foo);
+    JSStringRelease(functionBody);
+    
+    string = JSValueToStringCopy(context, function, NULL);
+    assertEqualsAsUTF8String(JSValueMakeString(context, string), "function foo(foo) {\nreturn foo;\n}");
+    JSStringRelease(string);
+
+    JSStringRef print = JSStringCreateWithUTF8CString("print");
+    JSObjectRef printFunction = JSObjectMakeFunctionWithCallback(context, print, print_callAsFunction);
+    JSObjectSetProperty(context, globalObject, print, printFunction, kJSPropertyAttributeNone, NULL); 
+    JSStringRelease(print);
+    
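+    // The callback function was not created from a class with private-data
+    // storage, so JSObjectSetPrivate should fail and JSObjectGetPrivate
+    // should return NULL.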
+    ASSERT(!JSObjectSetPrivate(printFunction, (void*)1));
+    ASSERT(!JSObjectGetPrivate(printFunction));
+
+    JSStringRef myConstructorIString = JSStringCreateWithUTF8CString("MyConstructor");
+    JSObjectRef myConstructor = JSObjectMakeConstructor(context, NULL, myConstructor_callAsConstructor);
+    JSObjectSetProperty(context, globalObject, myConstructorIString, myConstructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(myConstructorIString);
+    
+    JSStringRef myBadConstructorIString = JSStringCreateWithUTF8CString("MyBadConstructor");
+    JSObjectRef myBadConstructor = JSObjectMakeConstructor(context, NULL, myBadConstructor_callAsConstructor);
+    JSObjectSetProperty(context, globalObject, myBadConstructorIString, myBadConstructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(myBadConstructorIString);
+    
+    ASSERT(!JSObjectSetPrivate(myConstructor, (void*)1));
+    ASSERT(!JSObjectGetPrivate(myConstructor));
+    
+    string = JSStringCreateWithUTF8CString("Base");
+    JSObjectRef baseConstructor = JSObjectMakeConstructor(context, Base_class(context), NULL);
+    JSObjectSetProperty(context, globalObject, string, baseConstructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(string);
+    
+    string = JSStringCreateWithUTF8CString("Derived");
+    JSObjectRef derivedConstructor = JSObjectMakeConstructor(context, Derived_class(context), NULL);
+    JSObjectSetProperty(context, globalObject, string, derivedConstructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(string);
+    
+    string = JSStringCreateWithUTF8CString("Derived2");
+    JSObjectRef derived2Constructor = JSObjectMakeConstructor(context, Derived2_class(context), NULL);
+    JSObjectSetProperty(context, globalObject, string, derived2Constructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(string);
+
+    o = JSObjectMake(context, NULL, NULL);
+    JSObjectSetProperty(context, o, jsOneIString, JSValueMakeNumber(context, 1), kJSPropertyAttributeNone, NULL);
+    JSObjectSetProperty(context, o, jsCFIString,  JSValueMakeNumber(context, 1), kJSPropertyAttributeDontEnum, NULL);
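+    // Of the two properties just added, only the first is enumerable; the
+    // second was set with kJSPropertyAttributeDontEnum, so exactly one name
+    // should be reported below.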
+    JSPropertyNameArrayRef nameArray = JSObjectCopyPropertyNames(context, o);
+    size_t expectedCount = JSPropertyNameArrayGetCount(nameArray);
+    size_t count;
+    for (count = 0; count < expectedCount; ++count)
+        JSPropertyNameArrayGetNameAtIndex(nameArray, count);
+    JSPropertyNameArrayRelease(nameArray);
+    ASSERT(count == 1); // jsCFString should not be enumerated
+
+    JSValueRef argumentsArrayValues[] = { JSValueMakeNumber(context, 10), JSValueMakeNumber(context, 20) };
+    o = JSObjectMakeArray(context, sizeof(argumentsArrayValues) / sizeof(JSValueRef), argumentsArrayValues, NULL);
+    string = JSStringCreateWithUTF8CString("length");
+    v = JSObjectGetProperty(context, o, string, NULL);
+    assertEqualsAsNumber(v, 2);
+    v = JSObjectGetPropertyAtIndex(context, o, 0, NULL);
+    assertEqualsAsNumber(v, 10);
+    v = JSObjectGetPropertyAtIndex(context, o, 1, NULL);
+    assertEqualsAsNumber(v, 20);
+
+    o = JSObjectMakeArray(context, 0, NULL, NULL);
+    v = JSObjectGetProperty(context, o, string, NULL);
+    assertEqualsAsNumber(v, 0);
+    JSStringRelease(string);
+
+    JSValueRef argumentsDateValues[] = { JSValueMakeNumber(context, 0) };
+    o = JSObjectMakeDate(context, 1, argumentsDateValues, NULL);
+    if (timeZoneIsPST())
+        assertEqualsAsUTF8String(o, "Wed Dec 31 1969 16:00:00 GMT-0800 (PST)");
+
+    string = JSStringCreateWithUTF8CString("an error message");
+    JSValueRef argumentsErrorValues[] = { JSValueMakeString(context, string) };
+    o = JSObjectMakeError(context, 1, argumentsErrorValues, NULL);
+    assertEqualsAsUTF8String(o, "Error: an error message");
+    JSStringRelease(string);
+
+    string = JSStringCreateWithUTF8CString("foo");
+    JSStringRef string2 = JSStringCreateWithUTF8CString("gi");
+    JSValueRef argumentsRegExpValues[] = { JSValueMakeString(context, string), JSValueMakeString(context, string2) };
+    o = JSObjectMakeRegExp(context, 2, argumentsRegExpValues, NULL);
+    assertEqualsAsUTF8String(o, "/foo/gi");
+    JSStringRelease(string);
+    JSStringRelease(string2);
+
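+    // Create and immediately release classes both with and without
+    // kJSClassAttributeNoAutomaticPrototype.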
+    JSClassDefinition nullDefinition = kJSClassDefinitionEmpty;
+    nullDefinition.attributes = kJSClassAttributeNoAutomaticPrototype;
+    JSClassRef nullClass = JSClassCreate(&nullDefinition);
+    JSClassRelease(nullClass);
+    
+    nullDefinition = kJSClassDefinitionEmpty;
+    nullClass = JSClassCreate(&nullDefinition);
+    JSClassRelease(nullClass);
+
+    functionBody = JSStringCreateWithUTF8CString("return this;");
+    function = JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, NULL);
+    JSStringRelease(functionBody);
+    v = JSObjectCallAsFunction(context, function, NULL, 0, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSObjectCallAsFunction(context, function, o, 0, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+
+    functionBody = JSStringCreateWithUTF8CString("return eval(\"this\");");
+    function = JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, NULL);
+    JSStringRelease(functionBody);
+    v = JSObjectCallAsFunction(context, function, NULL, 0, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSObjectCallAsFunction(context, function, o, 0, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+
+    const char* thisScript = "this;";
+    JSStringRef script = JSStringCreateWithUTF8CString(thisScript);
+    v = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSEvaluateScript(context, script, o, NULL, 1, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+    JSStringRelease(script);
+
+    JSScriptRef scriptObject = JSScriptCreateReferencingImmortalASCIIText(contextGroup, 0, 0, thisScript, strlen(thisScript), 0, 0);
+    v = JSScriptEvaluate(context, scriptObject, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSScriptEvaluate(context, scriptObject, o, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+    JSScriptRelease(scriptObject);
+
+    script = JSStringCreateWithUTF8CString("eval(this);");
+    v = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSEvaluateScript(context, script, o, NULL, 1, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+    JSStringRelease(script);
+
+    script = JSStringCreateWithUTF8CString("[ ]");
+    v = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+    ASSERT(JSValueIsArray(context, v));
+    JSStringRelease(script);
+
+    script = JSStringCreateWithUTF8CString("new Date");
+    v = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+    ASSERT(JSValueIsDate(context, v));
+    JSStringRelease(script);
+
+    exception = NULL;
+    script = JSStringCreateWithUTF8CString("rreturn Array;");
+    JSStringRef sourceURL = JSStringCreateWithUTF8CString("file:///foo/bar.js");
+    JSStringRef sourceURLKey = JSStringCreateWithUTF8CString("sourceURL");
+    JSEvaluateScript(context, script, NULL, sourceURL, 1, &exception);
+    ASSERT(exception);
+    v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), sourceURLKey, NULL);
+    assertEqualsAsUTF8String(v, "file:///foo/bar.js");
+    JSStringRelease(script);
+    JSStringRelease(sourceURL);
+    JSStringRelease(sourceURLKey);
+
+    // Verify that creating a constructor for a class with no static functions does not trigger
+    // an assert inside putDirect or lead to a crash during GC. 
+    nullDefinition = kJSClassDefinitionEmpty;
+    nullClass = JSClassCreate(&nullDefinition);
+    JSObjectMakeConstructor(context, nullClass, 0);
+    JSClassRelease(nullClass);
+
+    char* scriptUTF8 = createStringWithContentsOfFile(scriptPath);
+    if (!scriptUTF8) {
+        printf("FAIL: Test script could not be loaded.\n");
+        failed = 1;
+    } else {
+        JSStringRef url = JSStringCreateWithUTF8CString(scriptPath);
+        JSStringRef script = JSStringCreateWithUTF8CString(scriptUTF8);
+        JSStringRef errorMessage = 0;
+        int errorLine = 0;
+        JSScriptRef scriptObject = JSScriptCreateFromString(contextGroup, url, 1, script, &errorMessage, &errorLine);
+        ASSERT((!scriptObject) != (!errorMessage));
+        if (!scriptObject) {
+            printf("FAIL: Test script did not parse\n\t%s:%d\n\t", scriptPath, errorLine);
+            CFStringRef errorCF = JSStringCopyCFString(kCFAllocatorDefault, errorMessage);
+            CFShow(errorCF);
+            CFRelease(errorCF);
+            JSStringRelease(errorMessage);
+            failed = 1;
+        }
+
+        JSStringRelease(script);
+        exception = NULL;
+        result = scriptObject ? JSScriptEvaluate(context, scriptObject, 0, &exception) : 0;
+        if (result && JSValueIsUndefined(context, result))
+            printf("PASS: Test script executed successfully.\n");
+        else {
+            printf("FAIL: Test script returned unexpected value:\n");
+            JSStringRef exceptionIString = JSValueToStringCopy(context, exception, NULL);
+            CFStringRef exceptionCF = JSStringCopyCFString(kCFAllocatorDefault, exceptionIString);
+            CFShow(exceptionCF);
+            CFRelease(exceptionCF);
+            JSStringRelease(exceptionIString);
+            failed = 1;
+        }
+        JSScriptRelease(scriptObject);
+        free(scriptUTF8);
+    }
+
+    // Check that Promise is exposed.
+    {
+        JSObjectRef globalObject = JSContextGetGlobalObject(context);
+        {
+            JSStringRef promiseProperty = JSStringCreateWithUTF8CString("Promise");
+            ASSERT(JSObjectHasProperty(context, globalObject, promiseProperty));
+            JSStringRelease(promiseProperty);
+        }
+        {
+            JSStringRef script = JSStringCreateWithUTF8CString("typeof Promise");
+            JSStringRef function = JSStringCreateWithUTF8CString("function");
+            JSValueRef value = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+            ASSERT(JSValueIsString(context, value));
+            JSStringRef valueAsString = JSValueToStringCopy(context, value, NULL);
+            ASSERT(JSStringIsEqual(valueAsString, function));
+            JSStringRelease(valueAsString);
+            JSStringRelease(function);
+            JSStringRelease(script);
+        }
+        printf("PASS: Promise is exposed under JSContext API.\n");
+    }
+
+    // Check microtasks.
+    {
+        JSGlobalContextRef context = JSGlobalContextCreateInGroup(NULL, NULL);
+        {
+            JSObjectRef globalObject = JSContextGetGlobalObject(context);
+            JSValueRef exception;
+            JSStringRef code = JSStringCreateWithUTF8CString("result = 0; Promise.resolve(42).then(function (value) { result = value; });");
+            JSStringRef file = JSStringCreateWithUTF8CString("");
+            assertTrue(JSEvaluateScript(context, code, globalObject, file, 1, &exception), "An exception should not be thrown");
+            JSStringRelease(code);
+            JSStringRelease(file);
+
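+            // The then() reaction runs as a microtask that is drained before
+            // control returns to the API caller, so "result" should already
+            // be 42 here.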
+            JSStringRef resultProperty = JSStringCreateWithUTF8CString("result");
+            ASSERT(JSObjectHasProperty(context, globalObject, resultProperty));
+
+            JSValueRef resultValue = JSObjectGetProperty(context, globalObject, resultProperty, &exception);
+            assertEqualsAsNumber(resultValue, 42);
+            JSStringRelease(resultProperty);
+        }
+        JSGlobalContextRelease(context);
+    }
+
+    failed = testTypedArrayCAPI() || failed;
+    failed = testExecutionTimeLimit() || failed;
+    failed = testFunctionOverrides() || failed;
+    failed = testGlobalContextWithFinalizer() || failed;
+    failed = testPingPongStackOverflow() || failed;
+    failed = testJSONParse() || failed;
+
+    // Clear out local variables pointing at JSObjectRefs to allow their values to be collected
+    function = NULL;
+    v = NULL;
+    o = NULL;
+    globalObject = NULL;
+    myConstructor = NULL;
+
+    JSStringRelease(jsEmptyIString);
+    JSStringRelease(jsOneIString);
+    JSStringRelease(jsCFIString);
+    JSStringRelease(jsCFEmptyIString);
+    JSStringRelease(jsCFIStringWithCharacters);
+    JSStringRelease(jsCFEmptyIStringWithCharacters);
+    JSStringRelease(goodSyntax);
+    JSStringRelease(badSyntax);
+
+    JSGlobalContextRelease(context);
+    JSClassRelease(globalObjectClass);
+
+    // Test for an infinite prototype chain that used to be created. This test
+    // passes if the call to JSObjectHasProperty() does not hang.
+
+    JSClassDefinition prototypeLoopClassDefinition = kJSClassDefinitionEmpty;
+    prototypeLoopClassDefinition.staticFunctions = globalObject_staticFunctions;
+    JSClassRef prototypeLoopClass = JSClassCreate(&prototypeLoopClassDefinition);
+    JSGlobalContextRef prototypeLoopContext = JSGlobalContextCreateInGroup(NULL, prototypeLoopClass);
+
+    JSStringRef nameProperty = JSStringCreateWithUTF8CString("name");
+    JSObjectHasProperty(prototypeLoopContext, JSContextGetGlobalObject(prototypeLoopContext), nameProperty);
+
+    JSGlobalContextRelease(prototypeLoopContext);
+    JSClassRelease(prototypeLoopClass);
+
+    printf("PASS: Infinite prototype chain does not occur.\n");
+
+    if (checkForCycleInPrototypeChain())
+        printf("PASS: A cycle in a prototype chain can't be created.\n");
+    else {
+        printf("FAIL: A cycle in a prototype chain can be created.\n");
+        failed = true;
+    }
+    if (valueToObjectExceptionTest())
+        printf("PASS: throwException did not crash when handling an error with appendMessageToError set and no codeBlock available.\n");
+
+    if (globalContextNameTest())
+        printf("PASS: global context name behaves as expected.\n");
+
+    customGlobalObjectClassTest();
+    globalObjectSetPrototypeTest();
+    globalObjectPrivatePropertyTest();
+
+    if (failed) {
+        printf("FAIL: Some tests failed.\n");
+        return 1;
+    }
+
+    printf("PASS: Program exited normally.\n");
+    return 0;
+}
+
+static char* createStringWithContentsOfFile(const char* fileName)
+{
+    char* buffer;
+    
+    size_t buffer_size = 0;
+    size_t buffer_capacity = 1024;
+    buffer = (char*)malloc(buffer_capacity);
+    
+    FILE* f = fopen(fileName, "r");
+    if (!f) {
+        fprintf(stderr, "Could not open file: %s\n", fileName);
+        free(buffer);
+        return 0;
+    }
+    
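+    // Read in chunks, doubling the buffer whenever it fills so there is
+    // always room for the trailing '\0' appended after the loop.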
+    while (!feof(f) && !ferror(f)) {
+        buffer_size += fread(buffer + buffer_size, 1, buffer_capacity - buffer_size, f);
+        if (buffer_size == buffer_capacity) { // guarantees space for trailing '\0'
+            buffer_capacity *= 2;
+            buffer = (char*)realloc(buffer, buffer_capacity);
+            ASSERT(buffer);
+        }
+        
+        ASSERT(buffer_size < buffer_capacity);
+    }
+    fclose(f);
+    buffer[buffer_size] = '\0';
+    
+    return buffer;
+}
+
+#if OS(WINDOWS)
+extern "C" __declspec(dllexport) int WINAPI dllLauncherEntryPoint(int argc, const char* argv[])
+{
+    return main(argc, const_cast<char**>(argv));
+}
+#endif
diff --git a/Source/JavaScriptCore/API/tests/testapi.js b/Source/JavaScriptCore/API/tests/testapi.js
new file mode 100644
index 000000000..88d3701c2
--- /dev/null
+++ b/Source/JavaScriptCore/API/tests/testapi.js
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2006 Apple Inc.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
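+// Presumably a regression check: reference |arguments| without ever
+// materializing it, then force a garbage collection.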
+function bludgeonArguments() { if (0) arguments; return function g() {} }
+h = bludgeonArguments();
+gc();
+
+var failed = false;
+function pass(msg)
+{
+    print("PASS: " + msg, "green");
+}
+
+function fail(msg)
+{
+    print("FAIL: " + msg, "red");
+    failed = true;
+}
+
+function shouldBe(a, b)
+{
+    var evalA;
+    try {
+        evalA = eval(a);
+    } catch(e) {
+        evalA = e;
+    }
+    
+    if (evalA == b || isNaN(evalA) && typeof evalA == 'number' && isNaN(b) && typeof b == 'number')
+        pass(a + " should be " + b + " and is.");
+    else
+        fail(a + " should be " + b + " but instead is " + evalA + ".");
+}
+
+function shouldThrow(a)
+{
+    var evalA;
+    try {
+        eval(a);
+    } catch(e) {
+        pass(a + " threw: " + e);
+        return;
+    }
+
+    fail(a + " did not throw an exception.");
+}
+
+function globalStaticFunction()
+{
+    return 4;
+}
+
+shouldBe("globalStaticValue", 3);
+shouldBe("globalStaticFunction()", 4);
+shouldBe("this.globalStaticFunction()", 4);
+
+function globalStaticFunction2() {
+    return 10;
+}
+shouldBe("globalStaticFunction2();", 10);
+this.globalStaticFunction2 = function() { return 20; };
+shouldBe("globalStaticFunction2();", 20);
+shouldBe("this.globalStaticFunction2();", 20);
+
+function iAmNotAStaticFunction() { return 10; }
+shouldBe("iAmNotAStaticFunction();", 10);
+this.iAmNotAStaticFunction = function() { return 20; };
+shouldBe("iAmNotAStaticFunction();", 20);
+
+shouldBe("typeof MyObject", "function"); // our object implements 'call'
+MyObject.cantFind = 1;
+shouldBe("MyObject.cantFind", undefined);
+MyObject.regularType = 1;
+shouldBe("MyObject.regularType", 1);
+MyObject.alwaysOne = 2;
+shouldBe("MyObject.alwaysOne", 1);
+MyObject.cantDelete = 1;
+delete MyObject.cantDelete;
+shouldBe("MyObject.cantDelete", 1);
+shouldBe("delete MyObject.throwOnDelete", "an exception");
+MyObject.cantSet = 1;
+shouldBe("MyObject.cantSet", undefined);
+shouldBe("MyObject.throwOnGet", "an exception");
+shouldBe("MyObject.throwOnSet = 5", "an exception");
+shouldBe("MyObject('throwOnCall')", "an exception");
+shouldBe("new MyObject('throwOnConstruct')", "an exception");
+shouldBe("'throwOnHasInstance' instanceof MyObject", "an exception");
+
+MyObject.nullGetForwardSet = 1;
+shouldBe("MyObject.nullGetForwardSet", 1);
+
+var foundMyPropertyName = false;
+var foundRegularType = false;
+for (var p in MyObject) {
+    if (p == "myPropertyName")
+        foundMyPropertyName = true;
+    if (p == "regularType")
+        foundRegularType = true;
+}
+
+if (foundMyPropertyName)
+    pass("MyObject.myPropertyName was enumerated");
+else
+    fail("MyObject.myPropertyName was not enumerated");
+
+if (foundRegularType)
+    pass("MyObject.regularType was enumerated");
+else
+    fail("MyObject.regularType was not enumerated");
+
+var alwaysOneDescriptor = Object.getOwnPropertyDescriptor(MyObject, "alwaysOne");
+shouldBe('typeof alwaysOneDescriptor', "object");
+shouldBe('alwaysOneDescriptor.value', MyObject.alwaysOne);
+shouldBe('alwaysOneDescriptor.configurable', true);
+shouldBe('alwaysOneDescriptor.enumerable', false); // Actually it is.
+var cantFindDescriptor = Object.getOwnPropertyDescriptor(MyObject, "cantFind");
+shouldBe('typeof cantFindDescriptor', "object");
+shouldBe('cantFindDescriptor.value', MyObject.cantFind);
+shouldBe('cantFindDescriptor.configurable', true);
+shouldBe('cantFindDescriptor.enumerable', false);
+try {
+    // If getOwnPropertyDescriptor() returned an access descriptor, this wouldn't throw.
+    Object.getOwnPropertyDescriptor(MyObject, "throwOnGet");
+} catch (e) {
+    pass("getting property descriptor of throwOnGet threw exception");
+}
+var myPropertyNameDescriptor = Object.getOwnPropertyDescriptor(MyObject, "myPropertyName");
+shouldBe('typeof myPropertyNameDescriptor', "object");
+shouldBe('myPropertyNameDescriptor.value', MyObject.myPropertyName);
+shouldBe('myPropertyNameDescriptor.configurable', true);
+shouldBe('myPropertyNameDescriptor.enumerable', false); // Actually it is.
+try {
+    // if getOwnPropertyDescriptor() returned an access descriptor, this wouldn't throw.
+    Object.getOwnPropertyDescriptor(MyObject, "hasPropertyLie");
+} catch (e) {
+    pass("getting property descriptor of hasPropertyLie threw exception");
+}
+shouldBe('Object.getOwnPropertyDescriptor(MyObject, "doesNotExist")', undefined);
+
+myObject = new MyObject();
+
+shouldBe("delete MyObject.regularType", true);
+shouldBe("MyObject.regularType", undefined);
+shouldBe("MyObject(0)", 1);
+shouldBe("MyObject()", undefined);
+shouldBe("typeof myObject", "object");
+shouldBe("MyObject ? 1 : 0", true); // toBoolean
+shouldBe("+MyObject", 1); // toNumber
+shouldBe("(Object.prototype.toString.call(MyObject))", "[object MyObject]"); // Object.prototype.toString
+shouldBe("(MyObject.toString())", "[object MyObject]"); // toString
+shouldBe("String(MyObject)", "MyObjectAsString"); // toString
+shouldBe("MyObject - 0", 1); // toNumber
+shouldBe("MyObject.valueOf()", 1); // valueOf
+
+shouldBe("typeof MyConstructor", "object");
+constructedObject = new MyConstructor(1);
+shouldBe("typeof constructedObject", "object");
+shouldBe("constructedObject.value", 1);
+shouldBe("myObject instanceof MyObject", true);
+shouldBe("(new Object()) instanceof MyObject", false);
+
+shouldThrow("new MyBadConstructor()");
+
+MyObject.nullGetSet = 1;
+shouldBe("MyObject.nullGetSet", 1);
+shouldThrow("MyObject.nullCall()");
+shouldThrow("MyObject.hasPropertyLie");
+
+derived = new Derived();
+
+shouldBe("derived instanceof Derived", true);
+shouldBe("derived instanceof Base", true);
+
+// base properties and functions return 1 when called/gotten; derived, 2
+shouldBe("derived.baseProtoDup()", 2);
+shouldBe("derived.baseProto()", 1);
+shouldBe("derived.baseDup", 2);
+shouldBe("derived.baseOnly", 1);
+shouldBe("derived.protoOnly()", 2);
+shouldBe("derived.protoDup", 2);
+shouldBe("derived.derivedOnly", 2)
+
+shouldBe("derived.baseHardNull()", undefined)
+
+// base properties throw 1 when set; derived, 2
+shouldBe("derived.baseDup = 0", 2);
+shouldBe("derived.baseOnly = 0", 1);
+shouldBe("derived.derivedOnly = 0", 2)
+shouldBe("derived.protoDup = 0", 2);
+
+derived2 = new Derived2();
+
+shouldBe("derived2 instanceof Derived2", true);
+shouldBe("derived2 instanceof Derived", true);
+shouldBe("derived2 instanceof Base", true);
+
+// base properties and functions return 1 when called/gotten; derived, 2
+shouldBe("derived2.baseProtoDup()", 2);
+shouldBe("derived2.baseProto()", 1);
+shouldBe("derived2.baseDup", 2);
+shouldBe("derived2.baseOnly", 1);
+shouldBe("derived2.protoOnly()", 2);
+shouldBe("derived2.protoDup", 2);
+shouldBe("derived2.derivedOnly", 2)
+
+// base properties throw 1 when set; derived, 2
+shouldBe("derived2.baseDup = 0", 2);
+shouldBe("derived2.baseOnly = 0", 1);
+shouldBe("derived2.derivedOnly = 0", 2)
+shouldBe("derived2.protoDup = 0", 2);
+
+shouldBe('Object.getOwnPropertyDescriptor(derived, "baseProto")', undefined);
+shouldBe('Object.getOwnPropertyDescriptor(derived, "baseProtoDup")', undefined);
+var baseDupDescriptor = Object.getOwnPropertyDescriptor(derived, "baseDup");
+shouldBe('typeof baseDupDescriptor', "object");
+shouldBe('baseDupDescriptor.value', derived.baseDup);
+shouldBe('baseDupDescriptor.configurable', true);
+shouldBe('baseDupDescriptor.enumerable', false);
+var baseOnlyDescriptor = Object.getOwnPropertyDescriptor(derived, "baseOnly");
+shouldBe('typeof baseOnlyDescriptor', "object");
+shouldBe('baseOnlyDescriptor.value', derived.baseOnly);
+shouldBe('baseOnlyDescriptor.configurable', true);
+shouldBe('baseOnlyDescriptor.enumerable', false);
+shouldBe('Object.getOwnPropertyDescriptor(derived, "protoOnly")', undefined);
+var protoDupDescriptor = Object.getOwnPropertyDescriptor(derived, "protoDup");
+shouldBe('typeof protoDupDescriptor', "object");
+shouldBe('protoDupDescriptor.value', derived.protoDup);
+shouldBe('protoDupDescriptor.configurable', true);
+shouldBe('protoDupDescriptor.enumerable', false);
+var derivedOnlyDescriptor = Object.getOwnPropertyDescriptor(derived, "derivedOnly");
+shouldBe('typeof derivedOnlyDescriptor', "object");
+shouldBe('derivedOnlyDescriptor.value', derived.derivedOnly);
+shouldBe('derivedOnlyDescriptor.configurable', true);
+shouldBe('derivedOnlyDescriptor.enumerable', false);
+
+shouldBe("undefined instanceof MyObject", false);
+EvilExceptionObject.hasInstance = function f() { return f(); };
+EvilExceptionObject.__proto__ = undefined;
+shouldThrow("undefined instanceof EvilExceptionObject");
+EvilExceptionObject.hasInstance = function () { return true; };
+shouldBe("undefined instanceof EvilExceptionObject", true);
+
+EvilExceptionObject.toNumber = function f() { return f(); };
+shouldThrow("EvilExceptionObject*5");
+EvilExceptionObject.toStringExplicit = function f() { return f(); };
+shouldThrow("String(EvilExceptionObject)");
+
+shouldBe("console", "[object Console]");
+shouldBe("typeof console.log", "function");
+
+shouldBe("EmptyObject", "[object CallbackObject]");
+
+for (var i = 0; i < 6; ++i)
+    PropertyCatchalls.x = i;
+shouldBe("PropertyCatchalls.x", 4);
+
+for (var i = 0; i < 6; ++i)
+    var x = PropertyCatchalls.x;
+shouldBe("x", null);
+var make_throw = 'make_throw';
+shouldThrow("PropertyCatchalls[make_throw]=1");
+make_throw = 0;
+shouldThrow("PropertyCatchalls[make_throw]=1");
+
+for (var i = 0; i < 10; ++i) {
+    for (var p in PropertyCatchalls) {
+        if (p == "x")
+            continue;
+        shouldBe("p", i % 10);
+        break;
+    }
+}
+
+PropertyCatchalls.__proto__ = { y: 1 };
+for (var i = 0; i < 6; ++i)
+    var y = PropertyCatchalls.y;
+shouldBe("y", null);
+
+var o = { __proto__: PropertyCatchalls };
+for (var i = 0; i < 6; ++i)
+    var z = PropertyCatchalls.z;
+shouldBe("z", null);
+
+if (failed)
+    throw "Some tests failed";
diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt
new file mode 100644
index 000000000..c2a203fc0
--- /dev/null
+++ b/Source/JavaScriptCore/CMakeLists.txt
@@ -0,0 +1,1538 @@
+cmake_minimum_required(VERSION 2.8.12)
+include(WebKitCommon)
+set_property(DIRECTORY . PROPERTY FOLDER "JavaScriptCore")
+
+set(JavaScriptCore_INCLUDE_DIRECTORIES
+    "${CMAKE_BINARY_DIR}"
+    "${JAVASCRIPTCORE_DIR}"
+    "${JAVASCRIPTCORE_DIR}/.."
+    "${JAVASCRIPTCORE_DIR}/API"
+    "${JAVASCRIPTCORE_DIR}/ForwardingHeaders"
+    "${JAVASCRIPTCORE_DIR}/assembler"
+    "${JAVASCRIPTCORE_DIR}/b3"
+    "${JAVASCRIPTCORE_DIR}/b3/air"
+    "${JAVASCRIPTCORE_DIR}/bindings"
+    "${JAVASCRIPTCORE_DIR}/builtins"
+    "${JAVASCRIPTCORE_DIR}/bytecode"
+    "${JAVASCRIPTCORE_DIR}/bytecompiler"
+    "${JAVASCRIPTCORE_DIR}/dfg"
+    "${JAVASCRIPTCORE_DIR}/disassembler"
+    "${JAVASCRIPTCORE_DIR}/disassembler/udis86"
+    "${JAVASCRIPTCORE_DIR}/disassembler/ARM64"
+    "${JAVASCRIPTCORE_DIR}/domjit"
+    "${JAVASCRIPTCORE_DIR}/ftl"
+    "${JAVASCRIPTCORE_DIR}/heap"
+    "${JAVASCRIPTCORE_DIR}/debugger"
+    "${JAVASCRIPTCORE_DIR}/inspector"
+    "${JAVASCRIPTCORE_DIR}/inspector/agents"
+    "${JAVASCRIPTCORE_DIR}/inspector/augmentable"
+    "${JAVASCRIPTCORE_DIR}/inspector/remote"
+    "${JAVASCRIPTCORE_DIR}/interpreter"
+    "${JAVASCRIPTCORE_DIR}/jit"
+    "${JAVASCRIPTCORE_DIR}/llint"
+    "${JAVASCRIPTCORE_DIR}/parser"
+    "${JAVASCRIPTCORE_DIR}/profiler"
+    "${JAVASCRIPTCORE_DIR}/replay"
+    "${JAVASCRIPTCORE_DIR}/runtime"
+    "${JAVASCRIPTCORE_DIR}/tools"
+    "${JAVASCRIPTCORE_DIR}/wasm"
+    "${JAVASCRIPTCORE_DIR}/wasm/js"
+    "${JAVASCRIPTCORE_DIR}/yarr"
+    "${DERIVED_SOURCES_DIR}/ForwardingHeaders"
+    "${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}"
+    "${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector"
+)
+
+set(JavaScriptCore_SYSTEM_INCLUDE_DIRECTORIES
+    "${ICU_INCLUDE_DIRS}"
+)
+
+set(JavaScriptCore_SOURCES
+    API/JSBase.cpp
+    API/JSCTestRunnerUtils.cpp
+    API/JSCallbackConstructor.cpp
+    API/JSCallbackFunction.cpp
+    API/JSCallbackObject.cpp
+    API/JSClassRef.cpp
+    API/JSContextRef.cpp
+    API/JSObjectRef.cpp
+    API/JSTypedArray.cpp
+    API/JSScriptRef.cpp
+    API/JSStringRef.cpp
+    API/JSValueRef.cpp
+    API/JSWeakObjectMapRefPrivate.cpp
+    API/OpaqueJSString.cpp
+
+    assembler/ARMAssembler.cpp
+    assembler/LinkBuffer.cpp
+    assembler/MacroAssembler.cpp
+    assembler/MacroAssemblerARM.cpp
+    assembler/MacroAssemblerARMv7.cpp
+    assembler/MacroAssemblerCodeRef.cpp
+    assembler/MacroAssemblerPrinter.cpp
+    assembler/MacroAssemblerX86Common.cpp
+
+    b3/air/AirAllocateStack.cpp
+    b3/air/AirArg.cpp
+    b3/air/AirBasicBlock.cpp
+    b3/air/AirCCallSpecial.cpp
+    b3/air/AirCCallingConvention.cpp
+    b3/air/AirCode.cpp
+    b3/air/AirCustom.cpp
+    b3/air/AirDumpAsJS.cpp
+    b3/air/AirEliminateDeadCode.cpp
+    b3/air/AirEmitShuffle.cpp
+    b3/air/AirFixObviousSpills.cpp
+    b3/air/AirFixPartialRegisterStalls.cpp
+    b3/air/AirGenerate.cpp
+    b3/air/AirGenerated.cpp
+    b3/air/AirHandleCalleeSaves.cpp
+    b3/air/AirInsertionSet.cpp
+    b3/air/AirInst.cpp
+    b3/air/AirIteratedRegisterCoalescing.cpp
+    b3/air/AirKind.cpp
+    b3/air/AirLogRegisterPressure.cpp
+    b3/air/AirLowerAfterRegAlloc.cpp
+    b3/air/AirLowerEntrySwitch.cpp
+    b3/air/AirLowerMacros.cpp
+    b3/air/AirOptimizeBlockOrder.cpp
+    b3/air/AirPadInterference.cpp
+    b3/air/AirPhaseScope.cpp
+    b3/air/AirReportUsedRegisters.cpp
+    b3/air/AirSimplifyCFG.cpp
+    b3/air/AirSpecial.cpp
+    b3/air/AirSpillEverything.cpp
+    b3/air/AirStackSlot.cpp
+    b3/air/AirStackSlotKind.cpp
+    b3/air/AirTmp.cpp
+    b3/air/AirTmpWidth.cpp
+    b3/air/AirValidate.cpp
+
+    b3/B3ArgumentRegValue.cpp
+    b3/B3BasicBlock.cpp
+    b3/B3BlockInsertionSet.cpp
+    b3/B3BreakCriticalEdges.cpp
+    b3/B3CCallValue.cpp
+    b3/B3CaseCollection.cpp
+    b3/B3CheckSpecial.cpp
+    b3/B3CheckValue.cpp
+    b3/B3Common.cpp
+    b3/B3Commutativity.cpp
+    b3/B3Compile.cpp
+    b3/B3Compilation.cpp
+    b3/B3Const32Value.cpp
+    b3/B3Const64Value.cpp
+    b3/B3ConstDoubleValue.cpp
+    b3/B3ConstFloatValue.cpp
+    b3/B3ConstrainedValue.cpp
+    b3/B3DataSection.cpp
+    b3/B3DuplicateTails.cpp
+    b3/B3Effects.cpp
+    b3/B3EliminateCommonSubexpressions.cpp
+    b3/B3FenceValue.cpp
+    b3/B3FixSSA.cpp
+    b3/B3FoldPathConstants.cpp
+    b3/B3FrequencyClass.cpp
+    b3/B3Generate.cpp
+    b3/B3HeapRange.cpp
+    b3/B3InferSwitches.cpp
+    b3/B3InsertionSet.cpp
+    b3/B3Kind.cpp
+    b3/B3LegalizeMemoryOffsets.cpp
+    b3/B3LowerMacros.cpp
+    b3/B3LowerMacrosAfterOptimizations.cpp
+    b3/B3LowerToAir.cpp
+    b3/B3MathExtras.cpp
+    b3/B3MemoryValue.cpp
+    b3/B3MoveConstants.cpp
+    b3/B3OpaqueByproducts.cpp
+    b3/B3Opcode.cpp
+    b3/B3Origin.cpp
+    b3/B3OriginDump.cpp
+    b3/B3PatchpointSpecial.cpp
+    b3/B3PatchpointValue.cpp
+    b3/B3PhaseScope.cpp
+    b3/B3PhiChildren.cpp
+    b3/B3Procedure.cpp
+    b3/B3PureCSE.cpp
+    b3/B3ReduceDoubleToFloat.cpp
+    b3/B3ReduceStrength.cpp
+    b3/B3SSACalculator.cpp
+    b3/B3SlotBaseValue.cpp
+    b3/B3StackmapGenerationParams.cpp
+    b3/B3StackmapSpecial.cpp
+    b3/B3StackmapValue.cpp
+    b3/B3StackSlot.cpp
+    b3/B3SwitchCase.cpp
+    b3/B3SwitchValue.cpp
+    b3/B3TimingScope.cpp
+    b3/B3Type.cpp
+    b3/B3UpsilonValue.cpp
+    b3/B3UseCounts.cpp
+    b3/B3Validate.cpp
+    b3/B3Value.cpp
+    b3/B3ValueKey.cpp
+    b3/B3ValueRep.cpp
+    b3/B3Variable.cpp
+    b3/B3VariableValue.cpp
+    b3/B3WasmAddressValue.cpp
+    b3/B3WasmBoundsCheckValue.cpp
+
+    bindings/ScriptFunctionCall.cpp
+    bindings/ScriptObject.cpp
+    bindings/ScriptValue.cpp
+
+    builtins/BuiltinExecutables.cpp
+    builtins/BuiltinExecutableCreator.cpp
+
+    bytecode/AccessCase.cpp
+    bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
+    bytecode/ArithProfile.cpp
+    bytecode/ArrayAllocationProfile.cpp
+    bytecode/ArrayProfile.cpp
+    bytecode/BytecodeBasicBlock.cpp
+    bytecode/BytecodeGeneratorification.cpp
+    bytecode/BytecodeIntrinsicRegistry.cpp
+    bytecode/BytecodeLivenessAnalysis.cpp
+    bytecode/BytecodeRewriter.cpp
+    bytecode/CallEdge.cpp
+    bytecode/CallLinkInfo.cpp
+    bytecode/CallLinkStatus.cpp
+    bytecode/CallMode.cpp
+    bytecode/CallVariant.cpp
+    bytecode/CodeBlock.cpp
+    bytecode/CodeBlockHash.cpp
+    bytecode/CodeBlockJettisoningWatchpoint.cpp
+    bytecode/CodeOrigin.cpp
+    bytecode/CodeType.cpp
+    bytecode/ComplexGetStatus.cpp
+    bytecode/DFGExitProfile.cpp
+    bytecode/DOMJITAccessCasePatchpointParams.cpp
+    bytecode/DataFormat.cpp
+    bytecode/DeferredCompilationCallback.cpp
+    bytecode/DeferredSourceDump.cpp
+    bytecode/DirectEvalCodeCache.cpp
+    bytecode/EvalCodeBlock.cpp
+    bytecode/ExecutionCounter.cpp
+    bytecode/ExitKind.cpp
+    bytecode/ExitingJITType.cpp
+    bytecode/FunctionCodeBlock.cpp
+    bytecode/GetByIdStatus.cpp
+    bytecode/GetByIdVariant.cpp
+    bytecode/GetterSetterAccessCase.cpp
+    bytecode/InlineAccess.cpp
+    bytecode/InlineCallFrame.cpp
+    bytecode/InlineCallFrameSet.cpp
+    bytecode/IntrinsicGetterAccessCase.cpp
+    bytecode/JumpTable.cpp
+    bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
+    bytecode/LazyOperandValueProfile.cpp
+    bytecode/MethodOfGettingAValueProfile.cpp
+    bytecode/ModuleNamespaceAccessCase.cpp
+    bytecode/ModuleProgramCodeBlock.cpp
+    bytecode/ObjectPropertyCondition.cpp
+    bytecode/ObjectPropertyConditionSet.cpp
+    bytecode/Opcode.cpp
+    bytecode/PolymorphicAccess.cpp
+    bytecode/PreciseJumpTargets.cpp
+    bytecode/ProgramCodeBlock.cpp
+    bytecode/PropertyCondition.cpp
+    bytecode/ProxyableAccessCase.cpp
+    bytecode/PutByIdFlags.cpp
+    bytecode/PutByIdStatus.cpp
+    bytecode/PutByIdVariant.cpp
+    bytecode/ReduceWhitespace.cpp
+    bytecode/SpecialPointer.cpp
+    bytecode/SpeculatedType.cpp
+    bytecode/StructureSet.cpp
+    bytecode/StructureStubClearingWatchpoint.cpp
+    bytecode/StructureStubInfo.cpp
+    bytecode/SuperSampler.cpp
+    bytecode/ToThisStatus.cpp
+    bytecode/TrackedReferences.cpp
+    bytecode/UnlinkedCodeBlock.cpp
+    bytecode/UnlinkedEvalCodeBlock.cpp
+    bytecode/UnlinkedFunctionCodeBlock.cpp
+    bytecode/UnlinkedFunctionExecutable.cpp
+    bytecode/UnlinkedInstructionStream.cpp
+    bytecode/UnlinkedModuleProgramCodeBlock.cpp
+    bytecode/UnlinkedProgramCodeBlock.cpp
+    bytecode/ValueRecovery.cpp
+    bytecode/VariableWriteFireDetail.cpp
+    bytecode/VirtualRegister.cpp
+    bytecode/Watchpoint.cpp
+
+    bytecompiler/BytecodeGenerator.cpp
+    bytecompiler/NodesCodegen.cpp
+
+    debugger/Debugger.cpp
+    debugger/DebuggerCallFrame.cpp
+    debugger/DebuggerLocation.cpp
+    debugger/DebuggerParseData.cpp
+    debugger/DebuggerScope.cpp
+
+    dfg/DFGAbstractHeap.cpp
+    dfg/DFGAbstractValue.cpp
+    dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp
+    dfg/DFGAdaptiveStructureWatchpoint.cpp
+    dfg/DFGArgumentsEliminationPhase.cpp
+    dfg/DFGArgumentsUtilities.cpp
+    dfg/DFGArithMode.cpp
+    dfg/DFGArrayMode.cpp
+    dfg/DFGAtTailAbstractState.cpp
+    dfg/DFGAvailability.cpp
+    dfg/DFGAvailabilityMap.cpp
+    dfg/DFGBackwardsPropagationPhase.cpp
+    dfg/DFGBasicBlock.cpp
+    dfg/DFGBlockInsertionSet.cpp
+    dfg/DFGBlockSet.cpp
+    dfg/DFGByteCodeParser.cpp
+    dfg/DFGCFAPhase.cpp
+    dfg/DFGCFGSimplificationPhase.cpp
+    dfg/DFGCPSRethreadingPhase.cpp
+    dfg/DFGCSEPhase.cpp
+    dfg/DFGCapabilities.cpp
+    dfg/DFGCleanUpPhase.cpp
+    dfg/DFGClobberSet.cpp
+    dfg/DFGClobberize.cpp
+    dfg/DFGClobbersExitState.cpp
+    dfg/DFGCombinedLiveness.cpp
+    dfg/DFGCommon.cpp
+    dfg/DFGCommonData.cpp
+    dfg/DFGCompilationKey.cpp
+    dfg/DFGCompilationMode.cpp
+    dfg/DFGConstantFoldingPhase.cpp
+    dfg/DFGConstantHoistingPhase.cpp
+    dfg/DFGCriticalEdgeBreakingPhase.cpp
+    dfg/DFGDCEPhase.cpp
+    dfg/DFGDOMJITPatchpointParams.cpp
+    dfg/DFGDesiredIdentifiers.cpp
+    dfg/DFGDesiredTransitions.cpp
+    dfg/DFGDesiredWatchpoints.cpp
+    dfg/DFGDesiredWeakReferences.cpp
+    dfg/DFGDisassembler.cpp
+    dfg/DFGDoesGC.cpp
+    dfg/DFGDriver.cpp
+    dfg/DFGEdge.cpp
+    dfg/DFGEpoch.cpp
+    dfg/DFGFailedFinalizer.cpp
+    dfg/DFGFinalizer.cpp
+    dfg/DFGFixupPhase.cpp
+    dfg/DFGFlowIndexing.cpp
+    dfg/DFGFlushFormat.cpp
+    dfg/DFGFlushedAt.cpp
+    dfg/DFGLiveCatchVariablePreservationPhase.cpp
+    dfg/DFGFrozenValue.cpp
+    dfg/DFGGraph.cpp
+    dfg/DFGGraphSafepoint.cpp
+    dfg/DFGHeapLocation.cpp
+    dfg/DFGInPlaceAbstractState.cpp
+    dfg/DFGInferredTypeCheck.cpp
+    dfg/DFGInsertionSet.cpp
+    dfg/DFGIntegerCheckCombiningPhase.cpp
+    dfg/DFGIntegerRangeOptimizationPhase.cpp
+    dfg/DFGInvalidationPointInjectionPhase.cpp
+    dfg/DFGJITCode.cpp
+    dfg/DFGJITCompiler.cpp
+    dfg/DFGJITFinalizer.cpp
+    dfg/DFGJumpReplacement.cpp
+    dfg/DFGLICMPhase.cpp
+    dfg/DFGLazyJSValue.cpp
+    dfg/DFGLazyNode.cpp
+    dfg/DFGLivenessAnalysisPhase.cpp
+    dfg/DFGLongLivedState.cpp
+    dfg/DFGLoopPreHeaderCreationPhase.cpp
+    dfg/DFGMaximalFlushInsertionPhase.cpp
+    dfg/DFGMayExit.cpp
+    dfg/DFGMinifiedGraph.cpp
+    dfg/DFGMinifiedNode.cpp
+    dfg/DFGMovHintRemovalPhase.cpp
+    dfg/DFGMultiGetByOffsetData.cpp
+    dfg/DFGNaturalLoops.cpp
+    dfg/DFGNode.cpp
+    dfg/DFGNodeAbstractValuePair.cpp
+    dfg/DFGNodeFlags.cpp
+    dfg/DFGNodeFlowProjection.cpp
+    dfg/DFGNodeOrigin.cpp
+    dfg/DFGOSRAvailabilityAnalysisPhase.cpp
+    dfg/DFGOSREntry.cpp
+    dfg/DFGOSREntrypointCreationPhase.cpp
+    dfg/DFGOSRExit.cpp
+    dfg/DFGOSRExitBase.cpp
+    dfg/DFGOSRExitCompiler.cpp
+    dfg/DFGOSRExitCompiler32_64.cpp
+    dfg/DFGOSRExitCompiler64.cpp
+    dfg/DFGOSRExitCompilerCommon.cpp
+    dfg/DFGOSRExitFuzz.cpp
+    dfg/DFGOSRExitJumpPlaceholder.cpp
+    dfg/DFGOSRExitPreparation.cpp
+    dfg/DFGObjectAllocationSinkingPhase.cpp
+    dfg/DFGObjectMaterializationData.cpp
+    dfg/DFGOperations.cpp
+    dfg/DFGPhantomInsertionPhase.cpp
+    dfg/DFGPhase.cpp
+    dfg/DFGPhiChildren.cpp
+    dfg/DFGPlan.cpp
+    dfg/DFGPrePostNumbering.cpp
+    dfg/DFGPredictionInjectionPhase.cpp
+    dfg/DFGPredictionPropagationPhase.cpp
+    dfg/DFGPromotedHeapLocation.cpp
+    dfg/DFGPureValue.cpp
+    dfg/DFGPutStackSinkingPhase.cpp
+    dfg/DFGRegisteredStructureSet.cpp
+    dfg/DFGSSACalculator.cpp
+    dfg/DFGSSAConversionPhase.cpp
+    dfg/DFGSSALoweringPhase.cpp
+    dfg/DFGSafepoint.cpp
+    dfg/DFGSpeculativeJIT.cpp
+    dfg/DFGSpeculativeJIT32_64.cpp
+    dfg/DFGSpeculativeJIT64.cpp
+    dfg/DFGStackLayoutPhase.cpp
+    dfg/DFGStaticExecutionCountEstimationPhase.cpp
+    dfg/DFGStoreBarrierClusteringPhase.cpp
+    dfg/DFGStoreBarrierInsertionPhase.cpp
+    dfg/DFGStrengthReductionPhase.cpp
+    dfg/DFGStructureAbstractValue.cpp
+    dfg/DFGThreadData.cpp
+    dfg/DFGThunks.cpp
+    dfg/DFGTierUpCheckInjectionPhase.cpp
+    dfg/DFGToFTLDeferredCompilationCallback.cpp
+    dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
+    dfg/DFGTransition.cpp
+    dfg/DFGTypeCheckHoistingPhase.cpp
+    dfg/DFGUnificationPhase.cpp
+    dfg/DFGUseKind.cpp
+    dfg/DFGValidate.cpp
+    dfg/DFGValueSource.cpp
+    dfg/DFGValueStrength.cpp
+    dfg/DFGVarargsForwardingPhase.cpp
+    dfg/DFGVariableAccessData.cpp
+    dfg/DFGVariableAccessDataDump.cpp
+    dfg/DFGVariableEvent.cpp
+    dfg/DFGVariableEventStream.cpp
+    dfg/DFGVirtualRegisterAllocationPhase.cpp
+    dfg/DFGWatchpointCollectionPhase.cpp
+    dfg/DFGWorklist.cpp
+
+    disassembler/ARM64Disassembler.cpp
+    disassembler/ARMLLVMDisassembler.cpp
+    disassembler/ARMv7Disassembler.cpp
+    disassembler/Disassembler.cpp
+    disassembler/UDis86Disassembler.cpp
+    disassembler/X86Disassembler.cpp
+
+    disassembler/ARM64/A64DOpcode.cpp
+
+    disassembler/ARMv7/ARMv7DOpcode.cpp
+
+    disassembler/udis86/udis86.c
+    disassembler/udis86/udis86_decode.c
+    disassembler/udis86/udis86_itab_holder.c
+    disassembler/udis86/udis86_syn-att.c
+    disassembler/udis86/udis86_syn-intel.c
+    disassembler/udis86/udis86_syn.c
+
+    domjit/DOMJITAbstractHeap.cpp
+    domjit/DOMJITHeapRange.cpp
+
+    ftl/FTLAbstractHeap.cpp
+    ftl/FTLAbstractHeapRepository.cpp
+    ftl/FTLAvailableRecovery.cpp
+    ftl/FTLCapabilities.cpp
+    ftl/FTLCommonValues.cpp
+    ftl/FTLCompile.cpp
+    ftl/FTLDOMJITPatchpointParams.cpp
+    ftl/FTLExceptionTarget.cpp
+    ftl/FTLExitArgument.cpp
+    ftl/FTLExitArgumentForOperand.cpp
+    ftl/FTLExitPropertyValue.cpp
+    ftl/FTLExitTimeObjectMaterialization.cpp
+    ftl/FTLExitValue.cpp
+    ftl/FTLFail.cpp
+    ftl/FTLForOSREntryJITCode.cpp
+    ftl/FTLJITCode.cpp
+    ftl/FTLJITFinalizer.cpp
+    ftl/FTLLazySlowPath.cpp
+    ftl/FTLLink.cpp
+    ftl/FTLLocation.cpp
+    ftl/FTLLowerDFGToB3.cpp
+    ftl/FTLOSREntry.cpp
+    ftl/FTLOSRExit.cpp
+    ftl/FTLOSRExitCompiler.cpp
+    ftl/FTLOSRExitHandle.cpp
+    ftl/FTLOperations.cpp
+    ftl/FTLOutput.cpp
+    ftl/FTLPatchpointExceptionHandle.cpp
+    ftl/FTLRecoveryOpcode.cpp
+    ftl/FTLSaveRestore.cpp
+    ftl/FTLSlowPathCall.cpp
+    ftl/FTLSlowPathCallKey.cpp
+    ftl/FTLState.cpp
+    ftl/FTLThunks.cpp
+    ftl/FTLValueRange.cpp
+
+    heap/AllocatorAttributes.cpp
+    heap/CellContainer.cpp
+    heap/CodeBlockSet.cpp
+    heap/CollectionScope.cpp
+    heap/CollectorPhase.cpp
+    heap/ConservativeRoots.cpp
+    heap/DeferGC.cpp
+    heap/DestructionMode.cpp
+    heap/EdenGCActivityCallback.cpp
+    heap/FullGCActivityCallback.cpp
+    heap/FreeList.cpp
+    heap/GCActivityCallback.cpp
+    heap/GCConductor.cpp
+    heap/GCLogging.cpp
+    heap/HandleSet.cpp
+    heap/HandleStack.cpp
+    heap/Heap.cpp
+    heap/HeapCell.cpp
+    heap/HeapHelperPool.cpp
+    heap/HeapProfiler.cpp
+    heap/HeapSnapshot.cpp
+    heap/HeapSnapshotBuilder.cpp
+    heap/HeapTimer.cpp
+    heap/HeapVerifier.cpp
+    heap/IncrementalSweeper.cpp
+    heap/JITStubRoutineSet.cpp
+    heap/LargeAllocation.cpp
+    heap/LiveObjectList.cpp
+    heap/MachineStackMarker.cpp
+    heap/MarkStack.cpp
+    heap/MarkedAllocator.cpp
+    heap/MarkedBlock.cpp
+    heap/MarkedSpace.cpp
+    heap/MarkingConstraint.cpp
+    heap/MarkingConstraintSet.cpp
+    heap/MutatorScheduler.cpp
+    heap/MutatorState.cpp
+    heap/SlotVisitor.cpp
+    heap/SpaceTimeMutatorScheduler.cpp
+    heap/StochasticSpaceTimeMutatorScheduler.cpp
+    heap/StopIfNecessaryTimer.cpp
+    heap/Subspace.cpp
+    heap/SynchronousStopTheWorldMutatorScheduler.cpp
+    heap/VisitRaceKey.cpp
+    heap/Weak.cpp
+    heap/WeakBlock.cpp
+    heap/WeakHandleOwner.cpp
+    heap/WeakSet.cpp
+    heap/WriteBarrierSupport.cpp
+
+    inspector/AsyncStackTrace.cpp
+    inspector/ConsoleMessage.cpp
+    inspector/ContentSearchUtilities.cpp
+    inspector/EventLoop.cpp
+    inspector/IdentifiersFactory.cpp
+    inspector/InjectedScript.cpp
+    inspector/InjectedScriptBase.cpp
+    inspector/InjectedScriptHost.cpp
+    inspector/InjectedScriptManager.cpp
+    inspector/InjectedScriptModule.cpp
+    inspector/InspectorAgentRegistry.cpp
+    inspector/InspectorFrontendRouter.cpp
+    inspector/InspectorBackendDispatcher.cpp
+    inspector/InspectorValues.cpp
+    inspector/JSGlobalObjectConsoleClient.cpp
+    inspector/JSGlobalObjectInspectorController.cpp
+    inspector/JSGlobalObjectScriptDebugServer.cpp
+    inspector/JSInjectedScriptHost.cpp
+    inspector/JSInjectedScriptHostPrototype.cpp
+    inspector/JSJavaScriptCallFrame.cpp
+    inspector/JSJavaScriptCallFramePrototype.cpp
+    inspector/JavaScriptCallFrame.cpp
+    inspector/PerGlobalObjectWrapperWorld.cpp
+    inspector/ScriptArguments.cpp
+    inspector/ScriptCallFrame.cpp
+    inspector/ScriptCallStack.cpp
+    inspector/ScriptCallStackFactory.cpp
+    inspector/ScriptDebugServer.cpp
+
+    inspector/agents/InspectorAgent.cpp
+    inspector/agents/InspectorConsoleAgent.cpp
+    inspector/agents/InspectorDebuggerAgent.cpp
+    inspector/agents/InspectorHeapAgent.cpp
+    inspector/agents/InspectorRuntimeAgent.cpp
+    inspector/agents/InspectorScriptProfilerAgent.cpp
+    inspector/agents/JSGlobalObjectConsoleAgent.cpp
+    inspector/agents/JSGlobalObjectDebuggerAgent.cpp
+    inspector/agents/JSGlobalObjectRuntimeAgent.cpp
+
+    interpreter/AbstractPC.cpp
+    interpreter/CLoopStack.cpp
+    interpreter/CallFrame.cpp
+    interpreter/Interpreter.cpp
+    interpreter/ProtoCallFrame.cpp
+    interpreter/ShadowChicken.cpp
+    interpreter/StackVisitor.cpp
+
+    jit/AssemblyHelpers.cpp
+    jit/BinarySwitch.cpp
+    jit/CCallHelpers.cpp
+    jit/CachedRecovery.cpp
+    jit/CallFrameShuffleData.cpp
+    jit/CallFrameShuffler.cpp
+    jit/CallFrameShuffler32_64.cpp
+    jit/CallFrameShuffler64.cpp
+    jit/ExecutableAllocationFuzz.cpp
+    jit/ExecutableAllocator.cpp
+    jit/GCAwareJITStubRoutine.cpp
+    jit/GPRInfo.cpp
+    jit/HostCallReturnValue.cpp
+    jit/ICStats.cpp
+    jit/IntrinsicEmitter.cpp
+    jit/JIT.cpp
+    jit/JITAddGenerator.cpp
+    jit/JITArithmetic.cpp
+    jit/JITArithmetic32_64.cpp
+    jit/JITBitAndGenerator.cpp
+    jit/JITBitOrGenerator.cpp
+    jit/JITBitXorGenerator.cpp
+    jit/JITCall.cpp
+    jit/JITCall32_64.cpp
+    jit/JITCode.cpp
+    jit/JITDisassembler.cpp
+    jit/JITDivGenerator.cpp
+    jit/JITExceptions.cpp
+    jit/JITInlineCacheGenerator.cpp
+    jit/JITLeftShiftGenerator.cpp
+    jit/JITMulGenerator.cpp
+    jit/JITNegGenerator.cpp
+    jit/JITOpcodes.cpp
+    jit/JITOpcodes32_64.cpp
+    jit/JITOperations.cpp
+    jit/JITPropertyAccess.cpp
+    jit/JITPropertyAccess32_64.cpp
+    jit/JITRightShiftGenerator.cpp
+    jit/JITStubRoutine.cpp
+    jit/JITSubGenerator.cpp
+    jit/JITThunks.cpp
+    jit/JITToDFGDeferredCompilationCallback.cpp
+    jit/JITWorklist.cpp
+    jit/PCToCodeOriginMap.cpp
+    jit/PolymorphicCallStubRoutine.cpp
+    jit/Reg.cpp
+    jit/RegisterAtOffset.cpp
+    jit/RegisterAtOffsetList.cpp
+    jit/RegisterSet.cpp
+    jit/Repatch.cpp
+    jit/ScratchRegisterAllocator.cpp
+    jit/SetupVarargsFrame.cpp
+    jit/TagRegistersMode.cpp
+    jit/TempRegisterSet.cpp
+    jit/ThunkGenerators.cpp
+
+    llint/LLIntCLoop.cpp
+    llint/LLIntData.cpp
+    llint/LLIntEntrypoint.cpp
+    llint/LLIntExceptions.cpp
+    llint/LLIntSlowPaths.cpp
+    llint/LLIntThunks.cpp
+    llint/LowLevelInterpreter.cpp
+
+    parser/Lexer.cpp
+    parser/ModuleAnalyzer.cpp
+    parser/Nodes.cpp
+    parser/NodesAnalyzeModule.cpp
+    parser/Parser.cpp
+    parser/ParserArena.cpp
+    parser/SourceProvider.cpp
+    parser/SourceProviderCache.cpp
+    parser/UnlinkedSourceCode.cpp
+    parser/VariableEnvironment.cpp
+
+    profiler/ProfilerBytecode.cpp
+    profiler/ProfilerBytecodeSequence.cpp
+    profiler/ProfilerBytecodes.cpp
+    profiler/ProfilerCompilation.cpp
+    profiler/ProfilerCompilationKind.cpp
+    profiler/ProfilerCompiledBytecode.cpp
+    profiler/ProfilerDatabase.cpp
+    profiler/ProfilerEvent.cpp
+    profiler/ProfilerJettisonReason.cpp
+    profiler/ProfilerOSRExit.cpp
+    profiler/ProfilerOSRExitSite.cpp
+    profiler/ProfilerOrigin.cpp
+    profiler/ProfilerOriginStack.cpp
+    profiler/ProfilerProfiledBytecodes.cpp
+    profiler/ProfilerUID.cpp
+
+    runtime/AbstractModuleRecord.cpp
+    runtime/ArgList.cpp
+    runtime/ArrayBuffer.cpp
+    runtime/ArrayBufferNeuteringWatchpoint.cpp
+    runtime/ArrayBufferView.cpp
+    runtime/ArrayConstructor.cpp
+    runtime/ArrayConventions.cpp
+    runtime/ArrayIteratorAdaptiveWatchpoint.cpp
+    runtime/ArrayIteratorPrototype.cpp
+    runtime/ArrayPrototype.cpp
+    runtime/AtomicsObject.cpp
+    runtime/AsyncFunctionConstructor.cpp
+    runtime/AsyncFunctionPrototype.cpp
+    runtime/BasicBlockLocation.cpp
+    runtime/BooleanConstructor.cpp
+    runtime/BooleanObject.cpp
+    runtime/BooleanPrototype.cpp
+    runtime/CallData.cpp
+    runtime/CatchScope.cpp
+    runtime/ClonedArguments.cpp
+    runtime/CodeCache.cpp
+    runtime/CodeSpecializationKind.cpp
+    runtime/CommonIdentifiers.cpp
+    runtime/CommonSlowPaths.cpp
+    runtime/CommonSlowPathsExceptions.cpp
+    runtime/CompilationResult.cpp
+    runtime/Completion.cpp
+    runtime/ConsoleClient.cpp
+    runtime/ConsoleObject.cpp
+    runtime/ConstantMode.cpp
+    runtime/ConstructData.cpp
+    runtime/ControlFlowProfiler.cpp
+    runtime/CustomGetterSetter.cpp
+    runtime/DataView.cpp
+    runtime/DateConstructor.cpp
+    runtime/DateConversion.cpp
+    runtime/DateInstance.cpp
+    runtime/DatePrototype.cpp
+    runtime/DirectArguments.cpp
+    runtime/DirectArgumentsOffset.cpp
+    runtime/DirectEvalExecutable.cpp
+    runtime/DumpContext.cpp
+    runtime/ECMAScriptSpecInternalFunctions.cpp
+    runtime/Error.cpp
+    runtime/ErrorConstructor.cpp
+    runtime/ErrorHandlingScope.cpp
+    runtime/ErrorInstance.cpp
+    runtime/ErrorPrototype.cpp
+    runtime/EvalExecutable.cpp
+    runtime/Exception.cpp
+    runtime/ExceptionEventLocation.cpp
+    runtime/ExceptionFuzz.cpp
+    runtime/ExceptionHelpers.cpp
+    runtime/ExceptionScope.cpp
+    runtime/ExecutableBase.cpp
+    runtime/FunctionConstructor.cpp
+    runtime/FunctionExecutable.cpp
+    runtime/FunctionExecutableDump.cpp
+    runtime/FunctionHasExecutedCache.cpp
+    runtime/FunctionPrototype.cpp
+    runtime/FunctionRareData.cpp
+    runtime/GeneratorFunctionConstructor.cpp
+    runtime/GeneratorFunctionPrototype.cpp
+    runtime/GeneratorPrototype.cpp
+    runtime/GetterSetter.cpp
+    runtime/HashMapImpl.cpp
+    runtime/Identifier.cpp
+    runtime/IndexingType.cpp
+    runtime/IndirectEvalExecutable.cpp
+    runtime/InferredType.cpp
+    runtime/InferredTypeTable.cpp
+    runtime/InferredValue.cpp
+    runtime/InitializeThreading.cpp
+    runtime/InspectorInstrumentationObject.cpp
+    runtime/InternalFunction.cpp
+    runtime/IntlCollator.cpp
+    runtime/IntlCollatorConstructor.cpp
+    runtime/IntlCollatorPrototype.cpp
+    runtime/IntlDateTimeFormat.cpp
+    runtime/IntlDateTimeFormatConstructor.cpp
+    runtime/IntlDateTimeFormatPrototype.cpp
+    runtime/IntlNumberFormat.cpp
+    runtime/IntlNumberFormatConstructor.cpp
+    runtime/IntlNumberFormatPrototype.cpp
+    runtime/IntlObject.cpp
+    runtime/IteratorOperations.cpp
+    runtime/IteratorPrototype.cpp
+    runtime/JSAPIValueWrapper.cpp
+    runtime/JSArray.cpp
+    runtime/JSArrayBuffer.cpp
+    runtime/JSArrayBufferConstructor.cpp
+    runtime/JSArrayBufferPrototype.cpp
+    runtime/JSArrayBufferView.cpp
+    runtime/JSAsyncFunction.cpp
+    runtime/JSBoundFunction.cpp
+    runtime/JSCJSValue.cpp
+    runtime/JSCallee.cpp
+    runtime/JSCell.cpp
+    runtime/JSCustomGetterSetterFunction.cpp
+    runtime/JSDataView.cpp
+    runtime/JSDataViewPrototype.cpp
+    runtime/JSDateMath.cpp
+    runtime/JSDestructibleObjectSubspace.cpp
+    runtime/JSEnvironmentRecord.cpp
+    runtime/JSFixedArray.cpp
+    runtime/JSFunction.cpp
+    runtime/JSGeneratorFunction.cpp
+    runtime/JSGlobalLexicalEnvironment.cpp
+    runtime/JSGlobalObject.cpp
+    runtime/JSGlobalObjectDebuggable.cpp
+    runtime/JSGlobalObjectFunctions.cpp
+    runtime/JSInternalPromise.cpp
+    runtime/JSInternalPromiseConstructor.cpp
+    runtime/JSInternalPromiseDeferred.cpp
+    runtime/JSInternalPromisePrototype.cpp
+    runtime/JSJob.cpp
+    runtime/JSLexicalEnvironment.cpp
+    runtime/JSLock.cpp
+    runtime/JSMap.cpp
+    runtime/JSMapIterator.cpp
+    runtime/JSModuleEnvironment.cpp
+    runtime/JSModuleLoader.cpp
+    runtime/JSModuleNamespaceObject.cpp
+    runtime/JSModuleRecord.cpp
+    runtime/JSNativeStdFunction.cpp
+    runtime/JSONObject.cpp
+    runtime/JSObject.cpp
+    runtime/JSPromise.cpp
+    runtime/JSPromiseConstructor.cpp
+    runtime/JSPromiseDeferred.cpp
+    runtime/JSPromisePrototype.cpp
+    runtime/JSPropertyNameEnumerator.cpp
+    runtime/JSPropertyNameIterator.cpp
+    runtime/JSProxy.cpp
+    runtime/JSScope.cpp
+    runtime/JSScriptFetcher.cpp
+    runtime/JSSegmentedVariableObject.cpp
+    runtime/JSSegmentedVariableObjectSubspace.cpp
+    runtime/JSSet.cpp
+    runtime/JSSetIterator.cpp
+    runtime/JSSourceCode.cpp
+    runtime/JSString.cpp
+    runtime/JSStringIterator.cpp
+    runtime/JSStringJoiner.cpp
+    runtime/JSStringSubspace.cpp
+    runtime/JSSymbolTableObject.cpp
+    runtime/JSTemplateRegistryKey.cpp
+    runtime/JSTypedArrayConstructors.cpp
+    runtime/JSTypedArrayPrototypes.cpp
+    runtime/JSTypedArrayViewConstructor.cpp
+    runtime/JSTypedArrayViewPrototype.cpp
+    runtime/JSTypedArrays.cpp
+    runtime/JSWeakMap.cpp
+    runtime/JSWeakSet.cpp
+    runtime/JSWithScope.cpp
+    runtime/JSWrapperObject.cpp
+    runtime/LazyClassStructure.cpp
+    runtime/LiteralParser.cpp
+    runtime/Lookup.cpp
+    runtime/MapBase.cpp
+    runtime/MapConstructor.cpp
+    runtime/MapIteratorPrototype.cpp
+    runtime/MapPrototype.cpp
+    runtime/MatchResult.cpp
+    runtime/MathCommon.cpp
+    runtime/MathObject.cpp
+    runtime/MemoryStatistics.cpp
+    runtime/ModuleLoaderPrototype.cpp
+    runtime/ModuleProgramExecutable.cpp
+    runtime/NativeErrorConstructor.cpp
+    runtime/NativeErrorPrototype.cpp
+    runtime/NativeExecutable.cpp
+    runtime/NativeStdFunctionCell.cpp
+    runtime/NullGetterFunction.cpp
+    runtime/NullSetterFunction.cpp
+    runtime/NumberConstructor.cpp
+    runtime/NumberObject.cpp
+    runtime/NumberPrototype.cpp
+    runtime/ObjectConstructor.cpp
+    runtime/ObjectPrototype.cpp
+    runtime/Operations.cpp
+    runtime/Options.cpp
+    runtime/ProgramExecutable.cpp
+    runtime/PropertyDescriptor.cpp
+    runtime/PropertySlot.cpp
+    runtime/PropertyTable.cpp
+    runtime/PrototypeMap.cpp
+    runtime/ProxyConstructor.cpp
+    runtime/ProxyObject.cpp
+    runtime/ProxyRevoke.cpp
+    runtime/ReflectObject.cpp
+    runtime/RegExp.cpp
+    runtime/RegExpCache.cpp
+    runtime/RegExpCachedResult.cpp
+    runtime/RegExpConstructor.cpp
+    runtime/RegExpMatchesArray.cpp
+    runtime/RegExpObject.cpp
+    runtime/RegExpPrototype.cpp
+    runtime/RuntimeType.cpp
+    runtime/SamplingCounter.cpp
+    runtime/SamplingProfiler.cpp
+    runtime/ScopeOffset.cpp
+    runtime/ScopedArguments.cpp
+    runtime/ScopedArgumentsTable.cpp
+    runtime/ScriptExecutable.cpp
+    runtime/SetConstructor.cpp
+    runtime/SetIteratorPrototype.cpp
+    runtime/SetPrototype.cpp
+    runtime/SimpleTypedArrayController.cpp
+    runtime/SmallStrings.cpp
+    runtime/SparseArrayValueMap.cpp
+    runtime/StackFrame.cpp
+    runtime/StrictEvalActivation.cpp
+    runtime/StringConstructor.cpp
+    runtime/StringIteratorPrototype.cpp
+    runtime/StringObject.cpp
+    runtime/StringPrototype.cpp
+    runtime/StringRecursionChecker.cpp
+    runtime/Structure.cpp
+    runtime/StructureChain.cpp
+    runtime/StructureIDTable.cpp
+    runtime/StructureRareData.cpp
+    runtime/Symbol.cpp
+    runtime/SymbolConstructor.cpp
+    runtime/SymbolObject.cpp
+    runtime/SymbolPrototype.cpp
+    runtime/SymbolTable.cpp
+    runtime/TemplateRegistry.cpp
+    runtime/TemplateRegistryKey.cpp
+    runtime/TemplateRegistryKeyTable.cpp
+    runtime/TestRunnerUtils.cpp
+    runtime/ThrowScope.cpp
+    runtime/TypeLocationCache.cpp
+    runtime/TypeProfiler.cpp
+    runtime/TypeProfilerLog.cpp
+    runtime/TypeSet.cpp
+    runtime/TypedArrayController.cpp
+    runtime/TypedArrayType.cpp
+    runtime/TypeofType.cpp
+    runtime/VM.cpp
+    runtime/VMEntryScope.cpp
+    runtime/VarOffset.cpp
+    runtime/Watchdog.cpp
+    runtime/WeakMapConstructor.cpp
+    runtime/WeakMapData.cpp
+    runtime/WeakMapPrototype.cpp
+    runtime/WeakSetConstructor.cpp
+    runtime/WeakSetPrototype.cpp
+
+    tools/CodeProfile.cpp
+    tools/CodeProfiling.cpp
+    tools/FunctionOverrides.cpp
+    tools/FunctionWhitelist.cpp
+    tools/JSDollarVM.cpp
+    tools/JSDollarVMPrototype.cpp
+    tools/SigillCrashAnalyzer.cpp
+    tools/VMInspector.cpp
+
+    wasm/JSWebAssembly.cpp
+    wasm/WasmB3IRGenerator.cpp
+    wasm/WasmBinding.cpp
+    wasm/WasmCallingConvention.cpp
+    wasm/WasmFormat.cpp
+    wasm/WasmMemory.cpp
+    wasm/WasmMemoryInformation.cpp
+    wasm/WasmModuleParser.cpp
+    wasm/WasmPageCount.cpp
+    wasm/WasmPlan.cpp
+    wasm/WasmSignature.cpp
+    wasm/WasmValidate.cpp
+
+    wasm/js/JSWebAssemblyCallee.cpp
+    wasm/js/JSWebAssemblyCompileError.cpp
+    wasm/js/JSWebAssemblyInstance.cpp
+    wasm/js/JSWebAssemblyLinkError.cpp
+    wasm/js/JSWebAssemblyMemory.cpp
+    wasm/js/JSWebAssemblyModule.cpp
+    wasm/js/JSWebAssemblyRuntimeError.cpp
+    wasm/js/JSWebAssemblyTable.cpp
+    wasm/js/WebAssemblyCompileErrorConstructor.cpp
+    wasm/js/WebAssemblyCompileErrorPrototype.cpp
+    wasm/js/WebAssemblyFunction.cpp
+    wasm/js/WebAssemblyInstanceConstructor.cpp
+    wasm/js/WebAssemblyInstancePrototype.cpp
+    wasm/js/WebAssemblyLinkErrorConstructor.cpp
+    wasm/js/WebAssemblyLinkErrorPrototype.cpp
+    wasm/js/WebAssemblyMemoryConstructor.cpp
+    wasm/js/WebAssemblyMemoryPrototype.cpp
+    wasm/js/WebAssemblyModuleConstructor.cpp
+    wasm/js/WebAssemblyModulePrototype.cpp
+    wasm/js/WebAssemblyModuleRecord.cpp
+    wasm/js/WebAssemblyPrototype.cpp
+    wasm/js/WebAssemblyRuntimeErrorConstructor.cpp
+    wasm/js/WebAssemblyRuntimeErrorPrototype.cpp
+    wasm/js/WebAssemblyTableConstructor.cpp
+    wasm/js/WebAssemblyTablePrototype.cpp
+    wasm/js/WebAssemblyToJSCallee.cpp
+
+    yarr/RegularExpression.cpp
+    yarr/YarrCanonicalizeUCS2.cpp
+    yarr/YarrInterpreter.cpp
+    yarr/YarrJIT.cpp
+    yarr/YarrPattern.cpp
+    yarr/YarrSyntaxChecker.cpp
+)
+
+# Extra compile flags for individual source files can go here.
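+# (Presumably -fno-optimize-sibling-calls keeps deep Proxy recursion growing the
+# native stack so that the engine's stack-overflow check still fires; tail-call
+# optimization would otherwise flatten that recursion.)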
+if (NOT MSVC)
+    set_source_files_properties(runtime/ProxyObject.cpp PROPERTIES COMPILE_FLAGS -fno-optimize-sibling-calls)
+else ()
+    # FIXME: Investigate if we need to set a similar flag on Windows.
+endif ()
+
+set(JavaScriptCore_OBJECT_LUT_SOURCES
+    runtime/ArrayConstructor.cpp
+    runtime/ArrayIteratorPrototype.cpp
+    runtime/BooleanPrototype.cpp
+    runtime/DateConstructor.cpp
+    runtime/DatePrototype.cpp
+    runtime/ErrorPrototype.cpp
+    runtime/GeneratorPrototype.cpp
+    runtime/InspectorInstrumentationObject.cpp
+    runtime/IntlCollatorConstructor.cpp
+    runtime/IntlCollatorPrototype.cpp
+    runtime/IntlDateTimeFormatConstructor.cpp
+    runtime/IntlDateTimeFormatPrototype.cpp
+    runtime/IntlNumberFormatConstructor.cpp
+    runtime/IntlNumberFormatPrototype.cpp
+    runtime/JSDataViewPrototype.cpp
+    runtime/JSGlobalObject.cpp
+    runtime/JSInternalPromiseConstructor.cpp
+    runtime/JSONObject.cpp
+    runtime/JSPromiseConstructor.cpp
+    runtime/JSPromisePrototype.cpp
+    runtime/MapPrototype.cpp
+    runtime/ModuleLoaderPrototype.cpp
+    runtime/NumberConstructor.cpp
+    runtime/NumberPrototype.cpp
+    runtime/ObjectConstructor.cpp
+    runtime/ReflectObject.cpp
+    runtime/RegExpConstructor.cpp
+    runtime/RegExpPrototype.cpp
+    runtime/SetPrototype.cpp
+    runtime/StringConstructor.cpp
+    runtime/StringIteratorPrototype.cpp
+    runtime/StringPrototype.cpp
+    runtime/SymbolConstructor.cpp
+    runtime/SymbolPrototype.cpp
+
+    wasm/js/WebAssemblyCompileErrorConstructor.cpp
+    wasm/js/WebAssemblyCompileErrorPrototype.cpp
+    wasm/js/WebAssemblyInstanceConstructor.cpp
+    wasm/js/WebAssemblyInstancePrototype.cpp
+    wasm/js/WebAssemblyLinkErrorConstructor.cpp
+    wasm/js/WebAssemblyLinkErrorPrototype.cpp
+    wasm/js/WebAssemblyMemoryConstructor.cpp
+    wasm/js/WebAssemblyMemoryPrototype.cpp
+    wasm/js/WebAssemblyModuleConstructor.cpp
+    wasm/js/WebAssemblyModulePrototype.cpp
+    wasm/js/WebAssemblyPrototype.cpp
+    wasm/js/WebAssemblyRuntimeErrorConstructor.cpp
+    wasm/js/WebAssemblyRuntimeErrorPrototype.cpp
+    wasm/js/WebAssemblyTableConstructor.cpp
+    wasm/js/WebAssemblyTablePrototype.cpp
+)
+
+set(JavaScriptCore_LIBRARIES
+    WTF${DEBUG_SUFFIX}
+    ${ICU_I18N_LIBRARIES}
+    ${LLVM_LIBRARIES}
+)
+
+set(JavaScriptCore_SCRIPTS_SOURCES_DIR "${JAVASCRIPTCORE_DIR}/Scripts")
+
+# Globbing relies on the fact that generator-specific file names are prefixed with their directory.
+# Top-level scripts should have a file extension, since they are invoked during the build.
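+# (For example, a generator-specific script such as builtins/builtins_model.py
+# is matched by the builtins* pattern below, while a top-level helper such as
+# jsmin.py is picked up by *.py.)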
+
+set(JavaScriptCore_SCRIPTS_SOURCES_PATHS
+    ${JavaScriptCore_SCRIPTS_SOURCES_DIR}/*.pl
+    ${JavaScriptCore_SCRIPTS_SOURCES_DIR}/*.py
+    ${JavaScriptCore_SCRIPTS_SOURCES_DIR}/builtins/builtins*.py
+)
+
+# Force JavaScriptCore to run scripts from the same staging path as WebCore.
+set(JavaScriptCore_SCRIPTS_DIR "${DERIVED_SOURCES_DIR}/ForwardingHeaders/JavaScriptCore/Scripts")
+
+file(MAKE_DIRECTORY ${JavaScriptCore_SCRIPTS_DIR})
+
+# The directory flattening performed below mirrors what the Mac port does with private headers.
+
+file(GLOB JavaScriptCore_SCRIPTS_SOURCES ${JavaScriptCore_SCRIPTS_SOURCES_PATHS})
+
+foreach (_file ${JavaScriptCore_SCRIPTS_SOURCES})
+    get_filename_component(_script "${_file}" NAME)
+    add_custom_command(
+        OUTPUT ${JavaScriptCore_SCRIPTS_DIR}/${_script}
+        MAIN_DEPENDENCY ${_file}
+        WORKING_DIRECTORY ${DERIVED_SOURCES_DIR}
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${_file} ${JavaScriptCore_SCRIPTS_DIR}/${_script}
+        VERBATIM)
+    list(APPEND JavaScriptCore_SCRIPTS ${JavaScriptCore_SCRIPTS_DIR}/${_script})
+endforeach ()
+
+set(UDIS_GEN_DEP
+    disassembler/udis86/ud_opcode.py
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/udis86_itab.c ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/udis86_itab.h
+    DEPENDS ${UDIS_GEN_DEP}
+    WORKING_DIRECTORY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}
+    COMMAND ${PYTHON_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/disassembler/udis86/ud_itab.py ${JAVASCRIPTCORE_DIR}/disassembler/udis86/optable.xml ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}
+    VERBATIM)
+
+list(APPEND JavaScriptCore_HEADERS
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/udis86_itab.h
+)
+
+set(LLINT_ASM
+    llint/LowLevelInterpreter.asm
+    llint/LowLevelInterpreter32_64.asm
+    llint/LowLevelInterpreter64.asm
+)
+
+set(OFFLINE_ASM
+    offlineasm/arm.rb
+    offlineasm/arm64.rb
+    offlineasm/ast.rb
+    offlineasm/backends.rb
+    offlineasm/cloop.rb
+    offlineasm/config.rb
+    offlineasm/instructions.rb
+    offlineasm/mips.rb
+    offlineasm/offsets.rb
+    offlineasm/opt.rb
+    offlineasm/parser.rb
+    offlineasm/registers.rb
+    offlineasm/risc.rb
+    offlineasm/self_hash.rb
+    offlineasm/settings.rb
+    offlineasm/transform.rb
+    offlineasm/x86.rb
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generate-bytecode-files
+    DEPENDS ${JAVASCRIPTCORE_DIR}/generate-bytecode-files bytecode/BytecodeList.json
+    COMMAND ${PYTHON_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generate-bytecode-files --bytecodes_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h --init_bytecodes_asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm ${JAVASCRIPTCORE_DIR}/bytecode/BytecodeList.json
+    VERBATIM)
+
+list(APPEND JavaScriptCore_HEADERS
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/offlineasm/generate_offset_extractor.rb
+    DEPENDS ${LLINT_ASM} ${OFFLINE_ASM} ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm
+    COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/offlineasm/generate_offset_extractor.rb -I${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/ ${JAVASCRIPTCORE_DIR}/llint/LowLevelInterpreter.asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h
+    VERBATIM)
+
+# We add the header files directly to the ADD_EXECUTABLE call instead of setting the
+# OBJECT_DEPENDS property on LLIntOffsetsExtractor.cpp because generate_offset_extractor.rb and
+# generate-bytecode-files may not regenerate the .h files when the hashes they calculate do not change.
+# In that case, if any of the dependencies specified in the ADD_CUSTOM_COMMANDs above have
+# changed, the command would be re-run on every build, because the mtime of the .h files would
+# always be older than that of their dependencies.
+# Additionally, setting the OBJECT_DEPENDS property would make the .h files a Makefile
+# dependency of both LLIntOffsetsExtractor and LLIntOffsetsExtractor.cpp, so the command would
+# actually be run multiple times!
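+#
+# For contrast, the avoided alternative would have looked roughly like this
+# hypothetical sketch (shown only as a comment; the property list is illustrative):
+#
+#     set_source_files_properties(llint/LLIntOffsetsExtractor.cpp PROPERTIES
+#         OBJECT_DEPENDS "${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h;${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h"
+#     )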
+add_executable(LLIntOffsetsExtractor
+    ${JAVASCRIPTCORE_DIR}/llint/LLIntOffsetsExtractor.cpp
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h
+)
+target_link_libraries(LLIntOffsetsExtractor WTF)
+
+# The build system will execute asm.rb every time LLIntOffsetsExtractor's mtime is newer than
+# LLIntAssembly.h's mtime. The problem is that asm.rb has a built-in optimization: it records a
+# checksum of the LLIntOffsetsExtractor binary, and if the checksum of the new
+# LLIntOffsetsExtractor matches, no output is generated. To keep this target consistent and avoid
+# running this command on every build, we artificially update LLIntAssembly.h's mtime (using touch)
+# after every asm.rb run.
+if (MSVC)
+    set(LLIntOutput LowLevelInterpreterWin.asm)
+    set(OFFLINE_ASM_ARGS --assembler=MASM)
+else ()
+    set(LLIntOutput LLIntAssembly.h)
+endif ()
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/${LLIntOutput}
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/offlineasm/asm.rb
+    DEPENDS LLIntOffsetsExtractor ${LLINT_ASM} ${OFFLINE_ASM} ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm
+    COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/offlineasm/asm.rb -I${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/ ${JAVASCRIPTCORE_DIR}/llint/LowLevelInterpreter.asm $<TARGET_FILE:LLIntOffsetsExtractor> ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/${LLIntOutput} ${OFFLINE_ASM_ARGS}
+    COMMAND ${CMAKE_COMMAND} -E touch_nocreate ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/${LLIntOutput}
+    WORKING_DIRECTORY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}
+    VERBATIM)
+
+# The explanation for not making LLIntAssembly.h part of the OBJECT_DEPENDS property of some of
+# the .cpp files below is similar to the one in the previous comment. However, since these .cpp
+# files are used to build JavaScriptCore itself, we can just add LLIntAssembly.h to
+# JavaScriptCore_HEADERS, since that variable is used in the add_library() call at the end of this file.
+if (MSVC)
+    enable_language(ASM_MASM)
+    list(APPEND JavaScriptCore_SOURCES
+        ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LowLevelInterpreterWin.asm
+    )
+    # Win32 needs /safeseh with assembly, but Win64 does not.
+    if (CMAKE_SIZEOF_VOID_P EQUAL 4)
+        set_source_files_properties(${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LowLevelInterpreterWin.asm
+            PROPERTIES COMPILE_FLAGS  "/safeseh"
+        )
+    endif ()
+else ()
+    list(APPEND JavaScriptCore_HEADERS
+        ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntAssembly.h
+    )
+endif ()
+
+# WebAssembly generator
+
+macro(GENERATE_PYTHON _generator _input _output)
+    add_custom_command(
+        OUTPUT ${_output}
+        MAIN_DEPENDENCY ${_generator}
+        DEPENDS ${_input}
+        COMMAND ${PYTHON_EXECUTABLE} ${_generator} ${_input} ${_output}
+        VERBATIM)
+    list(APPEND JavaScriptCore_HEADERS ${_output})
+    ADD_SOURCE_DEPENDENCIES(${_input} ${_output})
+endmacro()
+GENERATE_PYTHON(${CMAKE_CURRENT_SOURCE_DIR}/wasm/generateWasmOpsHeader.py ${CMAKE_CURRENT_SOURCE_DIR}/wasm/wasm.json ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/WasmOps.h)
+GENERATE_PYTHON(${CMAKE_CURRENT_SOURCE_DIR}/wasm/generateWasmValidateInlinesHeader.py ${CMAKE_CURRENT_SOURCE_DIR}/wasm/wasm.json ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/WasmValidateInlines.h)
+GENERATE_PYTHON(${CMAKE_CURRENT_SOURCE_DIR}/wasm/generateWasmB3IRGeneratorInlinesHeader.py ${CMAKE_CURRENT_SOURCE_DIR}/wasm/wasm.json ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/WasmB3IRGeneratorInlines.h)
+
+# LUT generator
+
+set(HASH_LUT_GENERATOR ${CMAKE_CURRENT_SOURCE_DIR}/create_hash_table)
+macro(GENERATE_HASH_LUT _input _output)
+    add_custom_command(
+        OUTPUT ${_output}
+        MAIN_DEPENDENCY ${HASH_LUT_GENERATOR}
+        DEPENDS ${_input}
+        COMMAND ${PERL_EXECUTABLE} ${HASH_LUT_GENERATOR} ${_input} > ${_output}
+        VERBATIM)
+    list(APPEND JavaScriptCore_HEADERS ${_output})
+    ADD_SOURCE_DEPENDENCIES(${_input} ${_output})
+endmacro()
+
+# GENERATOR 1-A: LUT creator
+
+foreach (_file ${JavaScriptCore_OBJECT_LUT_SOURCES})
+    get_filename_component(_name ${_file} NAME_WE)
+    GENERATE_HASH_LUT(${CMAKE_CURRENT_SOURCE_DIR}/${_file} ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/${_name}.lut.h)
+endforeach ()
+
+set(JavaScriptCore_FORWARDING_HEADERS_DIRECTORIES
+    API
+    assembler
+    bindings
+    builtins
+    bytecode
+    debugger
+    dfg
+    disassembler
+    domjit
+    heap
+    inspector
+    interpreter
+    jit
+    llint
+    parser
+    profiler
+    replay
+    runtime
+    yarr
+
+    collector/handles
+
+    inspector/agents
+    inspector/augmentable
+    inspector/remote
+)
+
+# GENERATOR 1-B: particular LUT creator (for 1 file only)
+GENERATE_HASH_LUT(${CMAKE_CURRENT_SOURCE_DIR}/parser/Keywords.table ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Lexer.lut.h)
+
+# GENERATOR: "RegExpJitTables.h": tables used by Yarr
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/RegExpJitTables.h
+    MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/create_regex_tables
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/create_regex_tables > ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/RegExpJitTables.h
+    VERBATIM)
+ADD_SOURCE_DEPENDENCIES(${CMAKE_CURRENT_SOURCE_DIR}/yarr/YarrPattern.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/RegExpJitTables.h)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/YarrCanonicalizeUnicode.cpp
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generateYarrCanonicalizeUnicode
+    DEPENDS ${JAVASCRIPTCORE_DIR}/ucd/CaseFolding.txt
+    COMMAND ${PYTHON_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generateYarrCanonicalizeUnicode ${JAVASCRIPTCORE_DIR}/ucd/CaseFolding.txt ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/YarrCanonicalizeUnicode.cpp
+    VERBATIM)
+
+list(APPEND JavaScriptCore_SOURCES
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/YarrCanonicalizeUnicode.cpp
+)
+
+# GENERATOR: "KeywordLookup.h": keyword decision tree used by the lexer
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/KeywordLookup.h
+    MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/KeywordLookupGenerator.py
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/parser/Keywords.table
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/KeywordLookupGenerator.py ${CMAKE_CURRENT_SOURCE_DIR}/parser/Keywords.table > ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/KeywordLookup.h
+    VERBATIM)
+ADD_SOURCE_DEPENDENCIES(${CMAKE_CURRENT_SOURCE_DIR}/parser/Lexer.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/KeywordLookup.h)
+
+
+# Inspector Interfaces
+
+set(JavaScriptCore_INSPECTOR_SCRIPTS_DIR "${JAVASCRIPTCORE_DIR}/inspector/scripts")
+
+set(JavaScriptCore_INSPECTOR_PROTOCOL_SCRIPTS
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/generate-inspector-protocol-bindings.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/cpp_generator.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/cpp_generator_templates.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_js_backend_commands.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_backend_dispatcher_header.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_backend_dispatcher_implementation.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_frontend_dispatcher_header.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_frontend_dispatcher_implementation.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_protocol_types_header.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_protocol_types_implementation.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generator.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generator_templates.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/__init__.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/models.py
+)
+
+set(JavaScriptCore_INSPECTOR_DOMAINS
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/ApplicationCache.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/CSS.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Console.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/DOM.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/DOMDebugger.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/DOMStorage.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Database.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Debugger.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/GenericTypes.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Heap.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Inspector.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/LayerTree.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Network.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/OverlayTypes.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Page.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Runtime.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/ScriptProfiler.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Timeline.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Worker.json
+)
+
+if (ENABLE_INDEXED_DATABASE)
+    list(APPEND JavaScriptCore_INSPECTOR_DOMAINS
+        ${JAVASCRIPTCORE_DIR}/inspector/protocol/IndexedDB.json
+    )
+endif ()
+
+if (ENABLE_RESOURCE_USAGE)
+    list(APPEND JavaScriptCore_INSPECTOR_DOMAINS
+        ${JAVASCRIPTCORE_DIR}/inspector/protocol/Memory.json
+    )
+endif ()
+
+if (ENABLE_WEB_REPLAY)
+    list(APPEND JavaScriptCore_INSPECTOR_DOMAINS
+        ${JAVASCRIPTCORE_DIR}/inspector/protocol/Replay.json
+    )
+endif ()
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/CombinedDomains.json
+    MAIN_DEPENDENCY ${JavaScriptCore_SCRIPTS_DIR}/generate-combined-inspector-json.py
+    DEPENDS ${JavaScriptCore_INSPECTOR_DOMAINS}
+    COMMAND ${PYTHON_EXECUTABLE} ${JavaScriptCore_SCRIPTS_DIR}/generate-combined-inspector-json.py ${JavaScriptCore_INSPECTOR_DOMAINS} > ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/CombinedDomains.json
+    VERBATIM)
+
+# Inspector Backend Dispatchers, Frontend Dispatchers, Type Builders
+file(MAKE_DIRECTORY ${DERIVED_SOURCES_WEBINSPECTORUI_DIR}/UserInterface/Protocol)
+file(MAKE_DIRECTORY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector)
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendDispatchers.cpp
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendDispatchers.h
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorFrontendDispatchers.cpp
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorFrontendDispatchers.h
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorProtocolObjects.cpp
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorProtocolObjects.h
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendCommands.js
+    MAIN_DEPENDENCY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/CombinedDomains.json
+    DEPENDS ${JavaScriptCore_INSPECTOR_PROTOCOL_SCRIPTS}
+    COMMAND ${PYTHON_EXECUTABLE} ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/generate-inspector-protocol-bindings.py --outputDir "${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector" --framework JavaScriptCore ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/CombinedDomains.json
+    VERBATIM)
+
+# JSCBuiltins
+
+set(BUILTINS_GENERATOR_SCRIPTS
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generator.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_model.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_templates.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_combined_header.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_combined_implementation.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_separate_header.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_separate_implementation.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_wrapper_header.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_wrapper_implementation.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_internals_wrapper_header.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_internals_wrapper_implementation.py
+    ${JavaScriptCore_SCRIPTS_DIR}/generate-js-builtins.py
+    ${JavaScriptCore_SCRIPTS_DIR}/lazywriter.py
+)
+
+set(JavaScriptCore_BUILTINS_SOURCES
+    ${JAVASCRIPTCORE_DIR}/builtins/ArrayConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ArrayIteratorPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ArrayPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/AsyncFunctionPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/DatePrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/FunctionPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/GeneratorPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/GlobalObject.js
+    ${JAVASCRIPTCORE_DIR}/builtins/GlobalOperations.js
+    ${JAVASCRIPTCORE_DIR}/builtins/InspectorInstrumentationObject.js
+    ${JAVASCRIPTCORE_DIR}/builtins/InternalPromiseConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/IteratorHelpers.js
+    ${JAVASCRIPTCORE_DIR}/builtins/IteratorPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/MapPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ModuleLoaderPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/NumberConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/NumberPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ObjectConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/PromiseConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/PromiseOperations.js
+    ${JAVASCRIPTCORE_DIR}/builtins/PromisePrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ReflectObject.js
+    ${JAVASCRIPTCORE_DIR}/builtins/RegExpPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/SetPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/StringConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/StringIteratorPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/StringPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/TypedArrayConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/TypedArrayPrototype.js
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/AirOpcode.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/AirOpcodeGenerated.h
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/b3/air/AirOpcode.opcodes
+    DEPENDS ${JAVASCRIPTCORE_DIR}/b3/air/opcode_generator.rb
+    COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/b3/air/opcode_generator.rb ${JAVASCRIPTCORE_DIR}/b3/air/AirOpcode.opcodes
+    WORKING_DIRECTORY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}
+    VERBATIM
+)
+
+list(APPEND JavaScriptCore_SOURCES
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/AirOpcode.h
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/AirOpcodeGenerated.h
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSCBuiltins.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSCBuiltins.h
+    MAIN_DEPENDENCY ${JavaScriptCore_SCRIPTS_DIR}/generate-js-builtins.py
+    DEPENDS ${JavaScriptCore_BUILTINS_SOURCES} ${BUILTINS_GENERATOR_SCRIPTS}
+    COMMAND ${PYTHON_EXECUTABLE} ${JavaScriptCore_SCRIPTS_DIR}/generate-js-builtins.py --framework JavaScriptCore --output-directory ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR} --combined ${JavaScriptCore_BUILTINS_SOURCES}
+    VERBATIM)
+
+list(APPEND JavaScriptCore_SOURCES
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendDispatchers.cpp
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorFrontendDispatchers.cpp
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorProtocolObjects.cpp
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSCBuiltins.cpp
+)
+
+list(APPEND JavaScriptCore_HEADERS
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendDispatchers.h
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorFrontendDispatchers.h
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorProtocolObjects.h
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSCBuiltins.h
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.min.js
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/inspector/InjectedScriptSource.js
+    DEPENDS ${JavaScriptCore_SCRIPTS_DIR}/xxd.pl ${JavaScriptCore_SCRIPTS_DIR}/jsmin.py
+    COMMAND ${CMAKE_COMMAND} -E echo "//# sourceURL=__InjectedScript_InjectedScriptSource.js" > ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.min.js
+    COMMAND ${PYTHON_EXECUTABLE} ${JavaScriptCore_SCRIPTS_DIR}/jsmin.py < ${JAVASCRIPTCORE_DIR}/inspector/InjectedScriptSource.js >> ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.min.js
+    COMMAND ${PERL_EXECUTABLE} ${JavaScriptCore_SCRIPTS_DIR}/xxd.pl InjectedScriptSource_js ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.min.js ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.h
+    VERBATIM)
+
+list(APPEND JavaScriptCore_HEADERS ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.h)
+
+# Web Replay inputs generator
+if (ENABLE_WEB_REPLAY)
+    set(JavaScript_WEB_REPLAY_INPUTS ${CMAKE_CURRENT_SOURCE_DIR}/replay/JSInputs.json)
+    add_custom_command(
+        OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSReplayInputs.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSReplayInputs.cpp
+        MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/replay/scripts/CodeGeneratorReplayInputs.py
+        DEPENDS ${JavaScript_WEB_REPLAY_INPUTS}
+        COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/replay/scripts/CodeGeneratorReplayInputs.py --outputDir ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/ --framework JavaScriptCore ${JavaScript_WEB_REPLAY_INPUTS}
+        VERBATIM)
+
+    list(APPEND JavaScriptCore_SOURCES
+        replay/EncodedValue.cpp
+        ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSReplayInputs.cpp
+    )
+    list(APPEND JavaScriptCore_HEADERS ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSReplayInputs.h)
+endif ()
+
+if (WTF_CPU_ARM)
+elseif (WTF_CPU_ARM64)
+elseif (WTF_CPU_HPPA)
+elseif (WTF_CPU_PPC)
+elseif (WTF_CPU_PPC64)
+elseif (WTF_CPU_PPC64LE)
+elseif (WTF_CPU_S390)
+elseif (WTF_CPU_S390X)
+elseif (WTF_CPU_MIPS)
+elseif (WTF_CPU_SH4)
+elseif (WTF_CPU_X86)
+elseif (WTF_CPU_X86_64)
+    if (MSVC AND ENABLE_JIT)
+        add_custom_command(
+            OUTPUT ${DERIVED_SOURCES_DIR}/JITStubsMSVC64.obj
+            MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/jit/JITStubsMSVC64.asm
+            COMMAND ml64 -nologo -c -Fo ${DERIVED_SOURCES_DIR}/JITStubsMSVC64.obj ${JAVASCRIPTCORE_DIR}/jit/JITStubsMSVC64.asm
+            VERBATIM)
+
+        list(APPEND JavaScriptCore_SOURCES ${DERIVED_SOURCES_DIR}/JITStubsMSVC64.obj)
+    endif ()
+else ()
+    message(FATAL_ERROR "Unknown CPU")
+endif ()
+
+
+WEBKIT_INCLUDE_CONFIG_FILES_IF_EXISTS()
+
+WEBKIT_CREATE_FORWARDING_HEADERS(JavaScriptCore DIRECTORIES ${JavaScriptCore_FORWARDING_HEADERS_DIRECTORIES} FILES ${JavaScriptCore_FORWARDING_HEADERS_FILES})
+
+target_include_directories(LLIntOffsetsExtractor PRIVATE ${JavaScriptCore_INCLUDE_DIRECTORIES})
+
+add_subdirectory(shell)
+
+WEBKIT_WRAP_SOURCELIST(${JavaScriptCore_SOURCES})
+WEBKIT_FRAMEWORK(JavaScriptCore)
+
+if (NOT "${PORT}" STREQUAL "Mac")
+    if (${JavaScriptCore_LIBRARY_TYPE} STREQUAL "SHARED")
+        POPULATE_LIBRARY_VERSION(JAVASCRIPTCORE)
+        set_target_properties(JavaScriptCore PROPERTIES VERSION ${JAVASCRIPTCORE_VERSION} SOVERSION ${JAVASCRIPTCORE_VERSION_MAJOR})
+        install(TARGETS JavaScriptCore DESTINATION "${LIB_INSTALL_DIR}")
+    endif ()
+endif ()
+
+# Force staging of shared scripts, even if they aren't directly used to build JavaScriptCore.
+
+add_custom_target(stageSharedScripts DEPENDS ${JavaScriptCore_SCRIPTS})
+add_dependencies(JavaScriptCore stageSharedScripts)
+
+if (MSVC)
+    add_custom_command(
+        TARGET JavaScriptCore
+        PRE_BUILD
+        COMMAND ${PERL_EXECUTABLE} ${WEBKIT_LIBRARIES_DIR}/tools/scripts/auto-version.pl ${DERIVED_SOURCES_DIR}
+        VERBATIM)
+
+    add_custom_command(
+        TARGET JavaScriptCore
+        POST_BUILD
+        COMMAND ${PERL_EXECUTABLE} ${WEBKIT_LIBRARIES_DIR}/tools/scripts/version-stamp.pl ${DERIVED_SOURCES_DIR} $<TARGET_FILE:JavaScriptCore>
+        VERBATIM)
+endif ()
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
deleted file mode 100644
index 84874e29c..000000000
--- a/Source/JavaScriptCore/ChangeLog
+++ /dev/null
@@ -1,16275 +0,0 @@
-2016-03-12  Tomas Popela  
-
-        [CLoop] - Fix CLoop on the 32-bit Big-Endians
-        https://bugs.webkit.org/show_bug.cgi?id=137020
-
-        Reviewed by Mark Lam.
-
-        * llint/LowLevelInterpreter.asm:
-        * llint/LowLevelInterpreter32_64.asm:
-
-2016-03-12  Tomas Popela  
-
-        LLINT op_put_to_scope and op_get_from_scope should use loadpFromInstruction to get operand from instruction
-        https://bugs.webkit.org/show_bug.cgi?id=132333
-
-        Unreviewed.
-
-        When loading the operand variable from the instruction in
-        _llint_op_get_from_scope and _llint_op_put_to_scope, use
-        loadpFromInstruction instead of loadisFromInstruction. Also, when
-        saving the operand in LLIntSlowPaths.cpp, do it the same way as in
-        CodeBlock.cpp.
-
-        * llint/LLIntSlowPaths.cpp:
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-
-2016-03-10  Carlos Garcia Campos  
-
-        Fix the build in Windows.
-
-        Reviewed by Alberto Garcia.
-
-        * interpreter/JSStack.cpp:
-        (JSC::commitSize):
-
-2015-12-07  Alberto Garcia  
-
-        Crashes on PPC64 due to mprotect() on address not aligned to the page size
-        https://bugs.webkit.org/show_bug.cgi?id=130237
-
-        Reviewed by Mark Lam.
-
-        Make sure that commitSize is at least as big as the page size.
-
-        * interpreter/JSStack.cpp:
-        (JSC::commitSize):
-        (JSC::JSStack::JSStack):
-        (JSC::JSStack::growSlowCase):
-        * interpreter/JSStack.h:
-
-2014-04-09  Mark Lam  
-
-        Ensure that LLINT accessing of the ProtoCallFrame is big endian friendly.
-        
-
-        Reviewed by Mark Hahnenberg.
-
-        Change ProtoCallFrame::paddedArgCount to be of type uint32_t.  The argCount
-        that it pads is of type int anyway.  It doesn't need to be 64 bit.  This
-        also makes it work with the LLINT which is loading it with a loadi
-        instruction.
-
-        We should add the PayLoadOffset to ProtoCallFrame::argCountAndCodeOriginValue
-        when loading the argCount.
-
-        * interpreter/ProtoCallFrame.h:
-        (JSC::ProtoCallFrame::setPaddedArgCount):
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-
-2015-01-12  Geoffrey Garen  
-
-        Out of bounds read in IdentifierArena::makeIdentifier
-        https://bugs.webkit.org/show_bug.cgi?id=140376
-
-        Patch by Alexey Proskuryakov.
-
-        Reviewed and ChangeLogged by Geoffrey Garen.
-
-        No test, since this is a small past-the-end read, which is very
-        difficult to turn into a reproducible failing test -- and existing tests
-        crash reliably using ASan.
-
-        * parser/ParserArena.h:
-        (JSC::IdentifierArena::makeIdentifier):
-        (JSC::IdentifierArena::makeIdentifierLCharFromUChar): Check for a
-        zero-length string input, like we do in the literal parser, since it is
-        not valid to dereference characters in a zero-length string.
-
-        A zero-length string is allowed in JavaScript -- for example, "".
-
-2015-04-24  Matthew Mirman  
-
-        Made Object.prototype.__proto__ native getter and setter check that this object is not null or undefined
-        https://bugs.webkit.org/show_bug.cgi?id=141865
-        rdar://problem/19927273
-
-        Reviewed by Filip Pizlo.
-
-        * runtime/JSGlobalObjectFunctions.cpp:
-        (JSC::globalFuncProtoGetter):
-        (JSC::globalFuncProtoSetter):
-
-2015-04-17  Milan Crha  
-
-        [GTK][Stable] Missing implementation of callToJavaScript/callToNativeFunction with msys/mingw32
-        https://bugs.webkit.org/show_bug.cgi?id=132856
-
-        * jit/JITStubsX86.h:
-
-2015-01-06  Philip Chimento  
-
-        webkit-gtk 2.3.3 fails to build on OS X - Conflicting type "Fixed"
-        https://bugs.webkit.org/show_bug.cgi?id=126433
-
-        Don't include CoreFoundation.h when building the GTK port.
-
-        * Source/JavaScriptCore/API/WebKitAvailability.h: Add
-        !defined(BUILDING_GTK__) to defined(__APPLE__).
-
-2015-03-09  Mark Lam  
-
-        8-bit version of weakCompareAndSwap() can cause an infinite loop.
-        https://webkit.org/b/142513
-
-        Reviewed by Filip Pizlo.
-
-        Added a test that exercises the 8-bit CAS from multiple threads.  The threads
-        will contend to set bits in a large array of bytes using the CAS function.
-
-        * API/tests/CompareAndSwapTest.cpp: Added.
-        (Bitmap::Bitmap):
-        (Bitmap::numBits):
-        (Bitmap::clearAll):
-        (Bitmap::concurrentTestAndSet):
-        (setBitThreadFunc):
-        (testCompareAndSwap):
-        * API/tests/testapi.c:
-        (main):
-        * JavaScriptCore.vcxproj/testapi/testapi.vcxproj:
-        * JavaScriptCore.vcxproj/testapi/testapi.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-
-2014-12-04  Oliver Hunt  
-
-        Serialization of MapData object provides unsafe access to internal types
-        https://bugs.webkit.org/show_bug.cgi?id=138653
-
-        Reviewed by Geoffrey Garen.
-
-        Converting these ASSERTs into RELEASE_ASSERTs, as it is now obvious
-        that, despite trying hard to be safe in all cases, it's simply too easy
-        to use an iterator in an unsafe state.
-
-        * runtime/MapData.h:
-        (JSC::MapData::const_iterator::key):
-        (JSC::MapData::const_iterator::value):
-
-2014-10-27  Mark Lam  
-
-        Crash when attempting to perform array iteration on a non-array with numeric keys not initialized.
-        
-
-        Reviewed by Geoffrey Garen.
-
-        The arrayIteratorNextThunkGenerator() thunk was not checking for the case where
-        the butterfly may be NULL.  This was the source of the crash, and is now fixed.
-
-        In addition, it is also not checking for the case where a property named "length"
-        may have been set on the iterated object.  The thunk only checks the butterfly's
-        publicLength for its iteration operation.  Array objects will work fine with this
-        because it always updates its butterfly's publicLength when its length changes.
-        In the case of iterable non-Array objects, the "length" property will require a
-        look up outside of the scope of this thunk.  The fix is simply to limit the fast
-        case checks in this thunk to Array objects.
-
-        * jit/ThunkGenerators.cpp:
-        (JSC::arrayIteratorNextThunkGenerator):
-
-2014-10-22  Byungseon Shin  
-
-        String(new Date(Mar 30 2014 01:00:00)) is wrong in CET
-        https://bugs.webkit.org/show_bug.cgi?id=130967
-
-        Reviewed by Mark Lam.
-
-        By definition of calculateLocalTimeOffset, the input time should be UTC time,
-        but in many cases the input time is based on local time, which gives
-        erroneous results when calculating the offset of a DST boundary time.
-        By adding an argument to distinguish UTC from local time, we can get the correct offset.
-
-        * JavaScriptCore.order:
-        * runtime/DateConstructor.cpp:
-        (JSC::constructDate):
-        (JSC::callDate):
-        (JSC::dateUTC):
-        * runtime/DateInstance.cpp:
-        (JSC::DateInstance::calculateGregorianDateTime):
-        (JSC::DateInstance::calculateGregorianDateTimeUTC):
-        * runtime/DatePrototype.cpp:
-        (JSC::setNewValueFromTimeArgs):
-        (JSC::setNewValueFromDateArgs):
-        (JSC::dateProtoFuncSetMilliSeconds):
-        (JSC::dateProtoFuncSetUTCMilliseconds):
-        (JSC::dateProtoFuncSetSeconds):
-        (JSC::dateProtoFuncSetUTCSeconds):
-        (JSC::dateProtoFuncSetMinutes):
-        (JSC::dateProtoFuncSetUTCMinutes):
-        (JSC::dateProtoFuncSetHours):
-        (JSC::dateProtoFuncSetUTCHours):
-        (JSC::dateProtoFuncSetDate):
-        (JSC::dateProtoFuncSetUTCDate):
-        (JSC::dateProtoFuncSetMonth):
-        (JSC::dateProtoFuncSetUTCMonth):
-        (JSC::dateProtoFuncSetFullYear):
-        (JSC::dateProtoFuncSetUTCFullYear):
-        (JSC::dateProtoFuncSetYear):
-        * runtime/JSDateMath.cpp:
-        (JSC::localTimeOffset):
-        (JSC::gregorianDateTimeToMS):
-        (JSC::msToGregorianDateTime):
-        (JSC::parseDateFromNullTerminatedCharacters):
-        * runtime/JSDateMath.h:
-        * runtime/VM.h:
-        (JSC::LocalTimeOffsetCache::LocalTimeOffsetCache):
-        (JSC::LocalTimeOffsetCache::reset):
-        Passing a TimeType argument to distinguish UTC time from local time.
-
-2014-08-19  Magnus Granberg  
-
-        TEXTREL in libjavascriptcoregtk-1.0.so.0.11.0 on x86 (or i586)
-        https://bugs.webkit.org/show_bug.cgi?id=70610
-
-        Reviewed by Darin Adler.
-
-        Setup %ebx so we can use the plt.
-
-        * jit/ThunkGenerators.cpp:
-
-2014-10-06  Philip Chimento  
-
-        webkit-gtk fails to link JavaScriptCore, missing symbols add_history and readline
-        https://bugs.webkit.org/show_bug.cgi?id=127059
-
-        Reviewed by Carlos Garcia Campos.
-
-        * GNUmakefile.am: add -ledit to jsc link args on OS_DARWIN.
-
-2014-04-15  Filip Pizlo  
-
-        Unreviewed, add the obvious thing that marks MakeRope as exiting since it can exit.
-
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::AbstractInterpreter::executeEffects):
-
-2014-08-18  Diego Pino Garcia  
-
-        Completed iterator can be revived by adding more than one new entry to the target object
-        https://bugs.webkit.org/show_bug.cgi?id=129993
-
-        Reviewed by Oliver Hunt.
-
-        When the iterator reaches the end, finish the iterator.
-
-        * runtime/JSMapIterator.h:
-        (JSC::JSMapIterator::finish):
-        * runtime/JSSetIterator.h:
-        (JSC::JSSetIterator::finish):
-        * runtime/MapData.h:
-        (JSC::MapData::const_iterator::finish): set index of iterator to max
-        Int32.
-        * runtime/MapIteratorPrototype.cpp:
-        (JSC::MapIteratorPrototypeFuncNext):
-        * runtime/SetIteratorPrototype.cpp:
-        (JSC::SetIteratorPrototypeFuncNext):
-
-2014-07-04  Carlos Garcia Campos  
-
-        [GTK] [Stable] Crash while playing a video
-        https://bugs.webkit.org/show_bug.cgi?id=133940
-
-        Unreviewed. Rolling out r160688.
-
-        * jit/CCallHelpers.h:
-        * jit/Repatch.cpp:
-        (JSC::generateProtoChainAccessStub):
-        (JSC::tryBuildGetByIDList):
-
-2014-05-19  Mark Lam  
-
-        operationOptimize() should defer the GC for a while.
-        
-
-        Reviewed by Filip Pizlo.
-
-        Currently, operationOptimize() only defers the GC until its end.  As a result,
-        a GC may be triggered just before we return from operationOptimize(), and it may
-        jettison the optimized codeBlock that we're planning to OSR enter into when we
-        return from this function.  This is because the OSR entry on-ramp code hasn't
-        been executed yet, and hence, there is not yet a reference to this new codeBlock
-        from the stack, and there won't be until we've had a chance to return out of
-        operationOptimize() to run the OSR entry on-ramp code.
-
-        This issue is now fixed by using DeferGCForAWhile instead of DeferGC.  This
-        ensures that the GC will be deferred until after the OSR entry on-ramp can be
-        executed.
-
-        * jit/JITOperations.cpp:
-
-2014-05-09  Alberto Garcia  
-
-        jsmin.py license header confusing, mentions non-free license
-        https://bugs.webkit.org/show_bug.cgi?id=123665
-
-        Reviewed by Darin Adler.
-
-        Pull the most recent version from upstream, which has a clear
-        license.
-
-        * inspector/scripts/jsmin.py:
-
-2014-04-19  Filip Pizlo  
-
-        Make it easier to check if an integer sum would overflow
-        https://bugs.webkit.org/show_bug.cgi?id=131900
-
-        Reviewed by Darin Adler.
-
-        * dfg/DFGOperations.cpp:
-        * runtime/Operations.h:
-        (JSC::jsString):
-
-2014-04-19  Filip Pizlo  
-
-        Address some feedback on https://bugs.webkit.org/show_bug.cgi?id=130684.
-
-        * dfg/DFGOperations.cpp:
-        * runtime/JSString.h:
-        (JSC::JSRopeString::RopeBuilder::append):
-
-2014-04-15  Filip Pizlo  
-
-        compileMakeRope does not emit necessary bounds checks
-        https://bugs.webkit.org/show_bug.cgi?id=130684
-        
-
-        Reviewed by Oliver Hunt.
-        
-        Add string length bounds checks in a bunch of places. We should never allow a string
-        to have a length greater than 2^31-1 because it's not clear that the language has
-        semantics for it and because there is code that assumes that this cannot happen.
-        
-        Also add a bunch of tests to that effect to cover the various ways in which this was
-        previously allowed to happen.
-
-        * dfg/DFGOperations.cpp:
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileMakeRope):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileMakeRope):
-        * runtime/JSString.cpp:
-        (JSC::JSRopeString::RopeBuilder::expand):
-        * runtime/JSString.h:
-        (JSC::JSString::create):
-        (JSC::JSRopeString::RopeBuilder::append):
-        (JSC::JSRopeString::RopeBuilder::release):
-        (JSC::JSRopeString::append):
-        * runtime/Operations.h:
-        (JSC::jsString):
-        (JSC::jsStringFromRegisterArray):
-        (JSC::jsStringFromArguments):
-        * runtime/StringPrototype.cpp:
-        (JSC::stringProtoFuncIndexOf):
-        (JSC::stringProtoFuncSlice):
-        (JSC::stringProtoFuncSubstring):
-        (JSC::stringProtoFuncToLowerCase):
-        * tests/stress/make-large-string-jit-strcat.js: Added.
-        (foo):
-        * tests/stress/make-large-string-jit.js: Added.
-        (foo):
-        * tests/stress/make-large-string-strcat.js: Added.
-        * tests/stress/make-large-string.js: Added.
-
-2014-03-12  Mark Lam  
-
-        Update type of local vars to match the type of String length.
-        
-
-        Reviewed by Geoffrey Garen.
-
-        * runtime/JSStringJoiner.cpp:
-        (JSC::JSStringJoiner::join):
-
-2014-02-01  Michael Saboff  
-
-        REGRESSION (r163027?): CrashTracer: [USER] com.apple.WebKit.WebContent.Development at com.apple.JavaScriptCore: JSC::ArrayProfile::computeUpdatedPrediction + 4
-        https://bugs.webkit.org/show_bug.cgi?id=128037
-
-        Reviewed by Mark Lam.
-
-        op_call_varargs ops now needs an ArrayProfile since DFG inlines these since
-        change set r162739.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::emitCallVarargs):
-
-2014-01-30  Zan Dobersek  
-
-        [GTK] Only disable -ftree-dce optimization when compiling with GCC
-        https://bugs.webkit.org/show_bug.cgi?id=127911
-
-        Reviewed by Carlos Garcia Campos.
-
-        * GNUmakefile.am: Only disable the -ftree-dce optimization when using the GCC compiler.
-        Some Clang versions/configurations don't support the flag.
-
-2014-01-30  Zan Dobersek  
-
-        [GTK] Disable optimizations for JSC that turned out malignant after jsCStack branch merge
-        https://bugs.webkit.org/show_bug.cgi?id=127909
-
-        Reviewed by Carlos Garcia Campos.
-
-        * GNUmakefile.am: Disable the -fomit-frame-pointer optimization to achieve proper register usage
-        in operationCallEval. Disable the -ftree-dce optimization since it is causing additional failures
-        when using GCC 4.8, possibly due to a bug in the compiler itself.
-
-2014-01-29  Csaba Osztrogonác  
-
-        Remove ENABLE(JAVASCRIPT_DEBUGGER) leftovers
-        https://bugs.webkit.org/show_bug.cgi?id=127845
-
-        Reviewed by Joseph Pecoraro.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2014-01-29  Csaba Osztrogonác  
-
-        Remove ENABLE(JAVASCRIPT_DEBUGGER) guards
-        https://bugs.webkit.org/show_bug.cgi?id=127840
-
-        Reviewed by Mark Lam.
-
-        * inspector/scripts/CodeGeneratorInspector.py:
-
-2014-01-28  Commit Queue  
-
-        Unreviewed, rolling out r162987.
-        http://trac.webkit.org/changeset/162987
-        https://bugs.webkit.org/show_bug.cgi?id=127825
-
-        Broke Mountain Lion build (Requested by andersca on #webkit).
-
-        * inspector/InjectedScriptSource.js:
-        (.):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::getOwnPropertyDescriptor):
-        * runtime/PropertyDescriptor.cpp:
-        * runtime/PropertyDescriptor.h:
-        * runtime/PropertySlot.h:
-
-2014-01-28  Oliver Hunt  
-
-        Make DOM attributes appear to be faux accessor properties
-        https://bugs.webkit.org/show_bug.cgi?id=127797
-
-        Reviewed by Michael Saboff.
-
-        Add flag so we can identify which properties should have the old
-        custom property semantics vs. the new faux accessors. Update the
-        inspector protocol accordingly.
-
-        These faux accessors produce descriptors with "get" and "set"
-        properties, but both values are undefined so can't be used
-        directly. A few custom properties actually require their
-        existing magical behaviour, so we now have a flag to 
-        distinguish the expected output.
-
-        * inspector/InjectedScriptSource.js:
-        (.):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::getOwnPropertyDescriptor):
-        * runtime/PropertyDescriptor.cpp:
-        (JSC::PropertyDescriptor::setCustomDescriptor):
-        * runtime/PropertyDescriptor.h:
-        * runtime/PropertySlot.h:
-
-2014-01-28  Mark Lam  
-
-        Remove some unneeded debugger code.
-        https://bugs.webkit.org/show_bug.cgi?id=127805.
-
-        Reviewed by Oliver Hunt.
-
-        JSC will now always support the debugger. Hence, the #if ENABLE(JAVASCRIPT_DEBUGGER)
-        checks can be removed.
-
-        DebuggerCallFrame::callFrame() is also unused and will be removed.
-
-        * debugger/Breakpoint.h:
-        * debugger/Debugger.cpp:
-        * debugger/DebuggerCallFrame.h:
-        * inspector/InjectedScript.cpp:
-        (Inspector::InjectedScript::wrapCallFrames):
-        * inspector/InjectedScript.h:
-        * inspector/JSGlobalObjectScriptDebugServer.cpp:
-        * inspector/JSGlobalObjectScriptDebugServer.h:
-        * inspector/JSJavaScriptCallFrame.cpp:
-        * inspector/JSJavaScriptCallFrame.h:
-        * inspector/JSJavaScriptCallFramePrototype.cpp:
-        * inspector/JSJavaScriptCallFramePrototype.h:
-        * inspector/JavaScriptCallFrame.cpp:
-        * inspector/JavaScriptCallFrame.h:
-        * inspector/ScriptDebugListener.h:
-        * inspector/ScriptDebugServer.cpp:
-        * inspector/ScriptDebugServer.h:
-        * inspector/agents/InspectorDebuggerAgent.cpp:
-        * inspector/agents/InspectorDebuggerAgent.h:
-        * inspector/agents/InspectorRuntimeAgent.cpp:
-        (Inspector::InspectorRuntimeAgent::InspectorRuntimeAgent):
-        (Inspector::setPauseOnExceptionsState):
-        (Inspector::InspectorRuntimeAgent::evaluate):
-        (Inspector::InspectorRuntimeAgent::callFunctionOn):
-        (Inspector::InspectorRuntimeAgent::getProperties):
-        * inspector/agents/InspectorRuntimeAgent.h:
-
-2014-01-28  Geoffrey Garen  
-
-        REGRESSION: JavaScriptCore crash during OS Installation (due to
-        Heap::m_operationInProgress ASSERT vs DelayedReleaseScope)
-        https://bugs.webkit.org/show_bug.cgi?id=127793
-
-        Reviewed by Mark Hahnenberg.
-
-        This was a mistaken ASSERT.
-
-        * API/tests/testapi.mm:
-        (-[EvilAllocationObject doEvilThingsWithContext:]): Added a test to verify
-        that GC from a DelayedReleaseScope doesn't crash.
-
-        * heap/DelayedReleaseScope.h:
-        (JSC::DelayedReleaseScope::~DelayedReleaseScope): Our contract is that
-        it is valid to do anything while running a DelayedReleaseScope -dealloc
-        method, so the Heap must be ready for new allocations and collections.
-
-        Change the Heap's operationInProgress value to NoOperation while running
-        -dealloc methods, so that it doesn't ASSERT in the face of new allocations
-        and collections.
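-
-        A minimal standalone sketch of that destructor-time dance (the types
-        are illustrative stand-ins, not the real Heap):
-
-            #include <functional>
-            #include <iostream>
-            #include <vector>
-
-            enum class HeapOperation { NoOperation, Collection };
-
-            struct Heap {
-                HeapOperation operationInProgress = HeapOperation::NoOperation;
-                std::vector<std::function<void()>> delayedReleases;
-            };
-
-            struct DelayedReleaseScope {
-                Heap& heap;
-                explicit DelayedReleaseScope(Heap& h) : heap(h) {}
-                ~DelayedReleaseScope() {
-                    // Pretend the heap is idle while the -dealloc methods run,
-                    // so new allocations and collections are legal.
-                    HeapOperation saved = heap.operationInProgress;
-                    heap.operationInProgress = HeapOperation::NoOperation;
-                    for (auto& release : heap.delayedReleases)
-                        release();  // may allocate or trigger a collection
-                    heap.delayedReleases.clear();
-                    heap.operationInProgress = saved;
-                }
-            };
-
-            int main() {
-                Heap heap;
-                heap.operationInProgress = HeapOperation::Collection;
-                heap.delayedReleases.push_back([] { std::cout << "-dealloc ran\n"; });
-                { DelayedReleaseScope scope(heap); }
-            }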
-
-        * heap/Heap.h: Made DelayedReleaseScope a friend because exposing a setter
-        for m_operationInProgress seemed like the worse of the two options for
-        encapsulation: we don't really want arbitrary clients to set the Heap's
-        m_operationInProgress.
-
-2014-01-28  Mark Lam  
-
-        Jettison DFG code when neither breakpoints nor the profiler are active.
-        
-
-        Reviewed by Geoffrey Garen.
-
-        We need to jettison the DFG CodeBlocks under the following circumstances:
-        1. When adding breakpoints to a CodeBlock, jettison it if it is a DFG CodeBlock.
-        2. When enabling stepping mode in a CodeBlock, jettison it if it is a DFG CodeBlock.
-        3. When setting the enabled profiler in the VM, we need to jettison all DFG
-           CodeBlocks.
-
-        Instead of emitting speculation checks, the DFG code will now treat Breakpoint,
-        ProfileWillCall, and ProfileDidCall as no-ops similar to a Phantom node. We
-        still need to track these nodes so that they match the corresponding opcodes
-        in the baseline JIT when we jettison and OSR exit. Without them, we would OSR
-        exit to the wrong location in the baseline JIT code.
-
-        In DFGDriver's compileImpl() and DFGPlan's finalizeWithoutNotifyingCallback()
-        we fail the compilation effort with a CompilationInvalidated result. This allows
-        the DFG compiler to re-attempt the compilation of the function after some time
-        if it is hot. The CompilationInvalidated result is supposed to cause the DFG
-        to exercise an exponential back off before re-attempting compilation again
-        (see runtime/CompilationResult.h).
-
-        This patch improves the Octane score from ~2950 to ~3067.
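-
-        As a standalone sketch, the exponential back-off amounts to something
-        like this (names and thresholds are invented for illustration):
-
-            #include <cstdio>
-
-            struct CompileThreshold {
-                unsigned threshold = 100;  // executions before the next DFG attempt
-                unsigned count = 0;
-
-                bool shouldCompile() { return ++count >= threshold; }
-
-                // CompilationInvalidated: double the wait before retrying.
-                void didInvalidate() {
-                    threshold *= 2;
-                    count = 0;
-                }
-            };
-
-            int main() {
-                CompileThreshold t;
-                for (int attempt = 0; attempt < 3; ++attempt) {
-                    while (!t.shouldCompile()) { /* keep running baseline code */ }
-                    std::printf("DFG attempt %d after %u executions\n", attempt, t.threshold);
-                    t.didInvalidate();  // e.g. a breakpoint was added; jettison
-                }
-            }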
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::addBreakpoint):
-        (JSC::CodeBlock::setSteppingMode):
-        * bytecode/CodeBlock.h:
-        * debugger/Debugger.h:
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::AbstractInterpreter::executeEffects):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGDriver.cpp:
-        (JSC::DFG::compileImpl):
-        * dfg/DFGPlan.cpp:
-        (JSC::DFG::Plan::finalizeWithoutNotifyingCallback):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * profiler/LegacyProfiler.cpp:
-        (JSC::LegacyProfiler::startProfiling):
-        (JSC::LegacyProfiler::stopProfiling):
-        * runtime/VM.cpp:
-        (JSC::VM::VM):
-        (JSC::SetEnabledProfilerFunctor::operator()):
-        (JSC::VM::setEnabledProfiler):
-        * runtime/VM.h:
-        (JSC::VM::enabledProfiler):
-
-2014-01-27  Joseph Pecoraro  
-
-        -[JSContext evaluateScript:] calls JSEvaluateScript with startingLineNumber 0, later interpreted as a oneBasedInt
-        https://bugs.webkit.org/show_bug.cgi?id=127648
-
-        Reviewed by Geoffrey Garen.
-
-        The actual bug being fixed here is that line numbers for
-        scripts evaluated via the JSC APIs are now sane. However,
-        there is no good infrastructure in place right now to test that.
-
-        * API/tests/testapi.c:
-        (main):
-        * API/tests/testapi.mm:
-        (testObjectiveCAPI):
-        Add tests for exception line numbers and handling of bad
-        startingLineNumbers in public APIs. These tests were already
-        passing; I just added them to make sure they do not regress
-        in the future.
-
-        * API/JSBase.cpp:
-        (JSEvaluateScript):
-        (JSCheckScriptSyntax):
-        * API/JSBase.h:
-        * API/JSObjectRef.cpp:
-        (JSObjectMakeFunction):
-        * API/JSObjectRef.h:
-        * API/JSScriptRef.cpp:
-        * API/JSScriptRefPrivate.h:
-        * API/JSStringRef.h:
-        - Clarify documentation that startingLineNumber is 1-based and clamped.
-        - Add clamping in the implementation to put sane values into JSC::SourceProvider.
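-
-        The clamping itself amounts to something like this (the function
-        name is invented for illustration):
-
-            #include <algorithm>
-            #include <cstdio>
-
-            // startingLineNumber is documented as 1-based, so anything
-            // below 1 is clamped before it reaches the source provider.
-            static int clampStartingLineNumber(int startingLineNumber) {
-                return std::max(startingLineNumber, 1);
-            }
-
-            int main() {
-                std::printf("%d %d %d\n",
-                    clampStartingLineNumber(0),    // 1
-                    clampStartingLineNumber(-7),   // 1
-                    clampStartingLineNumber(42));  // 42
-            }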
-
-        * inspector/agents/InspectorDebuggerAgent.cpp:
-        (Inspector::InspectorDebuggerAgent::didParseSource):
-        Remove the FIXME now that the SourceProvider is giving us expected values.
-
-2014-01-27  Joseph Pecoraro  
-
-        Web Inspector: CRASH when debugger closes remote inspecting JSContext
-        https://bugs.webkit.org/show_bug.cgi?id=127738
-
-        Reviewed by Timothy Hatcher.
-
-        RemoteInspectorXPCConnection could be accessed in a background dispatch
-        queue, while being deallocated on the main thread when a connection
-        was suddenly terminated.
-
-        Make RemoteInspectorXPCConnection a ThreadSafeRefCounted object. Always
-        keep the connection object ref'd until the main thread calls close()
-        and removes its reference. At that point we can close the connection,
-        queue, and deref safely on the background queue.
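-
-        The ownership rule can be sketched with shared_ptr standing in for
-        ThreadSafeRefCounted (a simplified, single-threaded model):
-
-            #include <cstdio>
-            #include <memory>
-
-            struct XPCConnection {
-                bool closed = false;
-                void sendMessage(const char* msg) {
-                    if (closed)                    // bail based on the closed state
-                        return;
-                    std::printf("send: %s\n", msg);
-                }
-                void close() { closed = true; }    // main thread only
-            };
-
-            int main() {
-                auto inspectorRef = std::make_shared<XPCConnection>();
-                std::shared_ptr<XPCConnection> queueRef = inspectorRef;  // queue's ref
-                inspectorRef->close();
-                inspectorRef.reset();              // main thread drops its reference
-                queueRef->sendMessage("late");     // safe: still alive, just a no-op
-            }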
-
-        * inspector/remote/RemoteInspector.h:
-        * inspector/remote/RemoteInspector.mm:
-        (Inspector::RemoteInspector::setupXPCConnectionIfNeeded):
-        (Inspector::RemoteInspector::xpcConnectionFailed):
-        For simplicity, RemoteInspectorXPCConnections don't have any threading
-        primitives to prevent client callbacks after they are closed. RemoteInspector
-        does, so it just ignores possible callbacks from connections it no longer
-        cares about.
-
-        * inspector/remote/RemoteInspectorXPCConnection.h:
-        * inspector/remote/RemoteInspectorXPCConnection.mm:
-        (Inspector::RemoteInspectorXPCConnection::RemoteInspectorXPCConnection):
-        (Inspector::RemoteInspectorXPCConnection::~RemoteInspectorXPCConnection):
-        (Inspector::RemoteInspectorXPCConnection::close):
-        Keep the connection alive as long as the queue it can be used on
-        is alive. Clean up everything on the queue when close() is called.
-
-        (Inspector::RemoteInspectorXPCConnection::handleEvent):
-        Checking if closed here is not thread-safe, so it is meaningless.
-        Remove the check.
-
-        (Inspector::RemoteInspectorXPCConnection::sendMessage):
-        Bail based on the m_closed state.
-
-2014-01-27  Joseph Pecoraro  
-
-        JavaScriptCore: Enable -Wimplicit-fallthrough and add FALLTHROUGH annotation where needed
-        https://bugs.webkit.org/show_bug.cgi?id=127647
-
-        Reviewed by Anders Carlsson.
-
-        Explicitly annotate switch case fallthroughs in JavaScriptCore and
-        enable warnings for unannotated fallthroughs.
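-
-        The annotation pattern looks like this, shown here with C++17's
-        standard [[fallthrough]] in place of WebKit's FALLTHROUGH macro:
-
-            #include <cstdio>
-
-            static const char* describe(int n) {
-                switch (n) {
-                case 0:
-                case 1:             // adjacent labels need no annotation
-                    return "small";
-                case 2:
-                    n += 1;         // deliberately continue into the next case
-                    [[fallthrough]];
-                case 3:
-                    return "medium";
-                default:
-                    return "large";
-                }
-            }
-
-            int main() {
-                std::printf("%s %s %s\n", describe(1), describe(2), describe(9));
-            }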
-
-        * dfg/DFGArithMode.h:
-        (doesOverflow):
-        Only insert FALLTHROUGH in release builds. In debug builds, the
-        FALLTHROUGH would be unreachable (due to the ASSERT_NOT_REACHED)
-        and would trigger a warning.
-
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::fillSpeculateInt32Internal):
-        (JSC::DFG::SpeculativeJIT::fillSpeculateInt52):
-        Due to the templatized nature of this function, a fallthrough
-        in one of the template expansions would be unreachable. Disable
-        the warning for this function.
-
-        * Configurations/Base.xcconfig:
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-        * dfg/DFGCFGSimplificationPhase.cpp:
-        (JSC::DFG::CFGSimplificationPhase::run):
-        * dfg/DFGValidate.cpp:
-        (JSC::DFG::Validate::validateCPS):
-        * parser/Lexer.cpp:
-        (JSC::Lexer::lex):
-        * parser/Parser.cpp:
-        (JSC::Parser::parseStatement):
-        (JSC::Parser::parseProperty):
-        * runtime/JSArray.cpp:
-        (JSC::JSArray::push):
-        * runtime/JSONObject.cpp:
-        (JSC::Walker::walk):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::putByIndex):
-        (JSC::JSObject::putByIndexBeyondVectorLength):
-        * runtime/JSObject.h:
-        (JSC::JSObject::setIndexQuickly):
-        (JSC::JSObject::initializeIndex):
-        * runtime/LiteralParser.cpp:
-        (JSC::LiteralParser::parse):
-        * yarr/YarrInterpreter.cpp:
-        (JSC::Yarr::Interpreter::backtrackParenthesesOnceBegin):
-        (JSC::Yarr::Interpreter::backtrackParenthesesOnceEnd):
-        * yarr/YarrParser.h:
-        (JSC::Yarr::Parser::CharacterClassParserDelegate::atomPatternCharacter):
-        (JSC::Yarr::Parser::CharacterClassParserDelegate::atomBuiltInCharacterClass):
-        (JSC::Yarr::Parser::parseEscape):
-        (JSC::Yarr::Parser::parseTokens):
-
-2014-01-27  Andy Estes  
-
-        Scrub WebKit API headers of WTF macros
-        https://bugs.webkit.org/show_bug.cgi?id=127706
-
-        Reviewed by David Kilzer.
-
-        * Configurations/FeatureDefines.xcconfig: Added ENABLE_INSPECTOR.
-
-2014-01-27  Mark Lam  
-
-        Remove unused CodeBlock::createActivation().
-        
-
-        Reviewed by Filip Pizlo.
-
-        * bytecode/CodeBlock.cpp:
-        * bytecode/CodeBlock.h:
-
-2014-01-26  Andreas Kling  
-
-        JSC: Pack unlinked instructions harder.
-        
-
-        Store UnlinkedCodeBlock's instructions in a variable-length stream
-        to reduce memory usage. Compression rate ends up around 60-61%.
-
-        The format is very simple. Every instruction starts with a 1 byte
-        opcode. It's followed by an opcode-dependent number of argument
-        values, each encoded separately for maximum packing. There are
-        7 packed value formats:
-
-            5-bit positive integer
-            5-bit negative integer
-            13-bit positive integer
-            13-bit negative integer
-            5-bit constant register index
-            13-bit constant register index
-            32-bit value (fallback)
-
-        27.5 MB progression on Membuster3. (~2% of total memory.)
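-
-        A toy standalone version of the packing idea, keeping just the
-        5-bit fast case and the 32-bit fallback:
-
-            #include <cstdint>
-            #include <cstdio>
-            #include <vector>
-
-            static void append8(std::vector<uint8_t>& out, uint8_t v) { out.push_back(v); }
-
-            static void appendOperand(std::vector<uint8_t>& out, uint32_t v) {
-                if (v < 0x1f) {
-                    append8(out, static_cast<uint8_t>(v));  // 5-bit fast case
-                    return;
-                }
-                append8(out, 0x1f);                         // escape marker
-                for (int shift = 0; shift < 32; shift += 8)
-                    append8(out, static_cast<uint8_t>(v >> shift));  // 32-bit fallback
-            }
-
-            int main() {
-                std::vector<uint8_t> stream;
-                append8(stream, /* opcode */ 7);  // 1-byte opcode leads each instruction
-                appendOperand(stream, 3);         // packs into 1 byte
-                appendOperand(stream, 100000);    // needs the 5-byte fallback
-                std::printf("encoded %zu bytes\n", stream.size());  // 7
-            }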
-
-        Reviewed by Filip Pizlo.
-
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bytecode/UnlinkedInstructionStream.h: Added.
-        (JSC::UnlinkedInstructionStream::count):
-        (JSC::UnlinkedInstructionStream::Reader::atEnd):
-        * bytecode/UnlinkedInstructionStream.cpp: Added.
-        (JSC::UnlinkedInstructionStream::Reader::Reader):
-        (JSC::UnlinkedInstructionStream::Reader::read8):
-        (JSC::UnlinkedInstructionStream::Reader::read32):
-        (JSC::UnlinkedInstructionStream::Reader::next):
-        (JSC::append8):
-        (JSC::append32):
-        (JSC::UnlinkedInstructionStream::UnlinkedInstructionStream):
-        (JSC::UnlinkedInstructionStream::unpackForDebugging):
-        * bytecompiler/BytecodeGenerator.cpp:
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-        * bytecode/UnlinkedCodeBlock.cpp:
-        (JSC::UnlinkedCodeBlock::lineNumberForBytecodeOffset):
-        (JSC::dumpLineColumnEntry):
-        (JSC::UnlinkedCodeBlock::expressionRangeForBytecodeOffset):
-        (JSC::UnlinkedCodeBlock::setInstructions):
-        (JSC::UnlinkedCodeBlock::instructions):
-        * bytecode/UnlinkedCodeBlock.h:
-        (JSC::BytecodeGenerator::generate):
-
-2014-01-26  Joseph Pecoraro  
-
-        Web Inspector: Move InspectorDebuggerAgent into JavaScriptCore
-        https://bugs.webkit.org/show_bug.cgi?id=127629
-
-        Rubber-stamped by Sam Weinig.
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        - Add new files to the build.
-        - Also, since non-REMOTE_INSPECTOR ports cannot yet connect to a
-          JSGlobalObject for inspection, remove those files, as they don't
-          need to be built.
-
-        * inspector/EventLoop.cpp: Added.
-        (Inspector::EventLoop::cycle):
-        * inspector/EventLoop.h: Added.
-        (Inspector::EventLoop::EventLoop):
-        (Inspector::EventLoop::ended):
-        Add a JavaScriptCore version of EventLoop. This is currently only
-        used by the Mac port for JSGlobalObject remote inspection. Keep
-        the WebCore/platform version alive because for the Mac port it does
-        slightly different things involving AppKit.
-
-        * inspector/JSGlobalObjectInspectorController.cpp:
-        (Inspector::JSGlobalObjectInspectorController::JSGlobalObjectInspectorController):
-        Create DebuggerAgent and hook up ScriptDebugServer where needed.
-
-        * inspector/JSGlobalObjectScriptDebugServer.cpp: Added.
-        (Inspector::JSGlobalObjectScriptDebugServer::JSGlobalObjectScriptDebugServer):
-        (Inspector::JSGlobalObjectScriptDebugServer::addListener):
-        (Inspector::JSGlobalObjectScriptDebugServer::removeListener):
-        (Inspector::JSGlobalObjectScriptDebugServer::recompileAllJSFunctions):
-        (Inspector::JSGlobalObjectScriptDebugServer::runEventLoopWhilePaused):
-        * inspector/JSGlobalObjectScriptDebugServer.h: Added.
-        Simple implementation of ScriptDebugServer with a JSGlobalObject.
-
-        * inspector/agents/InspectorDebuggerAgent.cpp: Renamed from Source/WebCore/inspector/InspectorDebuggerAgent.cpp.
-        * inspector/agents/InspectorDebuggerAgent.h: Renamed from Source/WebCore/inspector/InspectorDebuggerAgent.h.
-        Copied from WebCore. A few methods need to be made virtual so that Web implementations
-        can override and extend the functionality. E.g. sourceMapURLForScript and enable/disable.
-        
-        * inspector/agents/JSGlobalObjectDebuggerAgent.cpp: Added.
-        * inspector/agents/JSGlobalObjectDebuggerAgent.h: Added.
-        (Inspector::JSGlobalObjectDebuggerAgent::JSGlobalObjectDebuggerAgent):
-        (Inspector::JSGlobalObjectDebuggerAgent::startListeningScriptDebugServer):
-        (Inspector::JSGlobalObjectDebuggerAgent::stopListeningScriptDebugServer):
-        (Inspector::JSGlobalObjectDebuggerAgent::injectedScriptForEval):
-        Simple implementation of DebuggerAgent with a JSGlobalObject.
-
-2014-01-25  Mark Lam  
-
-        Gardening: fix build breakage from previous commit.
-
-        Not reviewed.
-
-        * profiler/ProfileNode.cpp:
-        (JSC::ProfileNode::debugPrintData):
-        - Removed obsolete references to "visible" timers.
-
-2014-01-25  Timothy Hatcher  
-
-        Remove dead code from the JSC profiler.
-
-        https://bugs.webkit.org/show_bug.cgi?id=127643
-
-        Reviewed by Mark Lam.
-
-        * profiler/Profile.cpp:
-        * profiler/Profile.h:
-        * profiler/ProfileGenerator.cpp:
-        (JSC::ProfileGenerator::stopProfiling):
-        * profiler/ProfileNode.cpp:
-        (JSC::ProfileNode::ProfileNode):
-        (JSC::ProfileNode::stopProfiling):
-        (JSC::ProfileNode::endAndRecordCall):
-        (JSC::ProfileNode::debugPrintData):
-        (JSC::ProfileNode::debugPrintDataSampleStyle):
-        * profiler/ProfileNode.h:
-        (JSC::ProfileNode::totalTime):
-        (JSC::ProfileNode::setTotalTime):
-        (JSC::ProfileNode::selfTime):
-        (JSC::ProfileNode::setSelfTime):
-        (JSC::ProfileNode::totalPercent):
-        (JSC::ProfileNode::selfPercent):
-        Remove support for things like focus and exclude. The Inspector does those in JS now.
-
-2014-01-25  Sam Weinig  
-
-        Remove unused support for DRAGGABLE_REGION
-        https://bugs.webkit.org/show_bug.cgi?id=127642
-
-        Reviewed by Simon Fraser.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2014-01-25  Darin Adler  
-
-        Try to fix Mac build.
-
-        * runtime/DatePrototype.cpp: Put the include of  inside
-        a conditional since we don't have that header in our Mac build configuration.
-
-2014-01-25  Darin Adler  
-
-        Call deprecatedCharacters instead of characters at more call sites
-        https://bugs.webkit.org/show_bug.cgi?id=127631
-
-        Reviewed by Sam Weinig.
-
-        * API/JSValueRef.cpp:
-        (JSValueMakeFromJSONString):
-        * API/OpaqueJSString.cpp:
-        (OpaqueJSString::~OpaqueJSString):
-        * bindings/ScriptValue.cpp:
-        (Deprecated::jsToInspectorValue):
-        * inspector/ContentSearchUtilities.cpp:
-        (Inspector::ContentSearchUtilities::createSearchRegexSource):
-        * inspector/InspectorValues.cpp:
-        * runtime/Identifier.h:
-        (JSC::Identifier::deprecatedCharacters):
-        * runtime/JSStringBuilder.h:
-        (JSC::JSStringBuilder::append):
-        Use the new name.
-
-2014-01-25  Darin Adler  
-
-        Get rid of ICU_UNICODE and WCHAR_UNICODE remnants
-        https://bugs.webkit.org/show_bug.cgi?id=127623
-
-        Reviewed by Anders Carlsson.
-
-        * runtime/DatePrototype.cpp: Removed USE(ICU_UNICODE) checks, since that's always true now.
-
-2014-01-25  Darin Adler  
-
-        [Mac] Rewrite locale-specific date formatting code to remove strange string creation
-        https://bugs.webkit.org/show_bug.cgi?id=127624
-
-        Reviewed by Anders Carlsson.
-
-        * runtime/DatePrototype.cpp:
-        (JSC::formatLocaleDate): Use some smart pointers and conversion operators we already
-        have to do the formatting in a more straightforward way.
-
-2014-01-25  Anders Carlsson  
-
-        Remove atomicIncrement/atomicDecrement
-        https://bugs.webkit.org/show_bug.cgi?id=127625
-
-        Reviewed by Andreas Kling.
-
-        Replace atomicIncrement/atomicDecrement with std::atomic.
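-
-        The replacement pattern, as a standalone example:
-
-            #include <atomic>
-            #include <cstdio>
-            #include <thread>
-
-            int main() {
-                // std::atomic replaces the old atomicIncrement/atomicDecrement
-                // free functions; ++ and -- are atomic read-modify-writes.
-                std::atomic<int> counter(0);
-                std::thread a([&] { for (int i = 0; i < 100000; ++i) ++counter; });
-                std::thread b([&] { for (int i = 0; i < 100000; ++i) ++counter; });
-                a.join();
-                b.join();
-                std::printf("%d\n", counter.load());  // always 200000
-            }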
-
-        * bytecode/Watchpoint.h:
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::lower):
-        * profiler/ProfilerDatabase.cpp:
-        (JSC::Profiler::Database::Database):
-        (JSC::Profiler::Database::addDatabaseToAtExit):
-
-2014-01-24  Joseph Pecoraro  
-
-        Web Inspector: Move InspectorRuntimeAgent into JavaScriptCore
-        https://bugs.webkit.org/show_bug.cgi?id=127605
-
-        Reviewed by Timothy Hatcher.
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        Add new files to the build.
-
-        * inspector/agents/InspectorRuntimeAgent.h: Renamed from Source/WebCore/inspector/InspectorRuntimeAgent.h.
-        * inspector/agents/InspectorRuntimeAgent.cpp: Renamed from Source/WebCore/inspector/InspectorRuntimeAgent.cpp.
-        (Inspector::InspectorRuntimeAgent::InspectorRuntimeAgent):
-        (Inspector::InspectorRuntimeAgent::parse):
-        (Inspector::InspectorRuntimeAgent::evaluate):
-        (Inspector::InspectorRuntimeAgent::callFunctionOn):
-        (Inspector::InspectorRuntimeAgent::getProperties):
-        - Move the agent into JavaScriptCore.
-        - Modernize and cleanup.
-        - Make globalVM a pure virtual function for subclasses to implement.
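-
-        The shape of that refactoring, as a standalone sketch (the types
-        here are simplified stand-ins):
-
-            #include <cstdio>
-
-            struct VM { const char* name; };
-
-            class InspectorRuntimeAgent {
-            public:
-                virtual ~InspectorRuntimeAgent() = default;
-                void evaluate() { std::printf("evaluating in %s\n", globalVM().name); }
-            protected:
-                virtual VM& globalVM() = 0;  // each embedding supplies its VM
-            };
-
-            class JSGlobalObjectRuntimeAgent final : public InspectorRuntimeAgent {
-            public:
-                explicit JSGlobalObjectRuntimeAgent(VM& vm) : m_vm(vm) {}
-            private:
-                VM& globalVM() override { return m_vm; }
-                VM& m_vm;
-            };
-
-            int main() {
-                VM vm { "JSGlobalObject VM" };
-                JSGlobalObjectRuntimeAgent agent(vm);
-                agent.evaluate();
-            }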
-
-        * inspector/agents/JSGlobalObjectRuntimeAgent.h: Added.
-        * inspector/agents/JSGlobalObjectRuntimeAgent.cpp: Added.
-        (Inspector::JSGlobalObjectRuntimeAgent::JSGlobalObjectRuntimeAgent):
-        (Inspector::JSGlobalObjectRuntimeAgent::didCreateFrontendAndBackend):
-        (Inspector::JSGlobalObjectRuntimeAgent::willDestroyFrontendAndBackend):
-        (Inspector::JSGlobalObjectRuntimeAgent::globalVM):
-        (Inspector::JSGlobalObjectRuntimeAgent::injectedScriptForEval):
-        Straightforward JSGlobalObject implementation.
-
-        * inspector/JSGlobalObjectInspectorController.cpp:
-        (Inspector::JSGlobalObjectInspectorController::JSGlobalObjectInspectorController):
-        Add a runtime agent when inspecting a JSContext!
-
-2014-01-23  Joseph Pecoraro  
-
-        Move JavaScriptCallFrame and ScriptDebugServer into JavaScriptCore for inspector
-        https://bugs.webkit.org/show_bug.cgi?id=127543
-
-        Reviewed by Geoffrey Garen.
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        Add new files.
-
-        * inspector/ScriptDebugListener.h:
-        Extract WebCore knowledge from ScriptDebugServer. This will
-        eventually be made to work outside of WebCore.
-
-        * inspector/ScriptDebugServer.h: Renamed from Source/WebCore/bindings/js/ScriptDebugServer.h.
-        * inspector/ScriptDebugServer.cpp: Renamed from Source/WebCore/bindings/js/ScriptDebugServer.cpp.
-        (Inspector::ScriptDebugServer::evaluateBreakpointAction):
-        (Inspector::ScriptDebugServer::dispatchDidPause):
-        (Inspector::ScriptDebugServer::dispatchBreakpointActionLog):
-        (Inspector::ScriptDebugServer::dispatchBreakpointActionSound):
-        (Inspector::ScriptDebugServer::sourceParsed):
-        (Inspector::ScriptDebugServer::dispatchFunctionToListeners):
-        (Inspector::ScriptDebugServer::handlePause):
-        Modernize code, and call the new ScriptDebugListener callbacks where appropriate.
-
-        * inspector/JSJavaScriptCallFrame.cpp: Renamed from Source/WebCore/bindings/js/JSJavaScriptCallFrameCustom.cpp.
-        (Inspector::JSJavaScriptCallFrame::JSJavaScriptCallFrame):
-        (Inspector::JSJavaScriptCallFrame::finishCreation):
-        (Inspector::JSJavaScriptCallFrame::createPrototype):
-        (Inspector::JSJavaScriptCallFrame::destroy):
-        (Inspector::JSJavaScriptCallFrame::releaseImpl):
-        (Inspector::JSJavaScriptCallFrame::~JSJavaScriptCallFrame):
-        (Inspector::JSJavaScriptCallFrame::evaluate):
-        (Inspector::JSJavaScriptCallFrame::scopeType):
-        (Inspector::JSJavaScriptCallFrame::caller):
-        (Inspector::JSJavaScriptCallFrame::sourceID):
-        (Inspector::JSJavaScriptCallFrame::line):
-        (Inspector::JSJavaScriptCallFrame::column):
-        (Inspector::JSJavaScriptCallFrame::functionName):
-        (Inspector::JSJavaScriptCallFrame::scopeChain):
-        (Inspector::JSJavaScriptCallFrame::thisObject):
-        (Inspector::JSJavaScriptCallFrame::type):
-        (Inspector::toJS):
-        (Inspector::toJSJavaScriptCallFrame):
-        * inspector/JSJavaScriptCallFrame.h: Added.
-        (Inspector::JSJavaScriptCallFrame::createStructure):
-        (Inspector::JSJavaScriptCallFrame::create):
-        (Inspector::JSJavaScriptCallFrame::impl):
-        * inspector/JSJavaScriptCallFramePrototype.cpp: Added.
-        (Inspector::JSJavaScriptCallFramePrototype::finishCreation):
-        (Inspector::jsJavaScriptCallFramePrototypeFunctionEvaluate):
-        (Inspector::jsJavaScriptCallFramePrototypeFunctionScopeType):
-        (Inspector::jsJavaScriptCallFrameAttributeCaller):
-        (Inspector::jsJavaScriptCallFrameAttributeSourceID):
-        (Inspector::jsJavaScriptCallFrameAttributeLine):
-        (Inspector::jsJavaScriptCallFrameAttributeColumn):
-        (Inspector::jsJavaScriptCallFrameAttributeFunctionName):
-        (Inspector::jsJavaScriptCallFrameAttributeScopeChain):
-        (Inspector::jsJavaScriptCallFrameAttributeThisObject):
-        (Inspector::jsJavaScriptCallFrameAttributeType):
-        (Inspector::jsJavaScriptCallFrameConstantGLOBAL_SCOPE):
-        (Inspector::jsJavaScriptCallFrameConstantLOCAL_SCOPE):
-        (Inspector::jsJavaScriptCallFrameConstantWITH_SCOPE):
-        (Inspector::jsJavaScriptCallFrameConstantCLOSURE_SCOPE):
-        (Inspector::jsJavaScriptCallFrameConstantCATCH_SCOPE):
-        * inspector/JSJavaScriptCallFramePrototype.h: Added.
-        (Inspector::JSJavaScriptCallFramePrototype::create):
-        (Inspector::JSJavaScriptCallFramePrototype::createStructure):
-        (Inspector::JSJavaScriptCallFramePrototype::JSJavaScriptCallFramePrototype):
-        * inspector/JavaScriptCallFrame.cpp: Renamed from Source/WebCore/bindings/js/JavaScriptCallFrame.cpp.
-        (Inspector::JavaScriptCallFrame::caller):
-        * inspector/JavaScriptCallFrame.h: Renamed from Source/WebCore/bindings/js/JavaScriptCallFrame.h.
-        Port of JavaScriptCallFrame.idl to a set of native JS classes.
-
-2014-01-24  Mark Lam  
-
-        DebuggerCallFrame::evaluateWithCallFrame() should not execute a null executable.
-        
-
-        Reviewed by Oliver Hunt.
-
-        In DebuggerCallFrame::evaluateWithCallFrame(), if the script string that
-        is passed in is bad, it will fail to create an Executable i.e.
-        EvalExecutable::create() returns a null pointer. However,
-        DebuggerCallFrame::evaluateWithCallFrame() was just clearing the
-        exception and proceeded to execute the null pointer as an Executable.
-        A crash ensues.
-
-        Now, if an exception is detected while creating the Executable, we
-        abort instead.
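-
-        The gist of the fix, as a standalone sketch (illustrative types):
-
-            #include <cstdio>
-            #include <memory>
-            #include <string>
-
-            struct EvalExecutable { std::string script; };
-
-            static std::unique_ptr<EvalExecutable> createExecutable(const std::string& script) {
-                if (script.empty())      // stands in for "the script failed to parse"
-                    return nullptr;      // an exception would be pending here
-                return std::make_unique<EvalExecutable>(EvalExecutable{ script });
-            }
-
-            static bool evaluate(const std::string& script) {
-                auto executable = createExecutable(script);
-                if (!executable)
-                    return false;        // abort: never execute a null executable
-                std::printf("running: %s\n", executable->script.c_str());
-                return true;
-            }
-
-            int main() {
-                evaluate("1 + 1");
-                if (!evaluate(""))
-                    std::printf("evaluation aborted\n");
-            }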
-
-        * debugger/DebuggerCallFrame.cpp:
-        (JSC::DebuggerCallFrame::evaluateWithCallFrame):
-
-2014-01-24  Oliver Hunt  
-
-        Put functions need to take a base object and a this value, and perform type checks on |this|
-        https://bugs.webkit.org/show_bug.cgi?id=127594
-
-        Reviewed by Geoffrey Garen.
-
-        Change the signature for static setter functions, and update uses
-
-        * create_hash_table:
-        * runtime/Lookup.h:
-        (JSC::putEntry):
-        * runtime/PutPropertySlot.h:
-        * runtime/RegExpConstructor.cpp:
-        (JSC::setRegExpConstructorInput):
-        (JSC::setRegExpConstructorMultiline):
-
-2014-01-24  Oliver Hunt  
-
-        Generic JSObject::put should handle static properties in the classinfo hierarchy
-        https://bugs.webkit.org/show_bug.cgi?id=127523
-
-        Reviewed by Geoffrey Garen.
-
-        This patch makes JSObject::put correctly call static setters
-        defined by the ClassInfo.
-
-        To make this not clobber performance, the ClassInfo HashTable
-        now includes a flag to indicate that it contains setters. This
-        required updating the lut generator so that it tracked (and emitted)
-        this.
-
-        The rest of the change was making a number of the methods take
-        a VM rather than an ExecState*, so that Structure could set the
-        getter/setter flags during construction (if necessary).
-
-        This also means most objects do not need to perform a lookupPut
-        manually anymore, so most custom ::put's are no longer needed.
-        DOMWindow is the only exception as it has interesting security
-        related semantics.
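-
-        A toy model of the table flag (illustrative types, not the real
-        ClassInfo/HashTable):
-
-            #include <cstdio>
-            #include <cstring>
-
-            struct StaticEntry { const char* name; void (*setter)(int); };
-
-            struct ClassTable {
-                const StaticEntry* entries;
-                int count;
-                bool hasSetters;  // emitted once by the lut generator
-            };
-
-            static void setMultiline(int v) { std::printf("static setter: %d\n", v); }
-
-            static const StaticEntry regExpEntries[] = { { "multiline", setMultiline } };
-            static const ClassTable regExpTable = { regExpEntries, 1, true };
-
-            static void put(const ClassTable& table, const char* name, int value) {
-                if (table.hasSetters) {  // cheap reject when the table has none
-                    for (int i = 0; i < table.count; ++i) {
-                        if (!std::strcmp(table.entries[i].name, name)) {
-                            table.entries[i].setter(value);
-                            return;
-                        }
-                    }
-                }
-                std::printf("ordinary property store: %s=%d\n", name, value);
-            }
-
-            int main() {
-                put(regExpTable, "multiline", 1);
-                put(regExpTable, "lastIndex", 5);
-            }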
-
-        * create_hash_table:
-        * interpreter/CallFrame.h:
-        (JSC::ExecState::arrayConstructorTable):
-        (JSC::ExecState::arrayPrototypeTable):
-        (JSC::ExecState::booleanPrototypeTable):
-        (JSC::ExecState::dataViewTable):
-        (JSC::ExecState::dateTable):
-        (JSC::ExecState::dateConstructorTable):
-        (JSC::ExecState::errorPrototypeTable):
-        (JSC::ExecState::globalObjectTable):
-        (JSC::ExecState::jsonTable):
-        (JSC::ExecState::numberConstructorTable):
-        (JSC::ExecState::numberPrototypeTable):
-        (JSC::ExecState::objectConstructorTable):
-        (JSC::ExecState::privateNamePrototypeTable):
-        (JSC::ExecState::regExpTable):
-        (JSC::ExecState::regExpConstructorTable):
-        (JSC::ExecState::regExpPrototypeTable):
-        (JSC::ExecState::stringConstructorTable):
-        (JSC::ExecState::promisePrototypeTable):
-        (JSC::ExecState::promiseConstructorTable):
-        * runtime/ArrayConstructor.cpp:
-        (JSC::ArrayConstructor::getOwnPropertySlot):
-        * runtime/ArrayPrototype.cpp:
-        (JSC::ArrayPrototype::getOwnPropertySlot):
-        * runtime/BooleanPrototype.cpp:
-        (JSC::BooleanPrototype::getOwnPropertySlot):
-        * runtime/ClassInfo.h:
-        (JSC::ClassInfo::propHashTable):
-        * runtime/DateConstructor.cpp:
-        (JSC::DateConstructor::getOwnPropertySlot):
-        * runtime/DatePrototype.cpp:
-        (JSC::DatePrototype::getOwnPropertySlot):
-        * runtime/ErrorPrototype.cpp:
-        (JSC::ErrorPrototype::getOwnPropertySlot):
-        * runtime/JSDataViewPrototype.cpp:
-        (JSC::JSDataViewPrototype::getOwnPropertySlot):
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::getOwnPropertySlot):
-        * runtime/JSONObject.cpp:
-        (JSC::JSONObject::getOwnPropertySlot):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::put):
-        (JSC::JSObject::deleteProperty):
-        * runtime/JSPromiseConstructor.cpp:
-        (JSC::JSPromiseConstructor::getOwnPropertySlot):
-        * runtime/JSPromisePrototype.cpp:
-        (JSC::JSPromisePrototype::getOwnPropertySlot):
-        * runtime/Lookup.h:
-        (JSC::HashTable::copy):
-        (JSC::putEntry):
-        (JSC::lookupPut):
-        * runtime/NamePrototype.cpp:
-        (JSC::NamePrototype::getOwnPropertySlot):
-        * runtime/NumberConstructor.cpp:
-        (JSC::NumberConstructor::getOwnPropertySlot):
-        * runtime/NumberConstructor.h:
-        * runtime/NumberPrototype.cpp:
-        (JSC::NumberPrototype::getOwnPropertySlot):
-        * runtime/ObjectConstructor.cpp:
-        (JSC::ObjectConstructor::getOwnPropertySlot):
-        * runtime/RegExpConstructor.cpp:
-        (JSC::RegExpConstructor::getOwnPropertySlot):
-        * runtime/RegExpConstructor.h:
-        * runtime/RegExpObject.cpp:
-        (JSC::RegExpObject::getOwnPropertySlot):
-        (JSC::RegExpObject::put):
-        * runtime/RegExpPrototype.cpp:
-        (JSC::RegExpPrototype::getOwnPropertySlot):
-        * runtime/StringConstructor.cpp:
-        (JSC::StringConstructor::getOwnPropertySlot):
-        * runtime/Structure.cpp:
-        (JSC::Structure::Structure):
-        (JSC::Structure::freezeTransition):
-        (JSC::ClassInfo::hasStaticSetterOrReadonlyProperties):
-
-2014-01-24  Commit Queue  
-
-        Unreviewed, rolling out r162713.
-        http://trac.webkit.org/changeset/162713
-        https://bugs.webkit.org/show_bug.cgi?id=127593
-
-        broke media/network-no-source-const-shadow (Requested by
-        thorton on #webkit).
-
-        * create_hash_table:
-        * interpreter/CallFrame.h:
-        (JSC::ExecState::arrayConstructorTable):
-        (JSC::ExecState::arrayPrototypeTable):
-        (JSC::ExecState::booleanPrototypeTable):
-        (JSC::ExecState::dataViewTable):
-        (JSC::ExecState::dateTable):
-        (JSC::ExecState::dateConstructorTable):
-        (JSC::ExecState::errorPrototypeTable):
-        (JSC::ExecState::globalObjectTable):
-        (JSC::ExecState::jsonTable):
-        (JSC::ExecState::numberConstructorTable):
-        (JSC::ExecState::numberPrototypeTable):
-        (JSC::ExecState::objectConstructorTable):
-        (JSC::ExecState::privateNamePrototypeTable):
-        (JSC::ExecState::regExpTable):
-        (JSC::ExecState::regExpConstructorTable):
-        (JSC::ExecState::regExpPrototypeTable):
-        (JSC::ExecState::stringConstructorTable):
-        (JSC::ExecState::promisePrototypeTable):
-        (JSC::ExecState::promiseConstructorTable):
-        * runtime/ArrayConstructor.cpp:
-        (JSC::ArrayConstructor::getOwnPropertySlot):
-        * runtime/ArrayPrototype.cpp:
-        (JSC::ArrayPrototype::getOwnPropertySlot):
-        * runtime/BooleanPrototype.cpp:
-        (JSC::BooleanPrototype::getOwnPropertySlot):
-        * runtime/ClassInfo.h:
-        (JSC::ClassInfo::propHashTable):
-        * runtime/DateConstructor.cpp:
-        (JSC::DateConstructor::getOwnPropertySlot):
-        * runtime/DatePrototype.cpp:
-        (JSC::DatePrototype::getOwnPropertySlot):
-        * runtime/ErrorPrototype.cpp:
-        (JSC::ErrorPrototype::getOwnPropertySlot):
-        * runtime/JSDataViewPrototype.cpp:
-        (JSC::JSDataViewPrototype::getOwnPropertySlot):
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::getOwnPropertySlot):
-        * runtime/JSONObject.cpp:
-        (JSC::JSONObject::getOwnPropertySlot):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::put):
-        (JSC::JSObject::deleteProperty):
-        * runtime/JSPromiseConstructor.cpp:
-        (JSC::JSPromiseConstructor::getOwnPropertySlot):
-        * runtime/JSPromisePrototype.cpp:
-        (JSC::JSPromisePrototype::getOwnPropertySlot):
-        * runtime/Lookup.h:
-        (JSC::HashTable::copy):
-        (JSC::putEntry):
-        (JSC::lookupPut):
-        * runtime/NamePrototype.cpp:
-        (JSC::NamePrototype::getOwnPropertySlot):
-        * runtime/NumberConstructor.cpp:
-        (JSC::NumberConstructor::getOwnPropertySlot):
-        (JSC::NumberConstructor::put):
-        * runtime/NumberConstructor.h:
-        * runtime/NumberPrototype.cpp:
-        (JSC::NumberPrototype::getOwnPropertySlot):
-        * runtime/ObjectConstructor.cpp:
-        (JSC::ObjectConstructor::getOwnPropertySlot):
-        * runtime/RegExpConstructor.cpp:
-        (JSC::RegExpConstructor::getOwnPropertySlot):
-        (JSC::RegExpConstructor::put):
-        * runtime/RegExpConstructor.h:
-        * runtime/RegExpObject.cpp:
-        (JSC::RegExpObject::getOwnPropertySlot):
-        (JSC::RegExpObject::put):
-        * runtime/RegExpPrototype.cpp:
-        (JSC::RegExpPrototype::getOwnPropertySlot):
-        * runtime/StringConstructor.cpp:
-        (JSC::StringConstructor::getOwnPropertySlot):
-        * runtime/Structure.cpp:
-        (JSC::Structure::Structure):
-        (JSC::Structure::freezeTransition):
-
-2014-01-24  Mark Lam  
-
-        ASSERT(!m_markedSpace.m_currentDelayedReleaseScope) reloading page in inspector.
-        
-
-        Reviewed by Mark Hahnenberg.
-
-        1. We should not enter a HeapIterationScope when we iterate the CodeBlocks.
-           Apparently, iterating the CodeBlocks does not count as heap iteration.
-
-        2. If we're detaching the debugger due to the JSGlobalObject destructing,
-           then we don't need to clear the debugger requests in the associated
-           CodeBlocks. The JSGlobalObject destructing would mean that those
-           CodeBlocks would be destructing too, and it may not be safe to access
-           them anyway at this point.
-
-        The assertion failure is because we had entered a HeapIterationScope
-        while the JSGlobalObject is destructing, which in turn means that GC
-        sweeping is in progress. It's not legal to iterate the heap while the GC
-        is sweeping. Once we fixed the above 2 issues, we will no longer have
-        the conditions that manifests this assertion failure.
-
-        * debugger/Debugger.cpp:
-        (JSC::Debugger::detach):
-        (JSC::Debugger::setSteppingMode):
-        (JSC::Debugger::toggleBreakpoint):
-        (JSC::Debugger::clearBreakpoints):
-        (JSC::Debugger::clearDebuggerRequests):
-        * debugger/Debugger.h:
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::~JSGlobalObject):
-
-2014-01-24  Brent Fulgham  
-
-        [Win] Convert some NMake files to MSBuild project files
-        https://bugs.webkit.org/show_bug.cgi?id=127579
-
-        Reviewed by Tim Horton.
-
-        * JavaScriptCore.vcxproj/JavaScriptCore.make: Removed.
-        * JavaScriptCore.vcxproj/JavaScriptCore.proj: Added.
-
-2014-01-24  Mark Lam  
-
-        Fixed a bad assertion in CodeBlock::removeBreakpoint().
-        
-
-        Reviewed by Joseph Pecoraro.
-
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::removeBreakpoint):
-
-2014-01-24  Joseph Pecoraro  
-
-        fast/profiler tests ASSERTing after moving recompileAllJSFunctions off a timer
-        https://bugs.webkit.org/show_bug.cgi?id=127566
-
-        Reviewed by Oliver Hunt.
-
-        Make the VM handle recompilation as soon as possible after it is requested.
-
-        * debugger/Debugger.cpp:
-        (JSC::Debugger::recompileAllJSFunctions):
-        When currently on a JavaScript stack, mark for recompilation to happen as soon as possible.
-
-        * runtime/VMEntryScope.h:
-        (JSC::VMEntryScope::setRecompilationNeeded):
-        * runtime/VMEntryScope.cpp:
-        (JSC::VMEntryScope::VMEntryScope):
-        (JSC::VMEntryScope::~VMEntryScope):
-        Handle recompilation when the top VMEntryScope is popped.
-        Pass the needs recompilation flag up the stack if needed.
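-
-        The scheme, as a standalone sketch (simplified stand-in types):
-
-            #include <cstdio>
-
-            struct VM {
-                int entryDepth = 0;
-                bool recompilationNeeded = false;
-                void recompileAllJSFunctions() { std::printf("recompiling\n"); }
-            };
-
-            struct VMEntryScope {
-                VM& vm;
-                explicit VMEntryScope(VM& v) : vm(v) { ++vm.entryDepth; }
-                ~VMEntryScope() {
-                    // Only the outermost scope handles the deferred request;
-                    // inner scopes leave the flag set so it propagates up.
-                    if (--vm.entryDepth == 0 && vm.recompilationNeeded) {
-                        vm.recompilationNeeded = false;
-                        vm.recompileAllJSFunctions();
-                    }
-                }
-            };
-
-            int main() {
-                VM vm;
-                {
-                    VMEntryScope outer(vm);
-                    {
-                        VMEntryScope inner(vm);
-                        vm.recompilationNeeded = true;  // debugger asked mid-run
-                    }  // inner pops: not yet safe
-                }      // outer pops: recompilation runs here
-            }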
-
-2014-01-24  Oliver Hunt  
-
-        Generic JSObject::put should handle static properties in the classinfo hierarchy
-        https://bugs.webkit.org/show_bug.cgi?id=127523
-
-        Reviewed by Geoffrey Garen.
-
-        This patch makes JSObject::put correctly call static setters
-        defined by the ClassInfo.
-
-        To make this not clobber performance, the ClassInfo HashTable
-        now includes a flag to indicate that it contains setters. This
-        required updating the lut generator so that it tracked (and emitted)
-        this.
-
-        The rest of the change was making a number of the methods take
-        a VM rather than an ExecState*, so that Structure could set the
-        getter/setter flags during construction (if necessary).
-
-        This also means most objects do not need to perform a lookupPut
-        manually anymore, so most custom ::put's are no longer needed.
-        DOMWindow is the only exception as it has interesting security
-        related semantics.
-
-        * create_hash_table:
-        * interpreter/CallFrame.h:
-        (JSC::ExecState::arrayConstructorTable):
-        (JSC::ExecState::arrayPrototypeTable):
-        (JSC::ExecState::booleanPrototypeTable):
-        (JSC::ExecState::dataViewTable):
-        (JSC::ExecState::dateTable):
-        (JSC::ExecState::dateConstructorTable):
-        (JSC::ExecState::errorPrototypeTable):
-        (JSC::ExecState::globalObjectTable):
-        (JSC::ExecState::jsonTable):
-        (JSC::ExecState::numberConstructorTable):
-        (JSC::ExecState::numberPrototypeTable):
-        (JSC::ExecState::objectConstructorTable):
-        (JSC::ExecState::privateNamePrototypeTable):
-        (JSC::ExecState::regExpTable):
-        (JSC::ExecState::regExpConstructorTable):
-        (JSC::ExecState::regExpPrototypeTable):
-        (JSC::ExecState::stringConstructorTable):
-        (JSC::ExecState::promisePrototypeTable):
-        (JSC::ExecState::promiseConstructorTable):
-        * runtime/ArrayConstructor.cpp:
-        (JSC::ArrayConstructor::getOwnPropertySlot):
-        * runtime/ArrayPrototype.cpp:
-        (JSC::ArrayPrototype::getOwnPropertySlot):
-        * runtime/BooleanPrototype.cpp:
-        (JSC::BooleanPrototype::getOwnPropertySlot):
-        * runtime/ClassInfo.h:
-        (JSC::ClassInfo::propHashTable):
-        * runtime/DateConstructor.cpp:
-        (JSC::DateConstructor::getOwnPropertySlot):
-        * runtime/DatePrototype.cpp:
-        (JSC::DatePrototype::getOwnPropertySlot):
-        * runtime/ErrorPrototype.cpp:
-        (JSC::ErrorPrototype::getOwnPropertySlot):
-        * runtime/JSDataViewPrototype.cpp:
-        (JSC::JSDataViewPrototype::getOwnPropertySlot):
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::getOwnPropertySlot):
-        * runtime/JSONObject.cpp:
-        (JSC::JSONObject::getOwnPropertySlot):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::put):
-        (JSC::JSObject::deleteProperty):
-        * runtime/JSPromiseConstructor.cpp:
-        (JSC::JSPromiseConstructor::getOwnPropertySlot):
-        * runtime/JSPromisePrototype.cpp:
-        (JSC::JSPromisePrototype::getOwnPropertySlot):
-        * runtime/Lookup.h:
-        (JSC::HashTable::copy):
-        (JSC::putEntry):
-        (JSC::lookupPut):
-        * runtime/NamePrototype.cpp:
-        (JSC::NamePrototype::getOwnPropertySlot):
-        * runtime/NumberConstructor.cpp:
-        (JSC::NumberConstructor::getOwnPropertySlot):
-        * runtime/NumberConstructor.h:
-        * runtime/NumberPrototype.cpp:
-        (JSC::NumberPrototype::getOwnPropertySlot):
-        * runtime/ObjectConstructor.cpp:
-        (JSC::ObjectConstructor::getOwnPropertySlot):
-        * runtime/RegExpConstructor.cpp:
-        (JSC::RegExpConstructor::getOwnPropertySlot):
-        * runtime/RegExpConstructor.h:
-        * runtime/RegExpObject.cpp:
-        (JSC::RegExpObject::getOwnPropertySlot):
-        (JSC::RegExpObject::put):
-        * runtime/RegExpPrototype.cpp:
-        (JSC::RegExpPrototype::getOwnPropertySlot):
-        * runtime/StringConstructor.cpp:
-        (JSC::StringConstructor::getOwnPropertySlot):
-        * runtime/Structure.cpp:
-        (JSC::Structure::Structure):
-        (JSC::Structure::freezeTransition):
-        (JSC::ClassInfo::hasStaticSetterOrReadonlyProperties):
-
-2014-01-24  Mark Lam  
-
-        Skip op_profiler callbacks if !VM::m_enabledProfiler.
-        https://bugs.webkit.org/show_bug.cgi?id=127567.
-
-        Reviewed by Geoffrey Garen.
-
-        The profiler may not be always active (recording). When it's not active
-        (as in VM::m_enabledProfiler is null), then we might as well skip the
-        op_profiler callbacks. The callbacks themselves were already previously
-        gated by a VM::enabledProfiler() check. So, this change does not change
-        any profiler behavior.
-
-        For the DFG, we'll turn the op_profiler handling into speculation checks
-        and OSR exit to the baseline JIT if the profiler becomes active.
-
-        This brings the Octane score up to ~3000 from ~2840.
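-
-        The gating, as a standalone sketch (simplified stand-in types):
-
-            #include <cstdio>
-
-            struct Profiler { void willCall() { std::printf("profiler notified\n"); } };
-
-            struct VM { Profiler* enabledProfiler = nullptr; };
-
-            static void opProfileWillCall(VM& vm) {
-                if (!vm.enabledProfiler)  // common case: no profiler, skip the callback
-                    return;
-                vm.enabledProfiler->willCall();
-            }
-
-            int main() {
-                VM vm;
-                opProfileWillCall(vm);    // no-op
-                Profiler profiler;
-                vm.enabledProfiler = &profiler;
-                opProfileWillCall(vm);    // notifies
-            }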
-
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::AbstractInterpreter::executeEffects):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_profile_will_call):
-        (JSC::JIT::emit_op_profile_did_call):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emit_op_profile_will_call):
-        (JSC::JIT::emit_op_profile_did_call):
-        * llint/LowLevelInterpreter.asm:
-        * runtime/VM.h:
-        (JSC::VM::enabledProfilerAddress):
-
-2014-01-24  Mark Lam  
-
-        Removing the need for Debugger* and m_shouldPause op_debug check.
-        
-
-        Reviewed by Geoffrey Garen.
-
-        This patch replaces the checking of the Debugger::m_shouldPause flag
-        with a procedure to set a SteppingMode flag on all CodeBlocks under
-        the management of the debugger. This simplifies the op_debug checking
-        logic in all the execution engines.
-
-        * bytecode/CodeBlock.cpp:
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::hasDebuggerRequests):
-        (JSC::CodeBlock::debuggerRequestsAddress):
-        (JSC::CodeBlock::setSteppingMode):
-        (JSC::CodeBlock::clearDebuggerRequests):
-        - CodeBlock::m_debuggerRequests is a union of m_numBreakpoints and the
-          new m_steppingMode. The debugger can add/remove breakpoints to the
-          CodeBlock as well as set the stepping mode. By having
-          m_debuggerRequests as a union of the 2 bit fields, the op_debug code
-          can now check if any of the 2 requests made on the CodeBlock is still
-          in effect just by testing a single int.
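-
-          The single-word test can be sketched like this (field widths are
-          illustrative; reading through the union relies on the usual
-          compiler-supported type punning):
-
-              #include <cstdint>
-              #include <cstdio>
-
-              union DebuggerRequests {
-                  uint32_t all;  // "any request pending?" is one integer test
-                  struct {
-                      uint32_t numBreakpoints : 31;
-                      uint32_t steppingMode : 1;
-                  } bits;
-              };
-
-              int main() {
-                  DebuggerRequests requests;
-                  requests.all = 0;
-
-                  requests.bits.numBreakpoints += 1;  // a breakpoint was added
-                  std::printf("%d\n", requests.all != 0);  // 1
-
-                  requests.bits.numBreakpoints -= 1;
-                  requests.bits.steppingMode = 1;     // stepping, no breakpoints
-                  std::printf("%d\n", requests.all != 0);  // 1
-
-                  requests.bits.steppingMode = 0;
-                  std::printf("%d\n", requests.all != 0);  // 0
-              }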
-
-        * debugger/Debugger.cpp:
-        (JSC::Debugger::Debugger):
-        (JSC::Debugger::detach):
-        - This was a bug from before, where I forgot to clear the CodeBlock
-          breakpoints before detaching. We now take care of it by clearing all
-          debugger requests made to the CodeBlock.
-
-        (JSC::Debugger::SetSteppingModeFunctor::SetSteppingModeFunctor):
-        (JSC::Debugger::SetSteppingModeFunctor::operator()):
-        (JSC::Debugger::setSteppingMode):
-        (JSC::Debugger::ClearCodeBlockDebuggerRequestsFunctor::ClearCodeBlockDebuggerRequestsFunctor):
-        (JSC::Debugger::ClearCodeBlockDebuggerRequestsFunctor::operator()):
-        (JSC::Debugger::clearBreakpoints):
-
-        (JSC::Debugger::ClearDebuggerRequestsFunctor::ClearDebuggerRequestsFunctor):
-        (JSC::Debugger::ClearDebuggerRequestsFunctor::operator()):
-        (JSC::Debugger::clearDebuggerRequests):
-        - We need a distinct clearDebuggerRequests() from clearBreakpoints()
-          because:
-          1. When we detach a globalObject, we only want to clear the debugger
-             requests in CodeBlocks from that global.
-          2. Clearing the debugger requests in the CodeBlocks is not the same
-             as clearing the breakpoints. The breakpoints are still in effect
-             for the next time a globalObject is attached, or for other
-             globalObjects that are still attached.
-
-        (JSC::Debugger::setPauseOnNextStatement):
-        (JSC::Debugger::breakProgram):
-        (JSC::Debugger::stepIntoStatement):
-        (JSC::Debugger::updateCallFrameAndPauseIfNeeded):
-        (JSC::Debugger::pauseIfNeeded):
-        (JSC::Debugger::exception):
-        (JSC::Debugger::willExecuteProgram):
-        (JSC::Debugger::didReachBreakpoint):
-        * debugger/Debugger.h:
-        - We're always going to support the debugger. So, there's no longer
-          a need to check ENABLE(JAVASCRIPT_DEBUGGER). Removed the unneeded code.
-
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::debug):
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_debug):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emit_op_debug):
-        * llint/LowLevelInterpreter.asm:
-        * runtime/JSGlobalObject.h:
-        (JSC::JSGlobalObject::setDebugger):
-
-2014-01-24  Michael Saboff  
-
-        ARM Offline assembler temporary register allocator has duplicate register when building fat binaries
-        https://bugs.webkit.org/show_bug.cgi?id=127545
-
-        Reviewed by Mark Lam.
-
-        Eliminate the conditional addition of r11/r7 from getModifiedListARMCommon as the
-        .concat will add the new register to ARM_EXTRA_GPRS.  If getModifiedListARMCommon is
-        invoked a second time, there will be a second r11 or r7, which messes things up.
-        Instead, r6 was added to ARM_EXTRA_GPRS.  r6 is currently an unused register.
-
-        * offlineasm/arm.rb:
-
-2014-01-23  Joseph Pecoraro  
-
-        Move ContentSearchUtils, ScriptBreakpoint, and ScriptDebugListener into JavaScriptCore for inspector
-        https://bugs.webkit.org/show_bug.cgi?id=127537
-
-        Reviewed by Timothy Hatcher.
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * inspector/ContentSearchUtilities.cpp: Renamed from Source/WebCore/inspector/ContentSearchUtils.cpp.
-        (Inspector::ContentSearchUtilities::createSearchRegexSource):
-        (Inspector::ContentSearchUtilities::sizetExtractor):
-        (Inspector::ContentSearchUtilities::textPositionFromOffset):
-        (Inspector::ContentSearchUtilities::getRegularExpressionMatchesByLines):
-        (Inspector::ContentSearchUtilities::lineEndings):
-        (Inspector::ContentSearchUtilities::buildObjectForSearchMatch):
-        (Inspector::ContentSearchUtilities::createSearchRegex):
-        (Inspector::ContentSearchUtilities::countRegularExpressionMatches):
-        (Inspector::ContentSearchUtilities::searchInTextByLines):
-        (Inspector::ContentSearchUtilities::scriptCommentPattern):
-        (Inspector::ContentSearchUtilities::stylesheetCommentPattern):
-        (Inspector::ContentSearchUtilities::findMagicComment):
-        (Inspector::ContentSearchUtilities::findScriptSourceURL):
-        (Inspector::ContentSearchUtilities::findScriptSourceMapURL):
-        (Inspector::ContentSearchUtilities::findStylesheetSourceMapURL):
-        * inspector/ContentSearchUtilities.h: Renamed from Source/WebCore/inspector/ContentSearchUtils.h.
-        * inspector/ScriptBreakpoint.h: Renamed from Source/WebCore/inspector/ScriptBreakpoint.h.
-        (Inspector::ScriptBreakpointAction::ScriptBreakpointAction):
-        (Inspector::ScriptBreakpoint::ScriptBreakpoint):
-        * inspector/ScriptDebugListener.h: Renamed from Source/WebCore/inspector/ScriptDebugListener.h.
-        (Inspector::ScriptDebugListener::Script::Script):
-        (Inspector::ScriptDebugListener::~ScriptDebugListener):
-        * runtime/RegExp.cpp:
-        (JSC::RegExp::match):
-
-2014-01-23  Joseph Pecoraro  
-
-        Move RegularExpression into JavaScriptCore for inspector
-        https://bugs.webkit.org/show_bug.cgi?id=127526
-
-        Reviewed by Geoffrey Garen.
-
-        Move RegularExpression into JavaScriptCore/yarr so it can
-        be used later on by JavaScriptCore/inspector. Convert to
-        the JSC::Yarr namespace.
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * yarr/RegularExpression.cpp: Renamed from Source/WebCore/platform/text/RegularExpression.cpp.
-        (JSC::Yarr::RegularExpression::Private::create):
-        (JSC::Yarr::RegularExpression::Private::Private):
-        (JSC::Yarr::RegularExpression::Private::compile):
-        (JSC::Yarr::RegularExpression::RegularExpression):
-        (JSC::Yarr::RegularExpression::~RegularExpression):
-        (JSC::Yarr::RegularExpression::operator=):
-        (JSC::Yarr::RegularExpression::match):
-        (JSC::Yarr::RegularExpression::searchRev):
-        (JSC::Yarr::RegularExpression::matchedLength):
-        (JSC::Yarr::replace):
-        (JSC::Yarr::RegularExpression::isValid):
-        * yarr/RegularExpression.h: Renamed from Source/WebCore/platform/text/RegularExpression.h.
-
-2014-01-23  Joseph Pecoraro  
-
-        Web Inspector: Remove recompileAllJSFunctions timer in ScriptDebugServer
-        https://bugs.webkit.org/show_bug.cgi?id=127409
-
-        Reviewed by Geoffrey Garen.
-
-        * inspector/InspectorAgentBase.h:
-        When disconnecting agents, provide an InspectorDisconnectReason for
-        the disconnection. It could be that an inspector frontend is just
-        disconnecting or that the inspected object is going away entirely
-        and we can avoid doing some work.
-
-        * runtime/JSGlobalObjectDebuggable.h:
-        * runtime/JSGlobalObjectDebuggable.cpp:
-        (JSC::JSGlobalObjectDebuggable::~JSGlobalObjectDebuggable):
-        (JSC::JSGlobalObjectDebuggable::disconnect):
-        (JSC::JSGlobalObjectDebuggable::disconnectInternal):
-        Pass different reasons for the different disconnects.
-
-        * inspector/InspectorAgentRegistry.cpp:
-        (Inspector::InspectorAgentRegistry::willDestroyFrontendAndBackend):
-        * inspector/InspectorAgentRegistry.h:
-        * inspector/JSGlobalObjectInspectorController.cpp:
-        (Inspector::JSGlobalObjectInspectorController::disconnectFrontend):
-        * inspector/JSGlobalObjectInspectorController.h:
-        * inspector/agents/InspectorAgent.cpp:
-        (Inspector::InspectorAgent::willDestroyFrontendAndBackend):
-        * inspector/agents/InspectorAgent.h:
-        Pass InspectorDisconnectReason around where needed.
-
-2014-01-23  Mark Lam  
-
-        Enable DFG for the Debugger and Profiler.
-        
-
-        Reviewed by Geoffrey Garen.
-
-        In this patch, we implement DFG op_debug as a series of 3 checks:
-        1. Check if the debugger pointer is non-null. This is needed in case
-           the debugger has been detached but the DFG code is still running
-           on the stack.
-        2. Check if Debugger::m_shouldPause is true.
-        3. Check if CodeBlock::m_numBreakpoints is non-zero.
-
-        These are the same 3 checks done in the LLINT and baselineJIT. But unlike
-        the LLINT and baselineJIT, these DFG checks are implemented as
-        speculationChecks. If the check fails, we OSR exit to the baselineJIT and
-        let it do the work of servicing the op_debug callback.
-
-        Stepping through code in the debugger would work the same way. The top
-        function being debugged has to be a LLINT or baselineJIT function because
-        we would have OSR exited if there is a breakpoint in that function. When
-        we step out of that function to its caller, we expect that the caller will
-        call back to the debugger at the next op_debug. If the caller function is
-        a DFG function, the op_debug site will fail its speculation check on
-        Debugger::m_shouldPause and deopt into a baselineJIT function. Execution
-        continues from there as usual, and the debugger gets its callback.
-
-        For the profiler, op_profile_will_call and op_profile_did_call are
-        implemented as simple runtime calls to service the profiler.
-
-        With this patch, Octane performance with the WebInspector open jumps from
-        ~2000 to ~2500 (25% progression).
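-
-        A rough standalone model of the three checks (stand-in types; the
-        baseline call stands in for the actual OSR exit):
-
-            #include <cstdio>
-
-            struct Debugger { bool shouldPause = false; };
-            struct CodeBlock { unsigned numBreakpoints = 0; };
-
-            static void baselineOpDebug() { std::printf("baseline: op_debug serviced\n"); }
-
-            static void dfgOpDebug(Debugger* debugger, CodeBlock& block) {
-                bool speculationFailed = debugger           // 1. debugger attached
-                    && (debugger->shouldPause               // 2. stepping
-                        || block.numBreakpoints);           // 3. breakpoints set
-                if (speculationFailed) {
-                    baselineOpDebug();                      // "OSR exit" to baseline
-                    return;
-                }
-                // fast path: op_debug costs nothing
-            }
-
-            int main() {
-                Debugger debugger;
-                CodeBlock block;
-                dfgOpDebug(&debugger, block);   // fast path
-                debugger.shouldPause = true;
-                dfgOpDebug(&debugger, block);   // deopts to baseline
-            }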
-
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::numBreakpointsAddress):
-        * bytecode/ExitKind.cpp:
-        (JSC::exitKindToString):
-        * bytecode/ExitKind.h:
-        * debugger/Debugger.cpp:
-        (JSC::Debugger::toggleBreakpoint):
-        - removed an obsolete assertion. The debugger can now handle DFG
-          CodeBlocks too.
-        * debugger/Debugger.h:
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::AbstractInterpreter::executeEffects):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGCapabilities.cpp:
-        (JSC::DFG::capabilityLevel):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT.h:
-        (JSC::DFG::SpeculativeJIT::callOperation):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * runtime/JSGlobalObject.h:
-        (JSC::JSGlobalObject::debuggerAddress):
-
-2014-01-23  Max Vujovic  
-
-        Remove CSS Custom Filters code and tests
-        https://bugs.webkit.org/show_bug.cgi?id=127382
-
-        Reviewed by Simon Fraser.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2014-01-22  Brent Fulgham  
-
-        [Win] Update project and solution files for 64-bit builds.
-        https://bugs.webkit.org/show_bug.cgi?id=127457
-
-        Reviewed by Eric Carlson.
-
-        * JavaScriptCore.vcxproj/JavaScriptCore.submit.sln: Add 64-bit target.
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj: Update for VS2013
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters: Add missing
-        file from project view.
-        * JavaScriptCore.vcxproj/jsc/jsc.vcxproj: Update for VS2013
-        * JavaScriptCore.vcxproj/testRegExp/testRegExp.vcxproj: Ditto
-        * JavaScriptCore.vcxproj/testapi/testapi.vcxproj: Ditto
-
-2014-01-22  Mark Lam  
-
-        Poor man's fast breakpoints for a 2.3x debugger speedup.
-        
-
-        Reviewed by Geoffrey Garen.
-
-        Previously, we gained back some performance (running at baseline JIT speeds)
-        when the WebInspector is open, provided no breakpoints are set. This
-        was achieved by simply skipping all op_debug callbacks to the debugger
-        if no breakpoints are set. If any breakpoints are set, the debugger will
-        set a m_needsOpDebugCallbacks flag which causes the callbacks to be
-        called, and we don't get the baseline JIT speeds anymore.
-
-        With this patch, we now track the number of breakpoints set in each
-        CodeBlock. The LLINT and baseline JIT code check
-        CodeBlock::m_numBreakpoints to determine if the op_debug callbacks
-        need to be called. With this, we only enable op_debug callbacks for
-        CodeBlocks that need them, i.e. those with breakpoints set in them.
-
-        Debugger::m_needsOpDebugCallbacks is now obsolete. The LLINT and baseline
-        JIT code still needs to check Debugger::m_shouldPause to determine if the
-        debugger is in stepping mode and hence needs op_debug callbacks enabled
-        for everything until the debugger "continues" the run and exits stepping
-        mode (see the sketch below).
-
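-        As a rough sketch, in C++ pseudocode, of the check the LLINT and
-        baseline JIT actually emit in assembly (numBreakpoints() and
-        shouldPause() are the accessors added in this patch):
-
-            // Hypothetical equivalent of the per-op_debug fast check:
-            static inline bool needsOpDebugCallback(CodeBlock* codeBlock, Debugger* debugger)
-            {
-                // Skip the callback entirely unless this CodeBlock has
-                // breakpoints or the debugger is stepping.
-                return codeBlock->numBreakpoints() || (debugger && debugger->shouldPause());
-            }
-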
-        Also in this patch, I fixed a regression in DOM breakpoints, which rely
-        on Debugger::breakProgram() to pause the debugger.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::dumpBytecode):
-        - Missed accounting for op_debug's new hasBreakpointFlag operand here when
-          it was added.
-        (JSC::CodeBlock::CodeBlock):
-        (JSC::CodeBlock::hasOpDebugForLineAndColumn):
-        - This is needed in Debugger::toggleBreakpoint() to determine if a
-          breakpoint falls within a CodeBlock or not. Simply checking the bounds
-          of the CodeBlock is insufficient. For example, let's say we have the
-          following JS code:
-
-              // begin global scope
-              function f1() {
-                  function f2() {
-                     ... // set breakpoint here.
-                  }
-              }
-              // end global scope
-
-          Using the CodeBlock bounds alone, the breakpoint above will appear
-          to be in the global program CodeBlock, and in the CodeBlocks for
-          functions f1() and f2(). With CodeBlock::hasOpDebugForLineAndColumn() we can
-          rule out the global program CodeBlock and f1(), and only apply the
-          breakpoint to f2() where it belongs.
-
-          CodeBlock::hasOpDebugForLineAndColumn() works by iterating over all
-          the opcodes in the CodeBlock to look for op_debug's. For each op_debug,
-          it calls CodeBlock::expressionRangeForBytecodeOffset() to do a binary
-          search to get the line and column info for that op_debug. This is an
-          N * log(N) algorithm. However, a quick hands-on test using the
-          WebInspector (with this patch applied) to exercise setting, breaking
-          on, and clearing breakpoints, as well as stepping through some code
-          shows no noticeable degradation of the user experience compared to the
-          baseline without this patch.
-
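-          A minimal sketch of that scan (the instruction-walking helpers
-          instructionCount(), opcodeLengthAt(), and opcodeAt() are hypothetical;
-          the real loop advances through the instruction stream by opcode
-          length):
-
-              // Sketch of the N * log(N) scan described above:
-              bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
-              {
-                  for (unsigned offset = 0; offset < instructionCount(); offset += opcodeLengthAt(offset)) {
-                      if (opcodeAt(offset) != op_debug)
-                          continue;
-                      int divot, startOffset, endOffset;
-                      unsigned opLine, opColumn;
-                      // Binary search over the expression range info: the log(N) part.
-                      expressionRangeForBytecodeOffset(offset, divot, startOffset, endOffset, opLine, opColumn);
-                      if (opLine == line && (column == Breakpoint::unspecifiedColumn || opColumn == column))
-                          return true;
-                  }
-                  return false;
-              }
-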
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::numBreakpoints):
-        (JSC::CodeBlock::numBreakpointsOffset):
-        (JSC::CodeBlock::addBreakpoint):
-        (JSC::CodeBlock::removeBreakpoint):
-        (JSC::CodeBlock::clearAllBreakpoints):
-        * debugger/Breakpoint.h:
-        - defined Breakpoint::unspecifiedColumn so that we can explicitly indicate
-          when the WebInspector was setting a line breakpoint and did not provide
-          a column value. CodeBlock::hasOpDebugForLineAndColumn() needs this
-          information in order to loosen its matching criteria for op_debug
-          bytecodes for the specified breakpoint line and column values provided
-          by the debugger.
-
-          Previously, we hijacked a column value of 0 to mean an unspecified
-          column. However, the WebInspector operates on 0-based ints for column
-          values. Hence, 0 is a valid column value and should not be hijacked to
-          mean an unspecified column (see the sketch below).
-
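-          A sketch of the sentinel (assuming it lives in Breakpoint.h; the
-          exact value is illustrative):
-
-              struct Breakpoint {
-                  // A value no real 0-based column can take, so 0 stays valid.
-                  static const unsigned unspecifiedColumn = UINT_MAX;
-              };
-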
-        * debugger/Debugger.cpp:
-        (JSC::Debugger::Debugger):
-        - added tracking of the VM that the debugger is used with. This is
-          needed by Debugger::breakProgram().
-
-          The VM pointer is attained from the first JSGlobalObject that the debugger
-          attaches to. When the debugger detaches from the last JSGlobalObject, it
-          will nullify its VM pointer to allow a new one to be set on the next
-          attach.
-
-          We were already only using each debugger instance with one VM. This change
-          makes that explicit, with an assert to ensure that all globalObjects that
-          the debugger attaches to belong to the same VM (sketched below).
-
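-          A sketch of that VM bookkeeping (simplified; member names are
-          illustrative and the real attach/detach logic does more):
-
-              void Debugger::attach(JSGlobalObject* globalObject)
-              {
-                  if (!m_vm)
-                      m_vm = &globalObject->vm();
-                  ASSERT(m_vm == &globalObject->vm()); // one VM per debugger
-                  m_globalObjects.add(globalObject);
-              }
-
-              void Debugger::detach(JSGlobalObject* globalObject)
-              {
-                  m_globalObjects.remove(globalObject);
-                  if (m_globalObjects.isEmpty())
-                      m_vm = nullptr; // allow a new VM on the next attach
-              }
-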
-        (JSC::Debugger::attach):
-        (JSC::Debugger::detach):
-        (JSC::Debugger::setShouldPause):
-
-        (JSC::Debugger::registerCodeBlock):
-        (JSC::Debugger::unregisterCodeBlock):
-        - registerCodeBlock() is responsible for applying pre-existing breakpoints
-          to new CodeBlocks being installed. Similarly, unregisterCodeBlock()
-          clears the breakpoints.
-
-        (JSC::Debugger::toggleBreakpoint):
-        - This is the workhorse function that checks if a breakpoint falls within
-          a CodeBlock or not. If it does, then it can either enable or disable
-          said breakpoint in the CodeBlock. In the current implementation,
-          enabling/disabling the breakpoint simply means incrementing/decrementing
-          the CodeBlock's m_numBreakpoints.
-
-        (JSC::Debugger::applyBreakpoints):
-
-        (JSC::Debugger::ToggleBreakpointFunctor::ToggleBreakpointFunctor):
-        (JSC::Debugger::ToggleBreakpointFunctor::operator()):
-        (JSC::Debugger::toggleBreakpoint):
-        - Iterates all relevant CodeBlocks and applies the specified breakpoint
-          if appropriate. This is called when a new breakpoint is being defined
-          by the WebInspector and needs to be applied to an already installed
-          CodeBlock.
-
-        (JSC::Debugger::setBreakpoint):
-        (JSC::Debugger::removeBreakpoint):
-        (JSC::Debugger::hasBreakpoint):
-        (JSC::Debugger::ClearBreakpointsFunctor::ClearBreakpointsFunctor):
-        (JSC::Debugger::ClearBreakpointsFunctor::operator()):
-        (JSC::Debugger::clearBreakpoints):
-
-        (JSC::Debugger::breakProgram):
-        - Fixed a regression that broke DOM breakpoints. The issue is that with
-          the skipping of op_debug callbacks, we don't always have an updated
-          m_currentCallFrame. Normally, m_currentCallFrame is provided as an
-          argument in the op_debug callback. In this case, we can get the
-          CallFrame* from m_vm->topCallFrame, as sketched below.
-
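-          Conceptually (a simplified sketch):
-
-              void Debugger::breakProgram()
-              {
-                  if (!m_vm || !m_vm->topCallFrame)
-                      return;
-                  // op_debug callbacks may have been skipped, so
-                  // m_currentCallFrame can be stale; recover it from the VM.
-                  m_currentCallFrame = m_vm->topCallFrame;
-                  pauseIfNeeded(m_currentCallFrame);
-              }
-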
-        (JSC::Debugger::updateCallFrameAndPauseIfNeeded):
-        (JSC::Debugger::pauseIfNeeded):
-        (JSC::Debugger::willExecuteProgram):
-        * debugger/Debugger.h:
-        (JSC::Debugger::Debugger):
-        (JSC::Debugger::shouldPause):
-
-        * heap/CodeBlockSet.h:
-        (JSC::CodeBlockSet::iterate):
-        * heap/Heap.h:
-        (JSC::Heap::forEachCodeBlock):
-        - Added utility to iterate all CodeBlocks in the heap / VM.
-
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::debug):
-
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_debug):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emit_op_debug):
-        * llint/LowLevelInterpreter.asm:
-        - These now check CodeBlock::m_numBreakpoints and Debugger::m_shouldPause
-          instead of Debugger::m_needsOpDebugCallbacks.
-
-        * runtime/Executable.cpp:
-        (JSC::ScriptExecutable::installCode):
-
-2014-01-22  Myles C. Maxfield  
-
-        Remove CSS3_TEXT_DECORATION define
-        https://bugs.webkit.org/show_bug.cgi?id=127333
-
-        This is required for unprefixing the text-decoration-* CSS properties.
-
-        Reviewed by Simon Fraser.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2014-01-22  Alexey Proskuryakov  
-
-        Update JS whitespace definition for changes in Unicode 6.3
-        https://bugs.webkit.org/show_bug.cgi?id=127450
-        
-
-        Reviewed by Oliver Hunt.
-
-        Covered by existing tests when running against a Unicode back-end that supports
-        Unicode 6.3 or higher.
-
-        * runtime/JSGlobalObjectFunctions.cpp: (JSC::isStrWhiteSpace): Explicitly allow
-        U+180E MONGOLIAN VOWEL SEPARATOR, because we need to keep recognizing all characters
-        that used to be whitespace.
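-
-        The gist, as a sketch (isOtherWhiteSpace stands in for whatever the
-        Unicode back-end reports):
-
-            static bool isStrWhiteSpace(UChar c)
-            {
-                // U+180E was whitespace up to Unicode 6.2; keep accepting it
-                // even though Unicode 6.3 reclassified it.
-                if (c == 0x180E)
-                    return true;
-                return isOtherWhiteSpace(c); // hypothetical back-end query
-            }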
-
-2014-01-21  Mark Hahnenberg  
-
-        Registers used in writeBarrierOnOperand can cause clobbering on some platforms
-        https://bugs.webkit.org/show_bug.cgi?id=127357
-
-        Reviewed by Filip Pizlo.
-
-        Some platforms use t0 and t1 for their first two arguments, so using those to load the 
-        cell for the write barrier is a bad idea because it will get clobbered.
-
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-
-2014-01-21  Mark Rowe  
-
-        Mac production build fix.
-
-        Move the shell script build phase to copy jsc into JavaScriptCore.framework
-        out of the jsc target and in to the All target so that it's not run during
-        production builds. Xcode appears to create the parent directories of paths
-        referenced in the Output Files of the build phase, which leads to problems when the
-        SYMROOT for the JavaScriptCore framework and the jsc executables are later merged.
-
-        I've also fixed the path to the Resources folder in the script while I'm here.
-        On iOS the framework bundle is shallow so the correct destination is Resources/
-        rather than Versions/A/Resources. This is handled by tweaking the
-        JAVASCRIPTCORE_RESOURCES_DIR configuration setting to be relative rather than
-        a complete path so we can reuse it in the script. The references in JSC.xcconfig
-        and ToolExecutable.xcconfig are updated to prepend JAVASCRIPTCORE_FRAMEWORKS_DIR
-        to preserve their former values.
-
-        * Configurations/Base.xcconfig:
-        * Configurations/JSC.xcconfig:
-        * Configurations/ToolExecutable.xcconfig:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-
-2014-01-19  Andreas Kling  
-
-        JSC Parser: Shrink BindingNode.
-        
-
-        The "divot" and "end" source locations are always identical for
-        BindingNodes, so store only "start" and "end" instead.
-
-        1.19 MB progression on Membuster3.
-
-        Reviewed by Geoff Garen.
-
-        * bytecompiler/NodesCodegen.cpp:
-        (JSC::BindingNode::bindValue):
-        * parser/ASTBuilder.h:
-        (JSC::ASTBuilder::createBindingLocation):
-        * parser/NodeConstructors.h:
-        (JSC::BindingNode::create):
-        (JSC::BindingNode::BindingNode):
-        * parser/Nodes.h:
-        (JSC::BindingNode::divotStart):
-        (JSC::BindingNode::divotEnd):
-        * parser/Parser.cpp:
-        (JSC::Parser::createBindingPattern):
-        * parser/SyntaxChecker.h:
-        (JSC::SyntaxChecker::operatorStackPop):
-
-2014-01-20  Filip Pizlo  
-
-        op_captured_mov and op_new_captured_func in UnlinkedCodeBlocks should use the IdentifierMap instead of the strings directly
-        https://bugs.webkit.org/show_bug.cgi?id=127311
-        
-
-        Reviewed by Andreas Kling.
-        
-        This makes UnlinkedCodeBlocks use 32-bit instruction streams again.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-        * bytecode/UnlinkedCodeBlock.h:
-        (JSC::UnlinkedInstruction::UnlinkedInstruction):
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::addVar):
-        (JSC::BytecodeGenerator::emitInitLazyRegister):
-        (JSC::BytecodeGenerator::createArgumentsIfNecessary):
-        * bytecompiler/BytecodeGenerator.h:
-        (JSC::BytecodeGenerator::watchableVariable):
-        (JSC::BytecodeGenerator::hasWatchableVariable):
-
-2014-01-20  Mark Lam  
-
-        Removing CodeBlock::opDebugBytecodeOffsetForLineAndColumn() and friends.
-        
-
-        Reviewed by Geoffrey Garen.
-
-        We're changing plans and will be going with CodeBlock level breakpoints
-        instead of bytecode level breakpoints. As a result, we no longer need
-        the services of CodeBlock::opDebugBytecodeOffsetForLineAndColumn() (and
-        friends). This patch removes that unused code.
-
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bytecode/CodeBlock.cpp:
-        * bytecode/CodeBlock.h:
-        * bytecode/LineColumnInfo.h: Removed.
-        * bytecode/UnlinkedCodeBlock.cpp:
-        (JSC::UnlinkedCodeBlock::dumpExpressionRangeInfo):
-        * bytecode/UnlinkedCodeBlock.h:
-
-2014-01-20  Mark Hahnenberg  
-
-        CodeBlockSet::traceMarked doesn't need to visit the ownerExecutable
-        https://bugs.webkit.org/show_bug.cgi?id=127301
-
-        Reviewed by Oliver Hunt.
-
-        We used to just call CodeBlock::visitAggregate, but now we call visitChildren 
-        on the ownerExecutable, which is unnecessary. 
-
-        * heap/CodeBlockSet.cpp:
-        (JSC::CodeBlockSet::traceMarked):
-
-2014-01-20  Anders Carlsson  
-
-        Fix build.
-
-        * heap/BlockAllocator.h:
-
-2014-01-20  Anders Carlsson  
-
-        Stop using ThreadCondition in BlockAllocator
-        https://bugs.webkit.org/show_bug.cgi?id=126313
-
-        Reviewed by Sam Weinig.
-
-        * heap/BlockAllocator.cpp:
-        (JSC::BlockAllocator::~BlockAllocator):
-        (JSC::BlockAllocator::waitForDuration):
-        (JSC::BlockAllocator::blockFreeingThreadMain):
-        * heap/BlockAllocator.h:
-        (JSC::BlockAllocator::deallocate):
-
-2014-01-19  Anders Carlsson  
-
-        Convert GCThreadSharedData over to STL threading primitives
-        https://bugs.webkit.org/show_bug.cgi?id=127256
-
-        Reviewed by Andreas Kling.
-
-        * heap/GCThread.cpp:
-        (JSC::GCThread::waitForNextPhase):
-        (JSC::GCThread::gcThreadMain):
-        * heap/GCThreadSharedData.cpp:
-        (JSC::GCThreadSharedData::GCThreadSharedData):
-        (JSC::GCThreadSharedData::~GCThreadSharedData):
-        (JSC::GCThreadSharedData::startNextPhase):
-        (JSC::GCThreadSharedData::endCurrentPhase):
-        (JSC::GCThreadSharedData::didStartMarking):
-        (JSC::GCThreadSharedData::didFinishMarking):
-        * heap/GCThreadSharedData.h:
-        * heap/SlotVisitor.cpp:
-        (JSC::SlotVisitor::donateKnownParallel):
-        (JSC::SlotVisitor::drainFromShared):
-
-2014-01-18  Andreas Kling  
-
-        CodeBlock: Size m_callLinkInfos and m_byValInfos to fit earlier.
-        
-
-        Reviewed by Anders Carlsson.
-
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::setNumberOfByValInfos):
-        (JSC::CodeBlock::setNumberOfCallLinkInfos):
-
-            Use resizeToFit() instead of grow() for these vectors, since
-            we know the final size here.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::shrinkToFit):
-
-            No need to shrink here anymore. We were not even shrinking
-            m_byValInfo before!
-
-2014-01-18  Andreas Kling  
-
-        CodeBlock: Size m_function{Exprs,Decls} to fit from creation.
-        
-
-        Reviewed by Anders Carlsson.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-
-            Use resizeToFit() instead of grow() for m_functionExprs and
-            m_functionDecls since we know they will never change size.
-
-        (JSC::CodeBlock::shrinkToFit):
-
-            No need to shrink them here anymore.
-
-2014-01-18  Andreas Kling  
-
-        Remove unused CodeBlock::m_additionalIdentifiers member.
-        
-
-        Reviewed by Anders Carlsson.
-
-        * bytecode/CodeBlock.h:
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-        (JSC::CodeBlock::shrinkToFit):
-
-            Remove m_additionalIdentifiers, nothing uses it.
-
-2014-01-18  Andreas Kling  
-
-        Remove two unused CodeBlock functions.
-        
-
-        Kill copyPostParseDataFrom() and copyPostParseDataFromAlternative()
-        since they are not used.
-
-        Reviewed by Anders Carlsson.
-
-        * bytecode/CodeBlock.cpp:
-        * bytecode/CodeBlock.h:
-
-2014-01-18  Andreas Kling  
-
-        CodeBlock: Size m_exceptionHandlers to fit from creation.
-        
-
-        Avoid allocation churn for CodeBlock::m_exceptionHandlers.
-
-        Reviewed by Anders Carlsson.
-
-        * bytecode/CodeBlock.h:
-
-            Removed unused CodeBlock::allocateHandlers() function.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-
-            Use resizeToFit() instead of grow() for m_exceptionHandlers
-            since we know it's never going to change size.
-
-        (JSC::CodeBlock::shrinkToFit):
-
-            No need to shrink m_exceptionHandlers here since it's already
-            the perfect size.
-
-2014-01-18  Mark Lam  
-
-        Add a hasBreakpointFlag arg to the op_debug bytecode.
-        https://bugs.webkit.org/show_bug.cgi?id=127230.
-
-        Reviewed by Geoffrey Garen.
-
-        This is in anticipation of upcoming changes to support bytecode level
-        breakpoints. This patch adds the flag to the op_debug bytecode and
-        initializes it, but does not use it yet.
-
-        * bytecode/Opcode.h:
-        (JSC::padOpcodeName):
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::emitDebugHook):
-        * llint/LowLevelInterpreter.asm:
-
-2014-01-18  Alberto Garcia  
-
-        JavaScriptCore uses PLATFORM(MAC) when it means OS(DARWIN)
-        https://bugs.webkit.org/show_bug.cgi?id=99683
-
-        Reviewed by Anders Carlsson.
-
-        * jit/ThunkGenerators.cpp:
-        * tools/CodeProfile.cpp:
-        (JSC::symbolName):
-        (JSC::CodeProfile::sample):
-
-2014-01-18  Anders Carlsson  
-
-        Remove ENABLE_THREADED_HTML_PARSER defines everywhere
-        https://bugs.webkit.org/show_bug.cgi?id=127225
-
-        Reviewed by Andreas Kling.
-
-        This concludes the removal of over 8.8 million lines of threaded parser code.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2014-01-18  Mark Lam  
-
-        Adding UnlinkedCodeBlock::opDebugBytecodeOffsetForLineAndColumn()..
-        https://bugs.webkit.org/show_bug.cgi?id=127127.
-
-        Reviewed by Geoffrey Garen.
-
-        In order to implement bytecode level breakpoints, we need a mechanism
-        for computing the best fit op_debug bytecode offset for any valid given
-        line and column value in the source. The "best fit" op_debug bytecode
-        in this case is defined below in the comment for
-        UnlinkedCodeBlock::opDebugBytecodeOffsetForLineAndColumn().
-
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::opDebugBytecodeOffsetForLineAndColumn):
-        - Convert the line and column to unlinked line and column values and
-          pass them to UnlinkedCodeBlock::opDebugBytecodeOffsetForLineAndColumn()
-          to do the real work.
-
-        * bytecode/CodeBlock.h:
-        * bytecode/LineColumnInfo.h: Added.
-        (JSC::LineColumnInfo::operator <):
-        (JSC::LineColumnInfo::LineColumnPair::LineColumnPair):
-        (JSC::LineColumnInfo::operator ==):
-        (JSC::LineColumnInfo::operator !=):
-        (JSC::LineColumnInfo::operator <=):
-        (JSC::LineColumnInfo::operator >):
-        (JSC::LineColumnInfo::operator >=):
-        * bytecode/LineInfo.h: Removed.
-
-        * bytecode/UnlinkedCodeBlock.cpp:
-        (JSC::UnlinkedCodeBlock::decodeExpressionRangeLineAndColumn):
-        - Factored this out of expressionRangeForBytecodeOffset() so that it can
-          be called from multiple places.
-        (JSC::dumpLineColumnEntry):
-        (JSC::UnlinkedCodeBlock::dumpExpressionRangeInfo):
-        (JSC::UnlinkedCodeBlock::dumpOpDebugLineColumnInfoList):
-        - Some dumpers for debugging use only.
-        (JSC::UnlinkedCodeBlock::expressionRangeForBytecodeOffset):
-        (JSC::UnlinkedCodeBlock::opDebugBytecodeOffsetForLineAndColumn):
-        - Finds the earliest op_debug bytecode whose line and column matches the
-          specified line and column values. If an exact match is not found, then
-          finds the nearest op_debug bytecode that precedes the specified line
-          and column values. If there is more than one op_debug at that preceding
-          line and column value, then the earliest of those op_debug bytecodes will
-          be selected. The offset of the selected bytecode will be returned (see
-          the sketch below).
-
-          We want the earliest one because when we have multiple op_debug bytecodes
-          that map to a given line and column, a debugger user would expect to break
-          on the first one and step through the rest thereafter if needed.
-
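-          A sketch of that lookup over the sorted list (the helpers
-          indexOfEntryAtOrBefore and samePosition are hypothetical):
-
-              unsigned UnlinkedCodeBlock::opDebugBytecodeOffsetForLineAndColumn(unsigned line, unsigned column)
-              {
-                  auto& list = opDebugLineColumnInfoList(); // sorted by (line, column)
-                  size_t i = indexOfEntryAtOrBefore(list, line, column); // binary search
-                  // Prefer the earliest op_debug at that line and column.
-                  while (i && samePosition(list[i - 1], list[i]))
-                      --i;
-                  return list[i].bytecodeOffset;
-              }
-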
-        (JSC::compareLineColumnInfo):
-        (JSC::UnlinkedCodeBlock::opDebugLineColumnInfoList):
-        - Creates the sorted opDebugLineColumnInfoList on demand. This list is
-          stored in the UnlinkedCodeBlock's rareData.
-        * bytecode/UnlinkedCodeBlock.h:
-
-2014-01-18  Zan Dobersek  
-
-        Inspector scripts are not compatible with Python v3
-        https://bugs.webkit.org/show_bug.cgi?id=127128
-
-        Reviewed by Benjamin Poulain.
-
-        * inspector/scripts/generate-combined-inspector-json.py: Turn print statements into print function calls.
-        * inspector/scripts/jsmin.py: Try importing the StringIO class from the StringIO module (which will work for
-        Python v2) or, on import error, import the class from the io module (which will work for Python v3).
-
-2014-01-17  Anders Carlsson  
-
-        String::is8Bit() crashes if m_impl is null, handle this.
-
-        * API/OpaqueJSString.h:
-        (OpaqueJSString::OpaqueJSString):
-
-2014-01-17  Anders Carlsson  
-
-        Try to fix the Windows build.
-
-        * API/OpaqueJSString.cpp:
-        (OpaqueJSString::~OpaqueJSString):
-        (OpaqueJSString::characters):
-        * API/OpaqueJSString.h:
-        (OpaqueJSString::OpaqueJSString):
-
-2014-01-17  Anders Carlsson  
-
-        Get rid of OpaqueJSString::deprecatedCharacters()
-        https://bugs.webkit.org/show_bug.cgi?id=127161
-
-        Reviewed by Sam Weinig.
-
-        Handle OpaqueJSString::m_string being either 8-bit or 16-bit and add extra
-        code paths for the 8-bit cases.
-        
-        Unfortunately, JSStringGetCharactersPtr is still expected to return a 16-bit character pointer.
-        Handle this by storing a separate 16-bit string and initializing it on demand when JSStringGetCharactersPtr
-        is called and the backing string is 8-bit.
-        
-        This has the nice side effect of making JSStringGetCharactersPtr thread-safe when it wasn't before.
-        (In theory, someone could have a JSStringRef backed by an 8-bit string and call JSStringGetCharactersPtr on it
-        causing an unsafe upconversion to a 16-bit string).
-
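-        A sketch of the on-demand up-conversion (simplified; the real code also
-        makes the caching thread-safe):
-
-            const UChar* OpaqueJSString::characters()
-            {
-                if (!m_string.is8Bit())
-                    return m_string.characters16();
-                if (!m_characters) {
-                    // Up-convert the 8-bit backing store once and cache it;
-                    // the destructor frees m_characters.
-                    unsigned length = m_string.length();
-                    UChar* buffer = static_cast<UChar*>(fastMalloc(length * sizeof(UChar)));
-                    StringImpl::copyChars(buffer, m_string.characters8(), length);
-                    m_characters = buffer;
-                }
-                return m_characters;
-            }
-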
-        * API/JSStringRef.cpp:
-        (JSStringGetCharactersPtr):
-        Call OpaqueJSString::characters.
-
-        (JSStringGetUTF8CString):
-        Add a code path that handles 8-bit strings.
-
-        (JSStringIsEqual):
-        Call OpaqueJSString::equal.
-
-        * API/JSStringRefCF.cpp:
-        (JSStringCreateWithCFString):
-        Reformat the code to use an early return instead of putting most of the code inside the body of an if statement.
-
-        (JSStringCopyCFString):
-        Create an 8-bit CFStringRef if possible.
-
-        * API/OpaqueJSString.cpp:
-        (OpaqueJSString::create):
-        Use nullptr.
-
-        (OpaqueJSString::~OpaqueJSString):
-        Free m_characters.
-
-        (OpaqueJSString::characters):
-        Do the up-conversion and store the result in m_characters.
-
-        (OpaqueJSString::equal):
-        New helper function.
-
-        * API/OpaqueJSString.h:
-        (OpaqueJSString::is8Bit):
-        New function that returns whether a string is 8-bit or not.
-
-        (OpaqueJSString::characters8):
-        (OpaqueJSString::characters16):
-        Add getters.
-
-2014-01-17  Peter Molnar  
-
-        Remove workaround for compilers not supporting deleted functions
-        https://bugs.webkit.org/show_bug.cgi?id=127166
-
-        Reviewed by Andreas Kling.
-
-        * inspector/InspectorAgentRegistry.h:
-
-2014-01-17  Commit Queue  
-
-        Unreviewed, rolling out r162185, r162186, and r162187.
-        http://trac.webkit.org/changeset/162185
-        http://trac.webkit.org/changeset/162186
-        http://trac.webkit.org/changeset/162187
-        https://bugs.webkit.org/show_bug.cgi?id=127164
-
-        Broke JSStringCreateWithCharactersNoCopy, as evidenced by a
-        JSC API test (Requested by ap on #webkit).
-
-        * API/JSStringRef.cpp:
-        (JSStringGetCharactersPtr):
-        (JSStringGetUTF8CString):
-        (JSStringIsEqual):
-        * API/JSStringRefCF.cpp:
-        (JSStringCreateWithCFString):
-        (JSStringCopyCFString):
-        * API/OpaqueJSString.cpp:
-        (OpaqueJSString::create):
-        (OpaqueJSString::identifier):
-        * API/OpaqueJSString.h:
-        (OpaqueJSString::create):
-        (OpaqueJSString::characters):
-        (OpaqueJSString::deprecatedCharacters):
-        (OpaqueJSString::OpaqueJSString):
-
-2014-01-16  Anders Carlsson  
-
-        Export OpaqueJSString destructor.
-
-        * API/OpaqueJSString.h:
-
-2014-01-16  Anders Carlsson  
-
-        Build fix.
-
-        * API/OpaqueJSString.h:
-
-2014-01-16  Anders Carlsson  
-
-        Get rid of OpaqueJSString::deprecatedCharacters()
-        https://bugs.webkit.org/show_bug.cgi?id=127161
-
-        Reviewed by Sam Weinig.
-
-        Handle OpaqueJSString::m_string being either 8-bit or 16-bit and add extra
-        code paths for the 8-bit cases.
-        
-        Unfortunately, JSStringGetCharactersPtr is still expected to return a 16-bit character pointer.
-        Handle this by storing a separate 16-bit string and initializing it on demand when JSStringGetCharactersPtr
-        is called. This has the nice side effect of making JSStringGetCharactersPtr thread-safe when it wasn't before.
-        (In theory, someone could have a JSStringRef backed by an 8-bit string and call JSStringGetCharactersPtr on it
-        causing an unsafe upconversion to a 16-bit string).
-
-        * API/JSStringRef.cpp:
-        (JSStringGetCharactersPtr):
-        Call OpaqueJSString::characters.
-
-        (JSStringGetUTF8CString):
-        Add a code path that handles 8-bit strings.
-
-        (JSStringIsEqual):
-        Call OpaqueJSString::equal.
-
-        * API/JSStringRefCF.cpp:
-        (JSStringCreateWithCFString):
-        Reformat the code to use an early return instead of putting most of the code inside the body of an if statement.
-
-        (JSStringCopyCFString):
-        Create an 8-bit CFStringRef if possible.
-
-        * API/OpaqueJSString.cpp:
-        (OpaqueJSString::create):
-        Use nullptr.
-
-        (OpaqueJSString::~OpaqueJSString):
-        Free m_characters.
-
-        (OpaqueJSString::characters):
-        Do the up-conversion and store the result in m_characters.
-
-        (OpaqueJSString::equal):
-        New helper function.
-
-        * API/OpaqueJSString.h:
-        (OpaqueJSString::is8Bit):
-        New function that returns whether a string is 8-bit or not.
-
-        (OpaqueJSString::characters8):
-        (OpaqueJSString::characters16):
-        Add getters.
-
-2014-01-16  Anders Carlsson  
-
-        Change all uses of FINAL to final now that all our compilers support it
-        https://bugs.webkit.org/show_bug.cgi?id=127142
-
-        Reviewed by Benjamin Poulain.
-
-        * inspector/JSGlobalObjectInspectorController.h:
-        * inspector/agents/InspectorAgent.h:
-        * inspector/remote/RemoteInspector.h:
-        * inspector/remote/RemoteInspectorDebuggableConnection.h:
-        * inspector/scripts/CodeGeneratorInspector.py:
-        (Generator.go):
-        * runtime/JSGlobalObjectDebuggable.h:
-        * runtime/JSPromiseReaction.cpp:
-
-2014-01-16  Oliver Hunt  
-
-        throwing an objc object (or general binding object) triggers an assertion
-        https://bugs.webkit.org/show_bug.cgi?id=127146
-
-        Reviewed by Alexey Proskuryakov.
-
-        This is simply a bogus assertion as we can't guarantee a bindings object
-        won't intercept assignment to .stack.
-
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::unwind):
-
-2014-01-16  Peter Molnar  
-
-        Remove workaround for compilers not supporting explicit override control
-        https://bugs.webkit.org/show_bug.cgi?id=127111
-
-        Reviewed by Anders Carlsson.
-
-        Now all compilers support explicit override control, this workaround can be removed.
-
-        * API/JSAPIWrapperObject.mm:
-        * API/JSCallbackObject.h:
-        * API/JSManagedValue.mm:
-        * API/JSScriptRef.cpp:
-        * bytecode/CodeBlock.h:
-        * bytecode/CodeBlockJettisoningWatchpoint.h:
-        * bytecode/ProfiledCodeBlockJettisoningWatchpoint.h:
-        * bytecode/StructureStubClearingWatchpoint.h:
-        * dfg/DFGArrayifySlowPathGenerator.h:
-        * dfg/DFGCallArrayAllocatorSlowPathGenerator.h:
-        * dfg/DFGFailedFinalizer.h:
-        * dfg/DFGJITCode.h:
-        * dfg/DFGJITFinalizer.h:
-        * dfg/DFGSaneStringGetByValSlowPathGenerator.h:
-        * dfg/DFGSlowPathGenerator.h:
-        * dfg/DFGSpeculativeJIT64.cpp:
-        * heap/Heap.h:
-        * heap/IncrementalSweeper.h:
-        * heap/SuperRegion.h:
-        * inspector/InspectorValues.h:
-        * inspector/JSGlobalObjectInspectorController.h:
-        * inspector/agents/InspectorAgent.h:
-        * inspector/remote/RemoteInspector.h:
-        * inspector/remote/RemoteInspectorDebuggableConnection.h:
-        * inspector/scripts/CodeGeneratorInspector.py:
-        (Generator.go):
-        * jit/ClosureCallStubRoutine.h:
-        * jit/ExecutableAllocatorFixedVMPool.cpp:
-        * jit/GCAwareJITStubRoutine.h:
-        * jit/JITCode.h:
-        * jit/JITToDFGDeferredCompilationCallback.h:
-        * parser/Nodes.h:
-        * parser/SourceProvider.h:
-        * runtime/DataView.h:
-        * runtime/GCActivityCallback.h:
-        * runtime/GenericTypedArrayView.h:
-        * runtime/JSGlobalObjectDebuggable.h:
-        * runtime/JSPromiseReaction.cpp:
-        * runtime/RegExpCache.h:
-        * runtime/SimpleTypedArrayController.h:
-        * runtime/SymbolTable.h:
-        * runtime/WeakMapData.h:
-
-2014-01-15  Joseph Pecoraro  
-
-        [iOS] Clean up REMOTE_INSPECTOR code in OpenSource after the iOS merge
-        https://bugs.webkit.org/show_bug.cgi?id=127069
-
-        Reviewed by Timothy Hatcher.
-
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        Export XPCConnection because it is needed by RemoteInspector.h.
-
-        * inspector/remote/RemoteInspectorXPCConnection.h:
-        * inspector/remote/RemoteInspector.h:
-        * inspector/remote/RemoteInspector.mm:
-        (Inspector::RemoteInspector::startDisabled):
-        (Inspector::RemoteInspector::shared):
-        Allow the RemoteInspector singleton to start disabled.
-
-2014-01-15  Brian Burg  
-
-        Web Inspector: capture probe samples on the backend
-        https://bugs.webkit.org/show_bug.cgi?id=126668
-
-        Reviewed by Joseph Pecoraro.
-
-        Add the 'probe' breakpoint action to the protocol. Change the setBreakpoint
-        commands to return a list of assigned breakpoint action identifiers
-        Add a type for breakpoint action identifiers. Add an event for sending
-        captured probe samples to the inspector frontend.
-
-        * inspector/protocol/Debugger.json:
-
-2014-01-10  Mark Hahnenberg  
-
-        Copying should be generational
-        https://bugs.webkit.org/show_bug.cgi?id=126555
-
-        Reviewed by Geoffrey Garen.
-
-        This patch adds support for copying to our generational collector. Eden collections 
-        always trigger copying. Full collections use our normal fragmentation-based heuristics.
-
-        The way this works is that the CopiedSpace now has the notion of an old generation set of CopiedBlocks
-        and a new generation of CopiedBlocks. During each mutator cycle new CopiedSpace allocations reside
-        in the new generation. When a collection occurs, those blocks are moved to the old generation.
-
-        One key thing to remember is that both new and old generation objects in the MarkedSpace can
-        refer to old or new generation allocations in CopiedSpace. This is why we must fire write barriers 
-        when assigning to an old (MarkedSpace) object's Butterfly.
-
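-        An illustrative sketch of why the barrier matters (names simplified):
-
-            // An old MarkedSpace object may end up pointing at a new-generation
-            // CopiedSpace allocation, so the store must remember the owner.
-            void JSObject::setButterfly(VM& vm, Butterfly* butterfly)
-            {
-                m_butterfly = butterfly;
-                vm.heap.writeBarrier(this); // adds the owner to the remembered set
-            }
-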
-        * heap/CopiedAllocator.h:
-        (JSC::CopiedAllocator::tryAllocateDuringCopying):
-        * heap/CopiedBlock.h:
-        (JSC::CopiedBlock::CopiedBlock):
-        (JSC::CopiedBlock::didEvacuateBytes):
-        (JSC::CopiedBlock::isOld):
-        (JSC::CopiedBlock::didPromote):
-        * heap/CopiedBlockInlines.h:
-        (JSC::CopiedBlock::reportLiveBytes):
-        (JSC::CopiedBlock::reportLiveBytesDuringCopying):
-        * heap/CopiedSpace.cpp:
-        (JSC::CopiedSpace::CopiedSpace):
-        (JSC::CopiedSpace::~CopiedSpace):
-        (JSC::CopiedSpace::init):
-        (JSC::CopiedSpace::tryAllocateOversize):
-        (JSC::CopiedSpace::tryReallocateOversize):
-        (JSC::CopiedSpace::doneFillingBlock):
-        (JSC::CopiedSpace::didStartFullCollection):
-        (JSC::CopiedSpace::doneCopying):
-        (JSC::CopiedSpace::size):
-        (JSC::CopiedSpace::capacity):
-        (JSC::CopiedSpace::isPagedOut):
-        * heap/CopiedSpace.h:
-        (JSC::CopiedSpace::CopiedGeneration::CopiedGeneration):
-        * heap/CopiedSpaceInlines.h:
-        (JSC::CopiedSpace::contains):
-        (JSC::CopiedSpace::recycleEvacuatedBlock):
-        (JSC::CopiedSpace::allocateBlock):
-        (JSC::CopiedSpace::startedCopying):
-        * heap/CopyVisitor.cpp:
-        (JSC::CopyVisitor::copyFromShared):
-        * heap/CopyVisitorInlines.h:
-        (JSC::CopyVisitor::allocateNewSpace):
-        (JSC::CopyVisitor::allocateNewSpaceSlow):
-        * heap/GCThreadSharedData.cpp:
-        (JSC::GCThreadSharedData::didStartCopying):
-        * heap/Heap.cpp:
-        (JSC::Heap::copyBackingStores):
-        * heap/SlotVisitorInlines.h:
-        (JSC::SlotVisitor::copyLater):
-        * heap/TinyBloomFilter.h:
-        (JSC::TinyBloomFilter::add):
-
-2014-01-14  Mark Lam  
-
-        ASSERTION FAILED: !hasError() in JSC::Parser::createSavePoint().
-        https://bugs.webkit.org/show_bug.cgi?id=126990.
-
-        Reviewed by Geoffrey Garen.
-
-        * parser/Parser.cpp:
-        (JSC::Parser::parseConstDeclarationList):
-        - We were missing an error check after attempting to parse an initializer
-          expression. This is now fixed.
-
-2014-01-14  Joseph Pecoraro  
-
-        Web Inspector: For Remote Inspection link WebProcess's to their parent UIProcess
-        https://bugs.webkit.org/show_bug.cgi?id=126995
-
-        Reviewed by Timothy Hatcher.
-
-        * inspector/remote/RemoteInspector.mm:
-        (Inspector::RemoteInspector::listingForDebuggable):
-        For each WebView, list the parent process. Listing the parent per WebView
-        was already supported back when we supported processes that could host
-        WebViews for multiple applications.
-
-        * inspector/remote/RemoteInspectorConstants.h:
-        Add a separate key for the bundle identifier, separate from application identifier.
-
-        * inspector/remote/RemoteInspectorDebuggable.cpp:
-        (Inspector::RemoteInspectorDebuggable::info):
-        * inspector/remote/RemoteInspectorDebuggable.h:
-        (Inspector::RemoteInspectorDebuggableInfo::RemoteInspectorDebuggableInfo):
-        (Inspector::RemoteInspectorDebuggableInfo::hasParentProcess):
-        If a RemoteInspectorDebuggable has a non-zero parent process identifier
-        it is a proxy for the parent process.
-
-2014-01-14  Brian J. Burg  
-
-        Add ENABLE(WEB_REPLAY) feature flag to the build system
-        https://bugs.webkit.org/show_bug.cgi?id=126949
-
-        Reviewed by Joseph Pecoraro.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2014-01-14  Peter Molnar  
-
-        [EFL] FTL buildfix, add missing includes
-        https://bugs.webkit.org/show_bug.cgi?id=126641
-
-        Reviewed by Csaba Osztrogonác.
-
-        * ftl/FTLOSREntry.cpp:
-        * ftl/FTLOSRExitCompiler.cpp:
-
-2014-01-14  Joseph Pecoraro  
-
-        Web Inspector: RemoteInspector::updateDebuggable may miss a push
-        https://bugs.webkit.org/show_bug.cgi?id=126965
-
-        Reviewed by Timothy Hatcher.
-
-        * inspector/remote/RemoteInspector.mm:
-        (Inspector::RemoteInspector::updateDebuggable):
-        Always push an update. If a debuggable went from allowed to
-        not allowed, we would have missed pushing an update.
-
-2014-01-13  Mark Hahnenberg  
-
-        Performance regression on dromaeo due to generational marking
-        https://bugs.webkit.org/show_bug.cgi?id=126901
-
-        Reviewed by Oliver Hunt.
-
-        We were seeing some performance regression with ENABLE_GGC == 0, so this patch
-        ifdefs out more things to get rid of the additional overhead.
-
-        * heap/Heap.cpp:
-        (JSC::Heap::markRoots):
-        (JSC::Heap::writeBarrier):
-        * heap/MarkedBlock.cpp:
-        (JSC::MarkedBlock::clearMarks):
-        (JSC::MarkedBlock::clearMarksWithCollectionType):
-        * heap/MarkedSpace.cpp:
-        (JSC::MarkedSpace::resetAllocators):
-        * heap/MarkedSpace.h:
-        (JSC::MarkedSpace::didAllocateInBlock):
-        * heap/SlotVisitorInlines.h:
-        (JSC::SlotVisitor::internalAppend):
-        (JSC::SlotVisitor::reportExtraMemoryUsage):
-
-2014-01-13  Brian Burg  
-
-        Web Inspector: protocol generator should support integer-typed declarations
-        https://bugs.webkit.org/show_bug.cgi?id=126828
-
-        Reviewed by Joseph Pecoraro.
-
-        Add new binding classes for parameter/ad-hoc and normal integer type declarations.
-
-        * inspector/scripts/CodeGeneratorInspector.py:
-        (TypeBindings.create_type_declaration_):
-        (TypeBindings.create_type_declaration_.PlainInteger):
-        (TypeBindings.create_type_declaration_.PlainInteger.resolve_inner):
-        (TypeBindings.create_type_declaration_.PlainInteger.request_user_runtime_cast):
-        (TypeBindings.create_type_declaration_.PlainInteger.request_internal_runtime_cast):
-        (TypeBindings.create_type_declaration_.PlainInteger.get_code_generator):
-        (TypeBindings.create_type_declaration_.PlainInteger.get_validator_call_text):
-        (TypeBindings.create_type_declaration_.PlainInteger.reduce_to_raw_type):
-        (TypeBindings.create_type_declaration_.PlainInteger.get_type_model):
-        (TypeBindings.create_type_declaration_.PlainInteger.get_setter_value_expression_pattern):
-        (TypeBindings.create_type_declaration_.PlainInteger.get_array_item_c_type_text):
-        (TypeBindings.create_type_declaration_.TypedefInteger):
-        (TypeBindings.create_type_declaration_.TypedefInteger.resolve_inner):
-        (TypeBindings.create_type_declaration_.TypedefInteger.request_user_runtime_cast):
-        (TypeBindings.create_type_declaration_.TypedefInteger.request_internal_runtime_cast):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_code_generator):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_code_generator.CodeGenerator):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_code_generator.CodeGenerator.generate_type_builder):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_code_generator.CodeGenerator.generate_type_builder.int):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_code_generator.CodeGenerator.register_use):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_code_generator.CodeGenerator.get_generate_pass_id):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_validator_call_text):
-        (TypeBindings.create_type_declaration_.TypedefInteger.reduce_to_raw_type):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_type_model):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_setter_value_expression_pattern):
-        (TypeBindings.create_type_declaration_.TypedefInteger.get_array_item_c_type_text):
-
-2014-01-13  Zalan Bujtas  
-
-        Enable SUBPIXEL_LAYOUT on Mac
-        
-
-        Reviewed by Simon Fraser.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2014-01-13  Zan Dobersek  
-
-        Unreviewed. Changes in r161686 are exposing a bug in GCC where the global .cfi_startproc directive
-        is not inserted early enough into the generated assembler code when building in debug mode, causing
-        compilation failures on ports using the GCC compilers. To work around the problem, only utilize the
-        OFFLINE_ASM_* macros that use .cfi_ directives when compiling with Clang.
-
-        * llint/LowLevelInterpreter.cpp:
-
-2014-01-12  Commit Queue  
-
-        Unreviewed, rolling out r161840.
-        http://trac.webkit.org/changeset/161840
-        https://bugs.webkit.org/show_bug.cgi?id=126870
-
-        Caused jsscore and layout test failures (Requested by smfr on
-        #webkit).
-
-        * API/JSValueRef.cpp:
-        (JSValueMakeFromJSONString):
-        * bindings/ScriptValue.cpp:
-        (Deprecated::jsToInspectorValue):
-        * inspector/InspectorValues.cpp:
-        * runtime/DatePrototype.cpp:
-        (JSC::formatLocaleDate):
-        * runtime/Identifier.h:
-        (JSC::Identifier::characters):
-        * runtime/JSStringBuilder.h:
-        (JSC::JSStringBuilder::append):
-
-2014-01-12  Darin Adler  
-
-        Add deprecatedCharacters as a synonym for characters and convert most call sites
-        https://bugs.webkit.org/show_bug.cgi?id=126858
-
-        Reviewed by Anders Carlsson.
-
-        * API/JSStringRef.cpp:
-        (JSStringGetCharactersPtr):
-        (JSStringGetUTF8CString):
-        (JSStringIsEqual):
-        * API/JSStringRefCF.cpp:
-        (JSStringCopyCFString):
-        * API/OpaqueJSString.h:
-        (OpaqueJSString::characters):
-        (OpaqueJSString::deprecatedCharacters):
-        (OpaqueJSString::length):
-        (OpaqueJSString::OpaqueJSString):
-        * inspector/InspectorValues.cpp:
-        (Inspector::InspectorValue::parseJSON):
-        * runtime/JSGlobalObjectFunctions.cpp:
-        (JSC::parseInt):
-        * runtime/StringPrototype.cpp:
-        (JSC::localeCompare):
-        (JSC::stringProtoFuncFontsize):
-        (JSC::stringProtoFuncLink):
-        Use deprecatedCharacters instead of characters.
-
-2014-01-12  Darin Adler  
-
-        Reduce use of String::characters
-        https://bugs.webkit.org/show_bug.cgi?id=126854
-
-        Reviewed by Sam Weinig.
-
-        * API/JSValueRef.cpp:
-        (JSValueMakeFromJSONString): Use characters16 instead of characters for the
-        16-bit case. Had to remove the length check because an empty string could be
-        either 8-bit or 16-bit. We don't need a null string check before calling
-        is8Bit because JSStringRef can't hold a null string.
-
-        * bindings/ScriptValue.cpp:
-        (Deprecated::jsToInspectorValue): Use the existing string here instead of creating
-        a new one by calling characters and length on the old string. I think this may be
-        left over from when string types were not the same in JavaScriptCore and WebCore.
-        Also rewrite the property names loop to use modern for syntax and fewer locals.
-
-        * inspector/InspectorValues.cpp:
-        (Inspector::escapeChar): Changed to use appendLiteral instead of hard-coding string
-        lengths. Moved handling of "<" and ">" in here instead of at the call site.
-        (Inspector::doubleQuoteString): Simplify the code so there is no use of characters
-        and length. This is still an inefficient way of doing this job and could use a rethink.
-
-        * runtime/DatePrototype.cpp:
-        (JSC::formatLocaleDate): Use RetainPtr, createCFString, and the conversion from
-        CFStringRef to WTF::String to remove a lot of unneeded code.
-
-        * runtime/Identifier.h: Removed unneeded Identifier::characters function.
-
-        * runtime/JSStringBuilder.h:
-        (JSC::JSStringBuilder::append): Use characters16 instead of characters function here,
-        since we have already checked is8Bit above.
-
-2014-01-12  Andy Estes  
-
-        [iOS] Enable the JSC Objective-C API
-
-        Rubber-stamped by Simon Fraser.
-
-        * API/JSBase.h:
-
-2014-01-12  Carlos Garcia Campos  
-
-        Unreviewed. Fix make distcheck.
-
-        * GNUmakefile.am: Add inline-and-minify-stylesheets-and-scripts.py
-        to EXTRA_DIST and fix InjectedScriptSource.h generation rule.
-        * GNUmakefile.list.am: Move InjectedScriptSource.h to
-        built_nosources to make sure it's not disted.
-
-2014-01-11  Anders Carlsson  
-
-        Try again to fix the build.
-
-        * inspector/InspectorAgentRegistry.cpp:
-        * inspector/InspectorAgentRegistry.h:
-
-2014-01-11  Anders Carlsson  
-
-        Try to prevent the Vector copy constructor from being instantiated.
-
-        * inspector/InspectorAgentRegistry.cpp:
-        (Inspector::InspectorAgentRegistry::InspectorAgentRegistry):
-        * inspector/InspectorAgentRegistry.h:
-
-2014-01-11  Anders Carlsson  
-
-        Try something else.
-
-        * inspector/InspectorAgentRegistry.cpp:
-        (Inspector::InspectorAgentRegistry::~InspectorAgentRegistry):
-        * inspector/InspectorAgentRegistry.h:
-
-2014-01-11  Dean Jackson  
-
-        [JSC] Revise typed array implementations to match ECMAScript and WebGL Specification
-        https://bugs.webkit.org/show_bug.cgi?id=126754
-
-        Reviewed by Filip Pizlo.
-
-        The ECMAScript specification forbids calling the typed array
-        constructors without using "new". Change the call data to return
-        none so we throw an exception in these cases.
-
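-        Conceptually, the change is (a sketch; the enum spelling of the era
-        may differ):
-
-            template<typename ViewClass>
-            CallType JSGenericTypedArrayViewConstructor<ViewClass>::getCallData(JSCell*, CallData&)
-            {
-                // Construct-only: plain calls like Uint8Array(10) now throw.
-                return CallTypeNone;
-            }
-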
-        * runtime/JSGenericTypedArrayViewConstructorInlines.h:
-        (JSC::JSGenericTypedArrayViewConstructor::getCallData):
-
-2014-01-11  Anders Carlsson  
-
-        Try to fix the build by introducing a constructor.
-
-        * inspector/InspectorAgentRegistry.cpp:
-        (Inspector::InspectorAgentRegistry::InspectorAgentRegistry):
-        * inspector/InspectorAgentRegistry.h:
-
-2014-01-11  Anders Carlsson  
-
-        * inspector/InspectorAgentRegistry.h:
-
-        Remove an unused function.
-
-2014-01-11  Anders Carlsson  
-
-        InspectorAgentRegistry should use std::unique_ptr
-        https://bugs.webkit.org/show_bug.cgi?id=126826
-
-        Reviewed by Sam Weinig.
-
-        * inspector/InspectorAgentRegistry.cpp:
-        (Inspector::InspectorAgentRegistry::append):
-        * inspector/InspectorAgentRegistry.h:
-        * inspector/JSGlobalObjectInspectorController.cpp:
-        (Inspector::JSGlobalObjectInspectorController::JSGlobalObjectInspectorController):
-        * inspector/agents/InspectorAgent.h:
-
-2014-01-10  Joseph Pecoraro  
-
-        Web Inspector: Push InspectorAgent down into JSC, give JSC an InspectorController
-        https://bugs.webkit.org/show_bug.cgi?id=126763
-
-        Reviewed by Timothy Hatcher.
-
-        Introduce JSGlobalObjectInspectorController. This is the InspectorController
-        for a JSContext. It is created by the JSGlobalObject Remote Inspector Debuggable
-        when a remote frontend connects, and is destroyed when the remote frontend
-        disconnects or the JSGlobalObject is destroyed.
-
-        * inspector/JSGlobalObjectInspectorController.h: Added.
-        * inspector/JSGlobalObjectInspectorController.cpp: Added.
-        (Inspector::JSGlobalObjectInspectorController::JSGlobalObjectInspectorController):
-        (Inspector::JSGlobalObjectInspectorController::~JSGlobalObjectInspectorController):
-        (Inspector::JSGlobalObjectInspectorController::connectFrontend):
-        (Inspector::JSGlobalObjectInspectorController::disconnectFrontend):
-        (Inspector::JSGlobalObjectInspectorController::dispatchMessageFromFrontend):
-        (Inspector::JSGlobalObjectInspectorController::functionCallHandler):
-        (Inspector::JSGlobalObjectInspectorController::evaluateHandler):
-        Create/destroy agents, create/destroy dispatchers, implement InspectorEnvironment.
-
-        * runtime/JSGlobalObjectDebuggable.h:
-        * runtime/JSGlobalObjectDebuggable.cpp:
-        (JSC::JSGlobalObjectDebuggable::~JSGlobalObjectDebuggable):
-        (JSC::JSGlobalObjectDebuggable::connect):
-        (JSC::JSGlobalObjectDebuggable::disconnect):
-        (JSC::JSGlobalObjectDebuggable::dispatchMessageFromRemoteFrontend):
-        Forward actions to the InspectorController object.
-
-        * inspector/agents/InspectorAgent.h: Renamed from Source/WebCore/inspector/InspectorAgent.h.
-        * inspector/agents/InspectorAgent.cpp: Renamed from Source/WebCore/inspector/InspectorAgent.cpp.
-        (Inspector::InspectorAgent::InspectorAgent):
-        (Inspector::InspectorAgent::~InspectorAgent):
-        (Inspector::InspectorAgent::didCreateFrontendAndBackend):
-        (Inspector::InspectorAgent::inspect):
-        (Inspector::InspectorAgent::evaluateForTestInFrontend):
-        Implement InspectorAgent in JavaScriptCore in namespace Inspector.
-
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * CMakeLists.txt:
-        * ChangeLog:
-        * GNUmakefile.am:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.vcxproj/copy-files.cmd:
-        Add files and new inspector/agents subdirectory.
-
-2014-01-10  Commit Queue  
-
-        Unreviewed, rolling out r161702.
-        http://trac.webkit.org/changeset/161702
-        https://bugs.webkit.org/show_bug.cgi?id=126803
-
-        Broke multiple tests (Requested by ap on #webkit).
-
-        * runtime/JSGenericTypedArrayViewConstructorInlines.h:
-        (JSC::JSGenericTypedArrayViewConstructor::getCallData):
-
-2014-01-10  David Kilzer  
-
-        Clean up architectures in xcconfig files
-        
-
-        Reviewed by Andy Estes.
-
-        * Configurations/Base.xcconfig:
-        * Configurations/JavaScriptCore.xcconfig: Remove armv6, ppc.
-        * Configurations/ToolExecutable.xcconfig: Sort.
-        - Add new arch.
-
-2014-01-10  Dean Jackson  
-
-        [JSC] Revise typed array implementations to match ECMAScript and WebGL Specification
-        https://bugs.webkit.org/show_bug.cgi?id=126754
-
-        Reviewed by Filip Pizlo.
-
-        The ECMAScript specification forbids calling the typed array
-        constructors without using "new". Change the call data to return
-        none so we throw an exception in these cases.
-
-        * runtime/JSGenericTypedArrayViewConstructorInlines.h:
-        (JSC::JSGenericTypedArrayViewConstructor::getCallData):
-
-2014-01-10  Benjamin Poulain  
-
-        Remove the BlackBerry port from trunk
-        https://bugs.webkit.org/show_bug.cgi?id=126715
-
-        Reviewed by Anders Carlsson.
-
-        * assembler/ARMAssembler.h:
-        (JSC::ARMAssembler::cacheFlush):
-        * assembler/ARMv7Assembler.h:
-        (JSC::ARMv7Assembler::replaceWithJump):
-        (JSC::ARMv7Assembler::maxJumpReplacementSize):
-        (JSC::ARMv7Assembler::cacheFlush):
-        * assembler/MacroAssemblerARMv7.h:
-        (JSC::MacroAssemblerARMv7::revertJumpReplacementToBranchPtrWithPatch):
-        * heap/MachineStackMarker.cpp:
-        (JSC::getPlatformThreadRegisters):
-        (JSC::otherThreadStackPointer):
-        (JSC::freePlatformThreadRegisters):
-        * jit/ExecutableAllocator.h:
-
-2014-01-10  Joseph Pecoraro  
-
-        Web Inspector: Remove unimplemented or static ScriptDebugServer features
-        https://bugs.webkit.org/show_bug.cgi?id=126784
-
-        Reviewed by Timothy Hatcher.
-
-        * inspector/protocol/Debugger.json:
-
-2014-01-10  Michael Saboff  
-
-        REGRESSION(C stack work): stack traces no longer work in CrashTracer, lldb, and other tools
-        https://bugs.webkit.org/show_bug.cgi?id=126764
-
-        Reviewed by Geoffrey Garen.
-
-        Updated callToJavaScript and callToNativeFunction to properly replicate the caller's
-        return PC and frame pointer in the sentinel frame.  For X86-64, added .cfi_
-        directives to create eh_frame info for all LLInt symbols so that the various
-        unwinding code understands that we are using a separate JS stack referenced
-        by BP and at what offsets in that frame the prior PC (register 16) and prior
-        BP (register 6) can be found.  These two changes are sufficient for stack tracing
-        to work for Mac OSX.
-
-        * llint/LowLevelInterpreter.cpp:
-        * llint/LowLevelInterpreter64.asm:
-
-2014-01-10  Tamas Gergely  
-
-        [EFL][JSC] Enable udis86 disassembler on efl.
-        https://bugs.webkit.org/show_bug.cgi?id=125502
-
-        Reviewed by Michael Saboff.
-
-        Enable udis86 disassembler on efl and fix build warnings.
-
-        * CMakeLists.txt:
-          Add udis86 disassembler source files.
-        * disassembler/udis86/udis86_decode.c:
-        (decode_modrm_rm):
-          Build warning fixes.
-        * disassembler/udis86/udis86_syn-att.c:
-        (gen_operand):
-          Build warning fixes.
-        * disassembler/udis86/udis86_syn-intel.c:
-        (gen_operand):
-          Build warning fixes.
-        * disassembler/udis86/udis86_types.h:
-          Correct FMT64 for uint64_t.
-
-2014-01-09  Benjamin Poulain  
-
-        Remove the BlackBerry files outside WebCore
-        https://bugs.webkit.org/show_bug.cgi?id=126715
-
-        Reviewed by Anders Carlsson.
-
-        * PlatformBlackBerry.cmake: Removed.
-        * runtime/GCActivityCallbackBlackBerry.cpp: Removed.
-        * shell/PlatformBlackBerry.cmake: Removed.
-
-2014-01-10  Geoffrey Garen  
-
-        Removed Blackberry #ifdefs and platform code from JavaScriptCore
-        https://bugs.webkit.org/show_bug.cgi?id=126757
-
-        Reviewed by Sam Weinig.
-
-        * PlatformBlackBerry.cmake: Removed.
-        * heap/HeapTimer.cpp:
-        * heap/HeapTimer.h:
-        * heap/IncrementalSweeper.cpp:
-        * heap/IncrementalSweeper.h:
-        * jsc.cpp:
-        (main):
-        * runtime/GCActivityCallbackBlackBerry.cpp: Removed.
-        * runtime/MemoryStatistics.cpp:
-        (JSC::globalMemoryStatistics):
-
-2014-01-07  Mark Hahnenberg  
-
-        Marking should be generational
-        https://bugs.webkit.org/show_bug.cgi?id=126552
-
-        Reviewed by Geoffrey Garen.
-
-        Re-marking the same objects over and over is a waste of effort. This patch implements 
-        the sticky mark bit algorithm (along with our already-present write barriers) to reduce 
-        overhead during garbage collection caused by rescanning objects.
-
-        There are now two collection modes, EdenCollection and FullCollection. EdenCollections
-        only visit new objects or objects that were added to the remembered set by a write barrier.
-        FullCollections are normal collections that visit all objects regardless of their 
-        generation.
-
-        In this patch EdenCollections do not do anything in CopiedSpace. This will be fixed in 
-        https://bugs.webkit.org/show_bug.cgi?id=126555.
-
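-        A sketch of the two collection scopes (visitRoots and
-        visitRememberedSet are hypothetical stand-ins for the real marking
-        phases):
-
-            void Heap::markRoots()
-            {
-                if (m_operationInProgress == FullCollection)
-                    m_objectSpace.clearMarks(); // restart marking from scratch
-                // An EdenCollection keeps the previous cycle's mark bits
-                // ("sticky" marks), so old objects are revisited only if a
-                // write barrier put them in the remembered set.
-                visitRoots();
-                visitRememberedSet();
-            }
-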
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::visitAggregate):
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlockSet::mark):
-        * dfg/DFGOperations.cpp:
-        * heap/CodeBlockSet.cpp:
-        (JSC::CodeBlockSet::add):
-        (JSC::CodeBlockSet::traceMarked):
-        (JSC::CodeBlockSet::rememberCurrentlyExecutingCodeBlocks):
-        * heap/CodeBlockSet.h:
-        * heap/CopiedBlockInlines.h:
-        (JSC::CopiedBlock::reportLiveBytes):
-        * heap/CopiedSpace.cpp:
-        (JSC::CopiedSpace::didStartFullCollection):
-        * heap/CopiedSpace.h:
-        (JSC::CopiedSpace::heap):
-        * heap/Heap.cpp:
-        (JSC::Heap::Heap):
-        (JSC::Heap::didAbandon):
-        (JSC::Heap::markRoots):
-        (JSC::Heap::copyBackingStores):
-        (JSC::Heap::addToRememberedSet):
-        (JSC::Heap::collectAllGarbage):
-        (JSC::Heap::collect):
-        (JSC::Heap::didAllocate):
-        (JSC::Heap::writeBarrier):
-        * heap/Heap.h:
-        (JSC::Heap::isInRememberedSet):
-        (JSC::Heap::operationInProgress):
-        (JSC::Heap::shouldCollect):
-        (JSC::Heap::isCollecting):
-        (JSC::Heap::isWriteBarrierEnabled):
-        (JSC::Heap::writeBarrier):
-        * heap/HeapOperation.h:
-        * heap/MarkStack.cpp:
-        (JSC::MarkStackArray::~MarkStackArray):
-        (JSC::MarkStackArray::clear):
-        (JSC::MarkStackArray::fillVector):
-        * heap/MarkStack.h:
-        * heap/MarkedAllocator.cpp:
-        (JSC::isListPagedOut):
-        (JSC::MarkedAllocator::isPagedOut):
-        (JSC::MarkedAllocator::tryAllocateHelper):
-        (JSC::MarkedAllocator::addBlock):
-        (JSC::MarkedAllocator::removeBlock):
-        (JSC::MarkedAllocator::reset):
-        * heap/MarkedAllocator.h:
-        (JSC::MarkedAllocator::MarkedAllocator):
-        * heap/MarkedBlock.cpp:
-        (JSC::MarkedBlock::clearMarks):
-        (JSC::MarkedBlock::clearRememberedSet):
-        (JSC::MarkedBlock::clearMarksWithCollectionType):
-        (JSC::MarkedBlock::lastChanceToFinalize):
-        * heap/MarkedBlock.h: Changed atomSize to 16 bytes because we have no objects smaller
-        than 16 bytes. This also pays for the additional Bitmap for the remembered set.
-        (JSC::MarkedBlock::didConsumeEmptyFreeList):
-        (JSC::MarkedBlock::setRemembered):
-        (JSC::MarkedBlock::clearRemembered):
-        (JSC::MarkedBlock::atomicClearRemembered):
-        (JSC::MarkedBlock::isRemembered):
-        * heap/MarkedSpace.cpp:
-        (JSC::MarkedSpace::~MarkedSpace):
-        (JSC::MarkedSpace::resetAllocators):
-        (JSC::MarkedSpace::visitWeakSets):
-        (JSC::MarkedSpace::reapWeakSets):
-        (JSC::VerifyMarked::operator()):
-        (JSC::MarkedSpace::clearMarks):
-        * heap/MarkedSpace.h:
-        (JSC::ClearMarks::operator()):
-        (JSC::ClearRememberedSet::operator()):
-        (JSC::MarkedSpace::didAllocateInBlock):
-        (JSC::MarkedSpace::clearRememberedSet):
-        * heap/SlotVisitor.cpp:
-        (JSC::SlotVisitor::~SlotVisitor):
-        (JSC::SlotVisitor::clearMarkStack):
-        * heap/SlotVisitor.h:
-        (JSC::SlotVisitor::markStack):
-        (JSC::SlotVisitor::sharedData):
-        * heap/SlotVisitorInlines.h:
-        (JSC::SlotVisitor::internalAppend):
-        (JSC::SlotVisitor::unconditionallyAppend):
-        (JSC::SlotVisitor::copyLater):
-        (JSC::SlotVisitor::reportExtraMemoryUsage):
-        (JSC::SlotVisitor::heap):
-        * jit/Repatch.cpp:
-        * runtime/JSGenericTypedArrayViewInlines.h:
-        (JSC::JSGenericTypedArrayView::visitChildren):
-        * runtime/JSPropertyNameIterator.h:
-        (JSC::StructureRareData::setEnumerationCache):
-        * runtime/JSString.cpp:
-        (JSC::JSString::visitChildren):
-        * runtime/StructureRareDataInlines.h:
-        (JSC::StructureRareData::setPreviousID):
-        (JSC::StructureRareData::setObjectToStringValue):
-        * runtime/WeakMapData.cpp:
-        (JSC::WeakMapData::visitChildren):
-
-2014-01-09  Joseph Pecoraro  
-
-        Unreviewed Windows build fix for r161563.
-
-        Copy all scripts, since some may not be .py.
-
-        * JavaScriptCore.vcxproj/copy-files.cmd:
-
-2014-01-09  Filip Pizlo  
-
-        AI for CreateArguments should pass through non-SpecEmpty input values
-        https://bugs.webkit.org/show_bug.cgi?id=126709
-
-        Reviewed by Mark Hahnenberg.
-
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::AbstractInterpreter::executeEffects):
-        * tests/stress/use-arguments-as-object-pointer.js: Added.
-        (foo):
-
-2014-01-09  Mark Hahnenberg  
-
-        Constructors for Objective-C classes do not work properly with instanceof
-        https://bugs.webkit.org/show_bug.cgi?id=126670
-
-        Reviewed by Oliver Hunt.
-
-        This bug is due to the fact that the JS constructors created for Objective-C classes via the JSC 
-        API inherit from JSCallbackObject, which overrides hasInstance with its own customHasInstance. 
-        JSCallbackObject::customHasInstance only checks the JSClassRefs for hasInstance callbacks. 
-        If it doesn't find any callbacks, it returns false.
-
-        This patch adds a hasInstance callback to constructors created for Objective-C wrapper classes.
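-
-        A hedged sketch of the C API hook that customHasInstance consults; the
-        callback name matches the new function in JSWrapperMap.mm, but the body
-        here is illustrative:
-
-            #include <JavaScriptCore/JavaScriptCore.h>
-
-            static bool constructorHasInstance(JSContextRef ctx, JSObjectRef constructor,
-                JSValueRef possibleInstance, JSValueRef* exception)
-            {
-                // Walk the prototype chain of possibleInstance against the
-                // constructor's prototype here.
-                return false;
-            }
-
-            void installHasInstance(JSClassDefinition* definition)
-            {
-                *definition = kJSClassDefinitionEmpty;
-                definition->hasInstance = constructorHasInstance;
-            }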
-
-        * API/JSWrapperMap.mm:
-        (constructorHasInstance):
-        (constructorWithCustomBrand):
-        (allocateConstructorForCustomClass):
-        * API/tests/testapi.mm:
-
-2014-01-09  Joseph Pecoraro  
-
-        Web Inspector: Move InjectedScript classes into JavaScriptCore
-        https://bugs.webkit.org/show_bug.cgi?id=126598
-
-        Reviewed by Timothy Hatcher.
-
-        Part 5: Move InjectedScript classes into JavaScriptCore
-
-        There are pieces of logic in InjectedScript execution that WebCore wants to
-        hook into (e.g. for CommandLineAPIModule and InspectorInstrumentation). Create
-        hooks for those in a base class called InspectorEnvironment. For now, the
-        InspectorControllers (Page, JSGlobalObject, Worker) will act as the
-        InspectorEnvironments and provide answers to these hooks.
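-
-        A hedged sketch of the hook-collection shape (the member names here are
-        illustrative, not the exact contents of InspectorEnvironment.h):
-
-            class InspectorEnvironment {
-            public:
-                virtual ~InspectorEnvironment() { }
-                virtual bool developerExtrasEnabled() const = 0;
-                // Call/evaluate hooks let WebCore interpose on
-                // ScriptFunctionCalls and on access checks.
-                virtual bool canAccessInspectedScriptState() const = 0;
-            };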
-
-        * inspector/InspectorEnvironment.h: Added.
-        New hooks needed by WebCore in various places. Mostly stubbed in JavaScriptCore.
-
-        * inspector/InjectedScript.cpp: Renamed from Source/WebCore/inspector/InjectedScript.cpp.
-        * inspector/InjectedScript.h: Added.
-        * inspector/InjectedScriptBase.cpp: Renamed from Source/WebCore/inspector/InjectedScriptBase.cpp.
-        * inspector/InjectedScriptBase.h: Renamed from Source/WebCore/inspector/InjectedScriptBase.h.
-        * inspector/InjectedScriptModule.cpp: Renamed from Source/WebCore/inspector/InjectedScriptModule.cpp.
-        * inspector/InjectedScriptModule.h: Renamed from Source/WebCore/inspector/InjectedScriptModule.h.
-        Clean up the style of these files (nullptr, formatting, whitespace, etc.).
-        Use the InspectorEnvironment's call/evaluate functions for ScriptFunctionCalls and for checking access.
-
-        * inspector/InjectedScriptManager.cpp: Renamed from Source/WebCore/inspector/InjectedScriptManager.cpp.
-        * inspector/InjectedScriptManager.h: Renamed from Source/WebCore/inspector/InjectedScriptManager.h.
-        Take an InspectorEnvironment with multiple hooks, instead of a single hook function.
-
-        * inspector/InjectedScriptHost.cpp: Added.
-        * inspector/InjectedScriptHost.h: Added.
-        * inspector/JSInjectedScriptHost.cpp: Renamed from Source/WebCore/bindings/js/JSInjectedScriptHostCustom.cpp.
-        * inspector/JSInjectedScriptHost.h: Added.
-        * inspector/JSInjectedScriptHostPrototype.cpp: Added.
-        * inspector/JSInjectedScriptHostPrototype.h: Added.
-        Implementation of InjectedScriptHost which is passed into the script (InjectedScriptSource.js)
-        that we inject into the page. This is mostly copied from the original autogenerated code,
-        then simplified and cleaned up. InjectedScriptHost can be subclassed to provide specialized
-        implementations of isHTMLAllCollection and type for Web/DOM types unknown to a pure JS context.
-
-
-        Part 4: Move all inspector scripts into JavaScriptCore and update generators.
-
-        For OS X be sure to export the scripts as if they are private headers.
-
-        * GNUmakefile.am:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * inspector/scripts/cssmin.py: Renamed from Source/WebCore/inspector/Scripts/cssmin.py.
-        * inspector/scripts/inline-and-minify-stylesheets-and-scripts.py: Renamed from Source/WebCore/inspector/Scripts/inline-and-minify-stylesheets-and-scripts.py.
-        * inspector/scripts/jsmin.py: Renamed from Source/WebCore/inspector/Scripts/jsmin.py.
-        * inspector/scripts/xxd.pl: Renamed from Source/WebCore/inspector/xxd.pl.
-
-
-        Part 3: Update CodeGeneratorInspector to avoid inlining virtual destructors.
-
-        This avoids build errors about duplicate exported virtual inline methods
-        when they are included from multiple places. Just put empty destructors in the
-        implementation file instead of inlining them.
-
-        * inspector/scripts/CodeGeneratorInspector.py:
-        (Generator):
-        (Generator.go):
-        * inspector/scripts/CodeGeneratorInspectorStrings.py:
-
-
-        Part 2: Move InjectedScriptSource and generation into JavaScriptCore.
-
-        Move InjectedScriptSource.js and derived sources generation.
-
-        * CMakeLists.txt:
-        * DerivedSources.make:
-        * GNUmakefile.am:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * inspector/InjectedScriptSource.js: Renamed from Source/WebCore/inspector/InjectedScriptSource.js.
-
-2014-01-09  Balazs Kilvady  
-
-        Regression: failing RegExp tests on 32 bit architectures.
-        https://bugs.webkit.org/show_bug.cgi?id=126699
-
-        Reviewed by Michael Saboff.
-
-        Fix setRegExpConstructor functions for 32 bit architectures.
-
-        * runtime/RegExpConstructor.cpp:
-        (JSC::setRegExpConstructorInput):
-        (JSC::setRegExpConstructorMultiline):
-
-2014-01-09  Commit Queue  
-
-        Unreviewed, rolling out r161540.
-        http://trac.webkit.org/changeset/161540
-        https://bugs.webkit.org/show_bug.cgi?id=126704
-
-        Caused assertion failures on multiple tests (Requested by ap
-        on #webkit).
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::visitAggregate):
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlockSet::mark):
-        * dfg/DFGOperations.cpp:
-        * heap/CodeBlockSet.cpp:
-        (JSC::CodeBlockSet::add):
-        (JSC::CodeBlockSet::traceMarked):
-        * heap/CodeBlockSet.h:
-        * heap/CopiedBlockInlines.h:
-        (JSC::CopiedBlock::reportLiveBytes):
-        * heap/CopiedSpace.cpp:
-        * heap/CopiedSpace.h:
-        * heap/Heap.cpp:
-        (JSC::Heap::Heap):
-        (JSC::Heap::didAbandon):
-        (JSC::Heap::markRoots):
-        (JSC::Heap::copyBackingStores):
-        (JSC::Heap::collectAllGarbage):
-        (JSC::Heap::collect):
-        (JSC::Heap::didAllocate):
-        * heap/Heap.h:
-        (JSC::Heap::shouldCollect):
-        (JSC::Heap::isCollecting):
-        (JSC::Heap::isWriteBarrierEnabled):
-        (JSC::Heap::writeBarrier):
-        * heap/HeapOperation.h:
-        * heap/MarkStack.cpp:
-        (JSC::MarkStackArray::~MarkStackArray):
-        * heap/MarkStack.h:
-        * heap/MarkedAllocator.cpp:
-        (JSC::MarkedAllocator::isPagedOut):
-        (JSC::MarkedAllocator::tryAllocateHelper):
-        (JSC::MarkedAllocator::addBlock):
-        (JSC::MarkedAllocator::removeBlock):
-        * heap/MarkedAllocator.h:
-        (JSC::MarkedAllocator::MarkedAllocator):
-        (JSC::MarkedAllocator::reset):
-        * heap/MarkedBlock.cpp:
-        * heap/MarkedBlock.h:
-        (JSC::MarkedBlock::lastChanceToFinalize):
-        (JSC::MarkedBlock::didConsumeEmptyFreeList):
-        (JSC::MarkedBlock::clearMarks):
-        * heap/MarkedSpace.cpp:
-        (JSC::MarkedSpace::~MarkedSpace):
-        (JSC::MarkedSpace::resetAllocators):
-        (JSC::MarkedSpace::visitWeakSets):
-        (JSC::MarkedSpace::reapWeakSets):
-        * heap/MarkedSpace.h:
-        (JSC::ClearMarks::operator()):
-        (JSC::MarkedSpace::clearMarks):
-        * heap/SlotVisitor.cpp:
-        (JSC::SlotVisitor::~SlotVisitor):
-        * heap/SlotVisitor.h:
-        (JSC::SlotVisitor::sharedData):
-        * heap/SlotVisitorInlines.h:
-        (JSC::SlotVisitor::internalAppend):
-        (JSC::SlotVisitor::copyLater):
-        (JSC::SlotVisitor::reportExtraMemoryUsage):
-        * jit/Repatch.cpp:
-        * runtime/JSGenericTypedArrayViewInlines.h:
-        (JSC::JSGenericTypedArrayView::visitChildren):
-        * runtime/JSPropertyNameIterator.h:
-        (JSC::StructureRareData::setEnumerationCache):
-        * runtime/JSString.cpp:
-        (JSC::JSString::visitChildren):
-        * runtime/StructureRareDataInlines.h:
-        (JSC::StructureRareData::setPreviousID):
-        (JSC::StructureRareData::setObjectToStringValue):
-        * runtime/WeakMapData.cpp:
-        (JSC::WeakMapData::visitChildren):
-
-2014-01-09  Andreas Kling  
-
-        Shrink WatchpointSet.
-        
-
-        Reorder the members of WatchpointSet, shrinking it by 8 bytes.
-        767 kB progression on Membuster3.
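-
-        A generic C++ illustration of why reordering members shrinks a class
-        (padding removal; the structs are illustrative, not WatchpointSet):
-
-            #include <cstdint>
-
-            struct Padded { uint8_t flag; void* ptr; uint8_t count; }; // 24 bytes on LP64
-            struct Packed { void* ptr; uint8_t flag; uint8_t count; }; // 16 bytes on LP64
-
-            static_assert(sizeof(Packed) < sizeof(Padded), "reordering removed padding");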
-
-        Reviewed by Antti Koivisto.
-
-        * bytecode/Watchpoint.h:
-
-2014-01-08  Mark Hahnenberg  
-
-        Reverting accidental GC logging
-
-        * heap/Heap.cpp:
-
-2014-01-07  Mark Hahnenberg  
-
-        Marking should be generational
-        https://bugs.webkit.org/show_bug.cgi?id=126552
-
-        Reviewed by Geoffrey Garen.
-
-        Re-marking the same objects over and over is a waste of effort. This patch implements 
-        the sticky mark bit algorithm (along with our already-present write barriers) to reduce 
-        overhead during garbage collection caused by rescanning objects.
-
-        There are now two collection modes, EdenCollection and FullCollection. EdenCollections
-        only visit new objects or objects that were added to the remembered set by a write barrier.
-        FullCollections are normal collections that visit all objects regardless of their 
-        generation.
-
-        In this patch EdenCollections do not do anything in CopiedSpace. This will be fixed in 
-        https://bugs.webkit.org/show_bug.cgi?id=126555.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::visitAggregate):
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlockSet::mark):
-        * dfg/DFGOperations.cpp:
-        * heap/CodeBlockSet.cpp:
-        (JSC::CodeBlockSet::add):
-        (JSC::CodeBlockSet::traceMarked):
-        (JSC::CodeBlockSet::rememberCurrentlyExecutingCodeBlocks):
-        * heap/CodeBlockSet.h:
-        * heap/CopiedBlockInlines.h:
-        (JSC::CopiedBlock::reportLiveBytes):
-        * heap/CopiedSpace.cpp:
-        (JSC::CopiedSpace::didStartFullCollection):
-        * heap/CopiedSpace.h:
-        (JSC::CopiedSpace::heap):
-        * heap/Heap.cpp:
-        (JSC::Heap::Heap):
-        (JSC::Heap::didAbandon):
-        (JSC::Heap::markRoots):
-        (JSC::Heap::copyBackingStores):
-        (JSC::Heap::addToRememberedSet):
-        (JSC::Heap::collectAllGarbage):
-        (JSC::Heap::collect):
-        (JSC::Heap::didAllocate):
-        (JSC::Heap::writeBarrier):
-        * heap/Heap.h:
-        (JSC::Heap::isInRememberedSet):
-        (JSC::Heap::operationInProgress):
-        (JSC::Heap::shouldCollect):
-        (JSC::Heap::isCollecting):
-        (JSC::Heap::isWriteBarrierEnabled):
-        (JSC::Heap::writeBarrier):
-        * heap/HeapOperation.h:
-        * heap/MarkStack.cpp:
-        (JSC::MarkStackArray::~MarkStackArray):
-        (JSC::MarkStackArray::clear):
-        (JSC::MarkStackArray::fillVector):
-        * heap/MarkStack.h:
-        * heap/MarkedAllocator.cpp:
-        (JSC::isListPagedOut):
-        (JSC::MarkedAllocator::isPagedOut):
-        (JSC::MarkedAllocator::tryAllocateHelper):
-        (JSC::MarkedAllocator::addBlock):
-        (JSC::MarkedAllocator::removeBlock):
-        (JSC::MarkedAllocator::reset):
-        * heap/MarkedAllocator.h:
-        (JSC::MarkedAllocator::MarkedAllocator):
-        * heap/MarkedBlock.cpp:
-        (JSC::MarkedBlock::clearMarks):
-        (JSC::MarkedBlock::clearRememberedSet):
-        (JSC::MarkedBlock::clearMarksWithCollectionType):
-        (JSC::MarkedBlock::lastChanceToFinalize):
-        * heap/MarkedBlock.h: Changed atomSize to 16 bytes because we have no objects smaller
-        than 16 bytes. This also pays for the additional Bitmap for the remembered set.
-        (JSC::MarkedBlock::didConsumeEmptyFreeList):
-        (JSC::MarkedBlock::setRemembered):
-        (JSC::MarkedBlock::clearRemembered):
-        (JSC::MarkedBlock::atomicClearRemembered):
-        (JSC::MarkedBlock::isRemembered):
-        * heap/MarkedSpace.cpp:
-        (JSC::MarkedSpace::~MarkedSpace):
-        (JSC::MarkedSpace::resetAllocators):
-        (JSC::MarkedSpace::visitWeakSets):
-        (JSC::MarkedSpace::reapWeakSets):
-        (JSC::VerifyMarked::operator()):
-        (JSC::MarkedSpace::clearMarks):
-        * heap/MarkedSpace.h:
-        (JSC::ClearMarks::operator()):
-        (JSC::ClearRememberedSet::operator()):
-        (JSC::MarkedSpace::didAllocateInBlock):
-        (JSC::MarkedSpace::clearRememberedSet):
-        * heap/SlotVisitor.cpp:
-        (JSC::SlotVisitor::~SlotVisitor):
-        (JSC::SlotVisitor::clearMarkStack):
-        * heap/SlotVisitor.h:
-        (JSC::SlotVisitor::markStack):
-        (JSC::SlotVisitor::sharedData):
-        * heap/SlotVisitorInlines.h:
-        (JSC::SlotVisitor::internalAppend):
-        (JSC::SlotVisitor::unconditionallyAppend):
-        (JSC::SlotVisitor::copyLater):
-        (JSC::SlotVisitor::reportExtraMemoryUsage):
-        (JSC::SlotVisitor::heap):
-        * jit/Repatch.cpp:
-        * runtime/JSGenericTypedArrayViewInlines.h:
-        (JSC::JSGenericTypedArrayView::visitChildren):
-        * runtime/JSPropertyNameIterator.h:
-        (JSC::StructureRareData::setEnumerationCache):
-        * runtime/JSString.cpp:
-        (JSC::JSString::visitChildren):
-        * runtime/StructureRareDataInlines.h:
-        (JSC::StructureRareData::setPreviousID):
-        (JSC::StructureRareData::setObjectToStringValue):
-        * runtime/WeakMapData.cpp:
-        (JSC::WeakMapData::visitChildren):
-
-2014-01-08  Sam Weinig  
-
-        [JS] Should be able to create a promise by calling the Promise constructor as a function
-        https://bugs.webkit.org/show_bug.cgi?id=126561
-
-        Reviewed by Geoffrey Garen.
-
-        * runtime/JSPromiseConstructor.cpp:
-        (JSC::JSPromiseConstructor::getCallData):
-        Add support for calling the Promise constructor as a function (e.g. var p = Promise(...);
-        note the missing "new").
-
-2014-01-08  Dániel Bátyai  
-
-        [EFL] Make FTL buildable
-        https://bugs.webkit.org/show_bug.cgi?id=125777
-
-        Reviewed by Csaba Osztrogonác.
-
-        * CMakeLists.txt:
-        * ftl/FTLOSREntry.cpp:
-        * ftl/FTLOSRExitCompiler.cpp:
-        * llvm/library/config_llvm.h:
-
-2014-01-08  Zan Dobersek  
-
-        [Automake] Scripts for generated build targets do not necessarily produce their output
-        https://bugs.webkit.org/show_bug.cgi?id=126378
-
-        Reviewed by Carlos Garcia Campos.
-
-        * GNUmakefile.am: Touch the build targets that are generated through helper scripts that don't
-        ensure the output is regenerated every time the script is invoked, most commonly because the
-        input is unchanged. This guarantees the build targets are up to date and can't be older than
-        their dependencies, which would otherwise result in constant regeneration at every build.
-
-2014-01-07  Filip Pizlo  
-
-        DFG fixup phase should be responsible for inserting ValueToInt32's as needed and it should use Phantom to keep the original values alive in case of OSR exit
-        https://bugs.webkit.org/show_bug.cgi?id=126600
-
-        Reviewed by Michael Saboff.
-        
-        This fixes an embarrassing OSR exit liveness bug. It also simplifies the code. We were
-        already using FixupPhase as the place where conversion nodes get inserted. ValueToInt32
-        was the only exception to that rule, and that was one of the reasons why we had this bug.
-        
-        Henceforth ValueToInt32 is only inserted by FixupPhase, and only when it is necessary:
-        we have a BitOp that will want a ToInt32 conversion and the operand is not predicted to
-        already be an int32. If FixupPhase inserts any ValueToInt32's then the BitOp will no
-        longer appear to use the original operand, which will make OSR exit think that the
-        original operand is dead. We work around this the way we always do: insert a Phantom on
-        the original operands right after the BitOp. This ensures that any OSR exit in any of the
-        ValueToInt32's or in the BitOp itself will have values for the original inputs.
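-
-        For example (illustrative IR, in the same notation as the snippets
-        elsewhere in this file; @x is the original operand of a bitwise or):
-
-            a: ValueToInt32(@x)
-            b: BitOr(@a, @a)
-            c: Phantom(@x)
-
-        An OSR exit in @a or @b can still recover @x because the Phantom keeps
-        it live.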
-
-        * dfg/DFGBackwardsPropagationPhase.cpp:
-        (JSC::DFG::BackwardsPropagationPhase::isWithinPowerOfTwo):
-        (JSC::DFG::BackwardsPropagationPhase::propagate):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::handleIntrinsic):
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        (JSC::DFG::FixupPhase::fixIntEdge):
-        (JSC::DFG::FixupPhase::fixBinaryIntEdges):
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * tests/stress/bit-op-value-to-int32-input-liveness.js: Added.
-        (foo):
-
-2014-01-07  Mark Hahnenberg  
-
-        Repatch write barrier slow path call doesn't align the stack in the presence of saved registers
-        https://bugs.webkit.org/show_bug.cgi?id=126093
-
-        Reviewed by Geoffrey Garen.
-
-        * jit/Repatch.cpp: Reworked the stack alignment code for calling out to C code on the write barrier slow path.
-        We need to properly account for the number of reused registers that were saved to the stack, so we have to 
-        pass the ScratchRegisterAllocator around (a small illustration of the alignment arithmetic follows the file list below).
-        (JSC::storeToWriteBarrierBuffer):
-        (JSC::writeBarrier):
-        (JSC::emitPutReplaceStub):
-        (JSC::emitPutTransitionStub):
-        * jit/ScratchRegisterAllocator.h: Previously the ScratchRegisterAllocator only knew whether or not it had
-        reused registers, but not how many. In order to correctly align the stack for calls to C slow paths for 
-        the write barriers in inline caches we need to know how the stack is aligned. So now ScratchRegisterAllocator
-        tracks how many registers it has reused.
-        (JSC::ScratchRegisterAllocator::ScratchRegisterAllocator):
-        (JSC::ScratchRegisterAllocator::allocateScratch):
-        (JSC::ScratchRegisterAllocator::didReuseRegisters):
-        (JSC::ScratchRegisterAllocator::numberOfReusedRegisters):
-        (JSC::ScratchRegisterAllocator::preserveReusedRegistersByPushing):
-        (JSC::ScratchRegisterAllocator::restoreReusedRegistersByPopping):
-        * llint/LowLevelInterpreter64.asm: Random typo fix.
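-
-        A small illustration of the alignment arithmetic (assuming a 16-byte
-        ABI alignment and 8-byte registers; not the actual Repatch.cpp code):
-
-            #include <cstddef>
-            #include <cstdint>
-
-            size_t extraStackNeededForAlignedCCall(unsigned numberOfReusedRegisters)
-            {
-                const size_t alignment = 16;
-                size_t bytesPushed = numberOfReusedRegisters * sizeof(uint64_t);
-                size_t misalignment = bytesPushed % alignment;
-                // Pad so that bytesPushed + padding is a multiple of 'alignment'.
-                return misalignment ? alignment - misalignment : 0;
-            }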
-
-2014-01-07  Mark Lam  
-
-        r161364 caused a JSC test regression on non-DFG builds (e.g. C Loop and Windows).
-        https://bugs.webkit.org/show_bug.cgi?id=126589.
-
-        Reviewed by Filip Pizlo.
-
-        After the removal of ENABLE(VALUE_PROFILER), the LLINT is now expecting the
-        relevant opcode operands to point to ValueProfiler data structures and will
-        write profiling data into them. Hence, we need to allocate these data
-        structures even though the profiling data won't be used in non-DFG builds.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-
-2014-01-07  Filip Pizlo  
-
-        ASSERT in compileArithNegate on pdfjs
-        https://bugs.webkit.org/show_bug.cgi?id=126584
-
-        Reviewed by Mark Hahnenberg.
-        
-        Check negative zero when we should check it, not when we shouldn't check it. :-/
-
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileArithNegate):
-
-2014-01-07  Gabor Rapcsanyi  
-
-        pushFinallyContext saves wrong m_labelScopes size
-        https://bugs.webkit.org/show_bug.cgi?id=124529
-
-        Remove free label scopes before saving finally context.
-
-        Reviewed by Geoffrey Garen.
-
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::pushFinallyContext):
-
-2014-01-06  Mark Hahnenberg  
-
-        Heap::collect shouldn't be responsible for sweeping
-        https://bugs.webkit.org/show_bug.cgi?id=126556
-
-        Reviewed by Geoffrey Garen.
-
-        Sweeping happens at an awkward time during collection due to the fact that destructors can 
-        cause arbitrary reentry into the VM. This patch separates collecting and sweeping, and delays 
-        sweeping until after collection has completely finished.
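-
-        A minimal sketch of the new ordering (illustrative bodies; the real
-        entry points are Heap::collect and MarkedSpace::sweep):
-
-            struct Heap {
-                void collect() { /* mark and reclaim; no destructors run here */ }
-                void sweep()   { /* run destructors; reentry into the VM is safe now */ }
-
-                void collectAndSweep()
-                {
-                    collect(); // finish collection completely first
-                    sweep();   // then destroy dead objects
-                }
-            };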
-
-        * heap/Heap.cpp:
-        (JSC::Heap::collectAllGarbage):
-        (JSC::Heap::collect):
-        (JSC::Heap::collectIfNecessaryOrDefer):
-        * heap/Heap.h:
-        * heap/MarkedSpace.cpp:
-        (JSC::MarkedSpace::sweep):
-        * runtime/GCActivityCallback.cpp:
-        (JSC::DefaultGCActivityCallback::doWork):
-
-2014-01-07  Mark Rowe  
-
-         Remove the legacy WebKit availability macros
-
-        They're no longer used.
-
-        Reviewed by Ryosuke Niwa.
-
-        * API/WebKitAvailability.h:
-
-2014-01-07  Filip Pizlo  
-
-        SetLocal for a FlushedArguments should not claim that the dataFormat is DataFormatJS
-        https://bugs.webkit.org/show_bug.cgi?id=126563
-
-        Reviewed by Gavin Barraclough.
-        
-        This was a rookie arguments simplification mistake: the SetLocal needs to record the fact
-        that although it set JSValue(), OSR should think it set Arguments. DataFormatArguments
-        conveys this, and dataFormatFor(FlushFormat) will do the right thing.
-
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * tests/stress/phantom-arguments-set-local-then-exit-in-same-block.js: Added.
-        (foo):
-
-2014-01-06  Filip Pizlo  
-
-        Make the different flavors of integer arithmetic more explicit, and don't rely on (possibly stale) results of the backwards propagator to decide integer arithmetic semantics
-        https://bugs.webkit.org/show_bug.cgi?id=125519
-
-        Reviewed by Geoffrey Garen.
-        
-        Adds the Arith::Mode enum to arithmetic nodes, which makes explicit what kinds of
-        checks and overflow behavior each node should have. Previously this was deduced from
-        backwards analysis results.
-        
-        This also makes "unchecked" variants really mean that you want the int32 wrapped
-        result, so ArithIMul is now done in terms of ArithMul(Unchecked). That means that the
-        constant folder needs to compute exactly the result implied by ArithMode, instead of
-        just folding the double result.
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::AbstractInterpreter::executeEffects):
-        * dfg/DFGArithMode.cpp: Added.
-        (WTF::printInternal):
-        * dfg/DFGArithMode.h: Added.
-        (JSC::DFG::doesOverflow):
-        (JSC::DFG::shouldCheckOverflow):
-        (JSC::DFG::shouldCheckNegativeZero):
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::pureCSE):
-        (JSC::DFG::CSEPhase::performNodeCSE):
-        * dfg/DFGConstantFoldingPhase.cpp:
-        (JSC::DFG::ConstantFoldingPhase::foldConstants):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        (JSC::DFG::FixupPhase::attemptToMakeIntegerAdd):
-        * dfg/DFGGraph.cpp:
-        (JSC::DFG::Graph::dump):
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::Node):
-        (JSC::DFG::Node::hasArithMode):
-        (JSC::DFG::Node::arithMode):
-        (JSC::DFG::Node::setArithMode):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileUInt32ToNumber):
-        (JSC::DFG::SpeculativeJIT::compileDoubleAsInt32):
-        (JSC::DFG::SpeculativeJIT::compileAdd):
-        (JSC::DFG::SpeculativeJIT::compileArithSub):
-        (JSC::DFG::SpeculativeJIT::compileArithNegate):
-        (JSC::DFG::SpeculativeJIT::compileArithMul):
-        (JSC::DFG::SpeculativeJIT::compileArithDiv):
-        (JSC::DFG::SpeculativeJIT::compileArithMod):
-        * dfg/DFGSpeculativeJIT.h:
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileAddSub):
-        (JSC::FTL::LowerDFGToLLVM::compileArithMul):
-        (JSC::FTL::LowerDFGToLLVM::compileArithDivMod):
-        (JSC::FTL::LowerDFGToLLVM::compileArithNegate):
-        (JSC::FTL::LowerDFGToLLVM::compileUInt32ToNumber):
-
-2014-01-06  Mark Hahnenberg  
-
-        Add write barriers to the LLInt
-        https://bugs.webkit.org/show_bug.cgi?id=126527
-
-        Reviewed by Filip Pizlo.
-
-        This patch takes a similar approach to how write barriers work in the baseline JIT.
-        We execute the write barrier at the beginning of the opcode so we don't have to 
-        worry about saving and restoring live registers across write barrier slow path calls 
-        to C code.
-
-        * llint/LLIntOfflineAsmConfig.h:
-        * llint/LLIntSlowPaths.cpp:
-        (JSC::LLInt::llint_write_barrier_slow):
-        * llint/LLIntSlowPaths.h:
-        * llint/LowLevelInterpreter.asm:
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * offlineasm/arm64.rb:
-        * offlineasm/instructions.rb:
-        * offlineasm/x86.rb:
-
-2014-01-05  Sam Weinig  
-
-        [JS] Implement Promise.all()
-        https://bugs.webkit.org/show_bug.cgi?id=126510
-
-        Reviewed by Gavin Barraclough.
-
-        Add Promise.all() implementation and factor out performing resolves and rejects
-        on deferreds to share a bit of code. Also moves the abruptRejection helper to
-        JSPromiseDeferred so it can be used in JSPromiseFunctions.
-
-        * runtime/CommonIdentifiers.h:
-        * runtime/JSPromiseConstructor.cpp:
-        (JSC::JSPromiseConstructorFuncCast):
-        (JSC::JSPromiseConstructorFuncResolve):
-        (JSC::JSPromiseConstructorFuncReject):
-        (JSC::JSPromiseConstructorFuncAll):
-        * runtime/JSPromiseDeferred.cpp:
-        (JSC::updateDeferredFromPotentialThenable):
-        (JSC::performDeferredResolve):
-        (JSC::performDeferredReject):
-        (JSC::abruptRejection):
-        * runtime/JSPromiseDeferred.h:
-        * runtime/JSPromiseFunctions.cpp:
-        (JSC::promiseAllCountdownFunction):
-        (JSC::createPromiseAllCountdownFunction):
-        * runtime/JSPromiseFunctions.h:
-        * runtime/JSPromiseReaction.cpp:
-        (JSC::ExecutePromiseReactionMicrotask::run):
-
-2014-01-06  Filip Pizlo  
-
-        Get rid of ENABLE(VALUE_PROFILER). It's on all the time now.
-
-        Rubber stamped by Mark Hahnenberg.
-
-        * bytecode/CallLinkStatus.cpp:
-        (JSC::CallLinkStatus::computeFor):
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::dumpValueProfiling):
-        (JSC::CodeBlock::dumpArrayProfiling):
-        (JSC::CodeBlock::dumpRareCaseProfile):
-        (JSC::CodeBlock::dumpBytecode):
-        (JSC::CodeBlock::CodeBlock):
-        (JSC::CodeBlock::setNumParameters):
-        (JSC::CodeBlock::shrinkToFit):
-        (JSC::CodeBlock::shouldOptimizeNow):
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::valueProfileForBytecodeOffset):
-        * bytecode/GetByIdStatus.cpp:
-        (JSC::GetByIdStatus::computeForChain):
-        (JSC::GetByIdStatus::computeFor):
-        * bytecode/LazyOperandValueProfile.cpp:
-        * bytecode/LazyOperandValueProfile.h:
-        * bytecode/PutByIdStatus.cpp:
-        (JSC::PutByIdStatus::computeFor):
-        * bytecode/ValueProfile.h:
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::newArrayProfile):
-        (JSC::BytecodeGenerator::newArrayAllocationProfile):
-        (JSC::BytecodeGenerator::emitProfiledOpcode):
-        * jit/GPRInfo.h:
-        * jit/JIT.cpp:
-        (JSC::JIT::JIT):
-        (JSC::JIT::privateCompileSlowCases):
-        (JSC::JIT::privateCompile):
-        * jit/JIT.h:
-        * jit/JITArithmetic.cpp:
-        (JSC::JIT::compileBinaryArithOp):
-        (JSC::JIT::emit_op_mul):
-        (JSC::JIT::emit_op_div):
-        * jit/JITArithmetic32_64.cpp:
-        (JSC::JIT::emitBinaryDoubleOp):
-        (JSC::JIT::emit_op_mul):
-        (JSC::JIT::emitSlow_op_mul):
-        (JSC::JIT::emit_op_div):
-        * jit/JITCall.cpp:
-        (JSC::JIT::emitPutCallResult):
-        * jit/JITCall32_64.cpp:
-        (JSC::JIT::emitPutCallResult):
-        * jit/JITInlines.h:
-        (JSC::JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile):
-        (JSC::JIT::emitValueProfilingSite):
-        (JSC::JIT::emitArrayProfilingSiteForBytecodeIndex):
-        (JSC::JIT::emitArrayProfileStoreToHoleSpecialCase):
-        (JSC::JIT::emitArrayProfileOutOfBoundsSpecialCase):
-        (JSC::arrayProfileSaw):
-        (JSC::JIT::chooseArrayMode):
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_get_argument_by_val):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emit_op_get_argument_by_val):
-        * jit/JITPropertyAccess.cpp:
-        (JSC::JIT::emit_op_get_by_val):
-        (JSC::JIT::emitSlow_op_get_by_val):
-        (JSC::JIT::emit_op_get_by_id):
-        (JSC::JIT::emit_op_get_from_scope):
-        * jit/JITPropertyAccess32_64.cpp:
-        (JSC::JIT::emit_op_get_by_val):
-        (JSC::JIT::emitSlow_op_get_by_val):
-        (JSC::JIT::emit_op_get_by_id):
-        (JSC::JIT::emit_op_get_from_scope):
-        * llint/LLIntOfflineAsmConfig.h:
-        * llint/LLIntSlowPaths.cpp:
-        (JSC::LLInt::LLINT_SLOW_PATH_DECL):
-        * llint/LowLevelInterpreter.asm:
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * profiler/ProfilerBytecodeSequence.cpp:
-        (JSC::Profiler::BytecodeSequence::BytecodeSequence):
-        * runtime/CommonSlowPaths.cpp:
-
-2014-01-06  Filip Pizlo  
-
-        LLInt shouldn't check for ENABLE(JIT).
-
-        Rubber stamped by Mark Hahnenberg.
-
-        * llint/LLIntCommon.h:
-        * llint/LLIntOfflineAsmConfig.h:
-        * llint/LLIntSlowPaths.cpp:
-        (JSC::LLInt::entryOSR):
-        (JSC::LLInt::LLINT_SLOW_PATH_DECL):
-        * llint/LowLevelInterpreter.asm:
-
-2014-01-06  Filip Pizlo  
-
-        LLInt shouldn't check for ENABLE(JAVASCRIPT_DEBUGGER).
-
-        Rubber stamped by Mark Hahnenberg.
-
-        * debugger/Debugger.h:
-        (JSC::Debugger::Debugger):
-        * llint/LLIntOfflineAsmConfig.h:
-        * llint/LowLevelInterpreter.asm:
-
-2014-01-05  Sam Weinig  
-
-        [JS] Implement Promise.race()
-        https://bugs.webkit.org/show_bug.cgi?id=126506
-
-        Reviewed by Oliver Hunt.
-
-        * runtime/CommonIdentifiers.h:
-        Add identifier for "cast".
-    
-        * runtime/JSPromiseConstructor.cpp:
-        (JSC::abruptRejection):
-        Helper for the RejectIfAbrupt abstract operation.
-  
-        (JSC::JSPromiseConstructorFuncRace):
-        Add implementation of Promise.race()
-
-2014-01-05  Martin Robinson  
-
-        [GTK] [CMake] Ensure that the autotools build and the CMake install the same files
-        https://bugs.webkit.org/show_bug.cgi?id=116379
-
-        Reviewed by Gustavo Noronha Silva.
-
-        * PlatformGTK.cmake: Install API headers, gir files, and the pkg-config file.
-
-2014-01-04  Yusuke Suzuki  
-
-        Use Compiler macros instead of raw "final" and "override"
-        https://bugs.webkit.org/show_bug.cgi?id=126490
-
-        Reviewed by Sam Weinig.
-
-        * runtime/JSPromiseReaction.cpp:
-
-2014-01-04  Martin Robinson  
-
-        [GTK] [CMake] Improve the way we locate gobject-introspection
-        https://bugs.webkit.org/show_bug.cgi?id=126452
-
-        Reviewed by Philippe Normand.
-
-        * PlatformGTK.cmake: Use the new introspection variables.
-
-2014-01-04  Zan Dobersek  
-
-        Explicitly use the std:: nested name specifier when using std::pair, std::make_pair
-        https://bugs.webkit.org/show_bug.cgi?id=126439
-
-        Reviewed by Andreas Kling.
-
-        Instead of relying on std::pair and std::make_pair symbols being present in the current scope
-        through the pair and make_pair symbols, the std:: specifier should be used explicitly.
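-
-        The pattern this change enforces, in a self-contained example:
-
-            #include <utility>
-
-            // Qualify explicitly instead of relying on "using std::make_pair".
-            std::pair<int, int> makeRange(int first, int last)
-            {
-                return std::make_pair(first, last);
-            }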
-
-        * bytecode/Opcode.cpp:
-        (JSC::compareOpcodePairIndices):
-        (JSC::OpcodeStats::~OpcodeStats):
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::BytecodeGenerator):
-        * parser/ASTBuilder.h:
-        (JSC::ASTBuilder::makeBinaryNode):
-        * parser/Parser.cpp:
-        (JSC::Parser::parseIfStatement):
-        * runtime/Structure.cpp:
-        (JSC::StructureTransitionTable::contains):
-        (JSC::StructureTransitionTable::get):
-        (JSC::StructureTransitionTable::add):
-
-2014-01-03  David Farler  
-
-        [super dealloc] missing in Source/JavaScriptCore/API/tests/testapi.mm, fails to build with -Werror,-Wobjc-missing-super-calls
-        https://bugs.webkit.org/show_bug.cgi?id=126454
-
-        Reviewed by Geoffrey Garen.
-
-        * API/tests/testapi.mm:
-        (-[TextXYZ dealloc]):
-        add [super dealloc]
-        (-[EvilAllocationObject dealloc]):
-        add [super dealloc]
-
-2014-01-02  Carlos Garcia Campos  
-
-        REGRESSION(r160304): [GTK] Disable libtool fast install
-        https://bugs.webkit.org/show_bug.cgi?id=126381
-
-        Reviewed by Martin Robinson.
-
-        Remove -no-fast-install ld flag since fast install is now disabled
-        globally.
-
-        * GNUmakefile.am:
-
-2014-01-02  Sam Weinig  
-
-        Update Promises to the https://github.com/domenic/promises-unwrapping spec
-        https://bugs.webkit.org/show_bug.cgi?id=120954
-
-        Reviewed by Filip Pizlo.
-
-        Update Promises to the revised spec. Notable changes:
-        - JSPromiseResolver is gone.
-        - TaskContext has been renamed Microtask and now has a virtual run() function.
-        - Instead of using custom InternalFunction subclasses, JSFunctions are used
-          with PrivateName properties for internal slots.
-
-        * CMakeLists.txt:
-        * DerivedSources.make:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * interpreter/CallFrame.h:
-        (JSC::ExecState::promiseConstructorTable):
-        * runtime/CommonIdentifiers.cpp:
-        (JSC::CommonIdentifiers::CommonIdentifiers):
-        * runtime/CommonIdentifiers.h:
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::reset):
-        (JSC::JSGlobalObject::visitChildren):
-        (JSC::JSGlobalObject::queueMicrotask):
-        * runtime/JSGlobalObject.h:
-        (JSC::JSGlobalObject::promiseConstructor):
-        (JSC::JSGlobalObject::promisePrototype):
-        (JSC::JSGlobalObject::promiseStructure):
-        * runtime/JSPromise.cpp:
-        (JSC::JSPromise::create):
-        (JSC::JSPromise::JSPromise):
-        (JSC::JSPromise::finishCreation):
-        (JSC::JSPromise::visitChildren):
-        (JSC::JSPromise::reject):
-        (JSC::JSPromise::resolve):
-        (JSC::JSPromise::appendResolveReaction):
-        (JSC::JSPromise::appendRejectReaction):
-        (JSC::triggerPromiseReactions):
-        * runtime/JSPromise.h:
-        (JSC::JSPromise::status):
-        (JSC::JSPromise::result):
-        (JSC::JSPromise::constructor):
-        * runtime/JSPromiseCallback.cpp: Removed.
-        * runtime/JSPromiseCallback.h: Removed.
-        * runtime/JSPromiseConstructor.cpp:
-        (JSC::constructPromise):
-        (JSC::JSPromiseConstructor::getCallData):
-        (JSC::JSPromiseConstructorFuncCast):
-        (JSC::JSPromiseConstructorFuncResolve):
-        (JSC::JSPromiseConstructorFuncReject):
-        * runtime/JSPromiseConstructor.h:
-        * runtime/JSPromiseDeferred.cpp: Added.
-        (JSC::JSPromiseDeferred::create):
-        (JSC::JSPromiseDeferred::JSPromiseDeferred):
-        (JSC::JSPromiseDeferred::finishCreation):
-        (JSC::JSPromiseDeferred::visitChildren):
-        (JSC::createJSPromiseDeferredFromConstructor):
-        (JSC::updateDeferredFromPotentialThenable):
-        * runtime/JSPromiseDeferred.h: Added.
-        (JSC::JSPromiseDeferred::createStructure):
-        (JSC::JSPromiseDeferred::promise):
-        (JSC::JSPromiseDeferred::resolve):
-        (JSC::JSPromiseDeferred::reject):
-        * runtime/JSPromiseFunctions.cpp: Added.
-        (JSC::deferredConstructionFunction):
-        (JSC::createDeferredConstructionFunction):
-        (JSC::identifyFunction):
-        (JSC::createIdentifyFunction):
-        (JSC::promiseAllCountdownFunction):
-        (JSC::createPromiseAllCountdownFunction):
-        (JSC::promiseResolutionHandlerFunction):
-        (JSC::createPromiseResolutionHandlerFunction):
-        (JSC::rejectPromiseFunction):
-        (JSC::createRejectPromiseFunction):
-        (JSC::resolvePromiseFunction):
-        (JSC::createResolvePromiseFunction):
-        (JSC::throwerFunction):
-        (JSC::createThrowerFunction):
-        * runtime/JSPromiseFunctions.h: Added.
-        * runtime/JSPromisePrototype.cpp:
-        (JSC::JSPromisePrototypeFuncThen):
-        (JSC::JSPromisePrototypeFuncCatch):
-        * runtime/JSPromiseReaction.cpp: Added.
-        (JSC::createExecutePromiseReactionMicroTask):
-        (JSC::ExecutePromiseReactionMicroTask::run):
-        (JSC::JSPromiseReaction::create):
-        (JSC::JSPromiseReaction::JSPromiseReaction):
-        (JSC::JSPromiseReaction::finishCreation):
-        (JSC::JSPromiseReaction::visitChildren):
-        * runtime/JSPromiseReaction.h: Added.
-        (JSC::JSPromiseReaction::createStructure):
-        (JSC::JSPromiseReaction::deferred):
-        (JSC::JSPromiseReaction::handler):
-        * runtime/JSPromiseResolver.cpp: Removed.
-        * runtime/JSPromiseResolver.h: Removed.
-        * runtime/JSPromiseResolverConstructor.cpp: Removed.
-        * runtime/JSPromiseResolverConstructor.h: Removed.
-        * runtime/JSPromiseResolverPrototype.cpp: Removed.
-        * runtime/JSPromiseResolverPrototype.h: Removed.
-        * runtime/Microtask.h: Added.
-        * runtime/VM.cpp:
-        (JSC::VM::VM):
-        (JSC::VM::~VM):
-        * runtime/VM.h:
-
-2014-01-02  Mark Hahnenberg  
-
-        Add support for StoreBarrier and friends to the FTL
-        https://bugs.webkit.org/show_bug.cgi?id=126040
-
-        Reviewed by Filip Pizlo.
-
-        * ftl/FTLAbstractHeapRepository.h:
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLIntrinsicRepository.h:
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileStoreBarrier):
-        (JSC::FTL::LowerDFGToLLVM::compileConditionalStoreBarrier):
-        (JSC::FTL::LowerDFGToLLVM::compileStoreBarrierWithNullCheck):
-        (JSC::FTL::LowerDFGToLLVM::loadMarkByte):
-        (JSC::FTL::LowerDFGToLLVM::emitStoreBarrier):
-        * heap/Heap.cpp:
-        (JSC::Heap::Heap):
-        * heap/Heap.h:
-        (JSC::Heap::writeBarrierBuffer):
-
-2014-01-02  Mark Hahnenberg  
-
-        Storing new CopiedSpace memory into a JSObject should fire a write barrier
-        https://bugs.webkit.org/show_bug.cgi?id=126025
-
-        Reviewed by Filip Pizlo.
-
-        Technically this creates a pointer from a (potentially) old generation object to a young 
-        generation chunk of memory, so a write barrier is needed.
-
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * dfg/DFGOperations.cpp:
-        * heap/CopyWriteBarrier.h: Added. This class functions similarly to the WriteBarrier class. It 
-        acts as a proxy for pointers to CopiedSpace. Assignments to the field cause a write barrier to 
-        fire for the object that owns the CopiedSpace memory. This ensures that, during nursery 
-        collections, objects with new backing stores are visited even if they are old generation 
-        objects (a small sketch of the idea follows the file list below). 
-        (JSC::CopyWriteBarrier::CopyWriteBarrier):
-        (JSC::CopyWriteBarrier::operator!):
-        (JSC::CopyWriteBarrier::operator UnspecifiedBoolType*):
-        (JSC::CopyWriteBarrier::get):
-        (JSC::CopyWriteBarrier::operator*):
-        (JSC::CopyWriteBarrier::operator->):
-        (JSC::CopyWriteBarrier::set):
-        (JSC::CopyWriteBarrier::setWithoutWriteBarrier):
-        (JSC::CopyWriteBarrier::clear):
-        * heap/Heap.h:
-        * runtime/JSArray.cpp:
-        (JSC::JSArray::unshiftCountSlowCase):
-        (JSC::JSArray::shiftCountWithArrayStorage):
-        (JSC::JSArray::unshiftCountWithArrayStorage):
-        * runtime/JSCell.h:
-        (JSC::JSCell::unvalidatedStructure):
-        * runtime/JSGenericTypedArrayViewInlines.h:
-        (JSC::JSGenericTypedArrayView::slowDownAndWasteMemory):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::copyButterfly):
-        (JSC::JSObject::getOwnPropertySlotByIndex):
-        (JSC::JSObject::putByIndex):
-        (JSC::JSObject::enterDictionaryIndexingModeWhenArrayStorageAlreadyExists):
-        (JSC::JSObject::createInitialIndexedStorage):
-        (JSC::JSObject::createArrayStorage):
-        (JSC::JSObject::deletePropertyByIndex):
-        (JSC::JSObject::getOwnPropertyNames):
-        (JSC::JSObject::putByIndexBeyondVectorLengthWithoutAttributes):
-        (JSC::JSObject::countElements):
-        (JSC::JSObject::increaseVectorLength):
-        (JSC::JSObject::ensureLengthSlow):
-        * runtime/JSObject.h:
-        (JSC::JSObject::butterfly):
-        (JSC::JSObject::setStructureAndButterfly):
-        (JSC::JSObject::setButterflyWithoutChangingStructure):
-        (JSC::JSObject::JSObject):
-        (JSC::JSObject::putDirectInternal):
-        (JSC::JSObject::putDirectWithoutTransition):
-        * runtime/MapData.cpp:
-        (JSC::MapData::ensureSpaceForAppend):
-        * runtime/Structure.cpp:
-        (JSC::Structure::materializePropertyMap):
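-
-        A hedged sketch of the proxy idea behind CopyWriteBarrier (simplified
-        types; the real class takes a VM and the owning JSCell):
-
-            struct Heap { void writeBarrier(void* owner) { /* remember 'owner' */ } };
-
-            template<typename T>
-            class CopyWriteBarrier {
-            public:
-                // Setting the field fires the barrier on the owner, so nursery
-                // collections revisit old objects whose backing store is new.
-                void set(Heap& heap, void* owner, T* value)
-                {
-                    heap.writeBarrier(owner);
-                    m_value = value;
-                }
-                T* get() const { return m_value; }
-            private:
-                T* m_value { nullptr };
-            };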
-
-2013-12-23  Oliver Hunt  
-
-        Refactor PutPropertySlot to be aware of custom properties
-        https://bugs.webkit.org/show_bug.cgi?id=126187
-
-        Reviewed by Antti Koivisto.
-
-        Refactor PutPropertySlot, making the constructor take the thisValue
-        used as a target.  This results in a wide range of boilerplate changes
-        to pass the new parameter.
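-
-        A simplified model of the new constructor (not JSC's real classes; it
-        only shows that the slot now carries the thisValue it targets):
-
-            struct Value { /* stand-in for JSValue */ };
-
-            class PutPropertySlot {
-            public:
-                explicit PutPropertySlot(Value thisValue)
-                    : m_thisValue(thisValue) { }
-                Value thisValue() const { return m_thisValue; }
-            private:
-                Value m_thisValue;
-            };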
-
-        * API/JSObjectRef.cpp:
-        (JSObjectSetProperty):
-        * dfg/DFGOperations.cpp:
-        (JSC::DFG::operationPutByValInternal):
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::execute):
-        * jit/JITOperations.cpp:
-        * llint/LLIntSlowPaths.cpp:
-        (JSC::LLInt::LLINT_SLOW_PATH_DECL):
-        * runtime/Arguments.cpp:
-        (JSC::Arguments::putByIndex):
-        * runtime/ArrayPrototype.cpp:
-        (JSC::putProperty):
-        (JSC::arrayProtoFuncPush):
-        * runtime/JSCJSValue.cpp:
-        (JSC::JSValue::putToPrimitiveByIndex):
-        * runtime/JSCell.cpp:
-        (JSC::JSCell::putByIndex):
-        * runtime/JSFunction.cpp:
-        (JSC::JSFunction::put):
-        * runtime/JSGenericTypedArrayViewInlines.h:
-        (JSC::JSGenericTypedArrayView::putByIndex):
-        * runtime/JSONObject.cpp:
-        (JSC::Walker::walk):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::putByIndex):
-        (JSC::JSObject::putDirectNonIndexAccessor):
-        (JSC::JSObject::deleteProperty):
-        * runtime/JSObject.h:
-        (JSC::JSObject::putDirect):
-        * runtime/Lookup.h:
-        (JSC::putEntry):
-        (JSC::lookupPut):
-        * runtime/PutPropertySlot.h:
-        (JSC::PutPropertySlot::PutPropertySlot):
-        (JSC::PutPropertySlot::setCustomProperty):
-        (JSC::PutPropertySlot::thisValue):
-        (JSC::PutPropertySlot::isCacheable):
-
-2014-01-01  Filip Pizlo  
-
-        Rationalize DFG DCE
-        https://bugs.webkit.org/show_bug.cgi?id=125523
-
-        Reviewed by Mark Hahnenberg.
-        
-        Adds the ability to DCE more things. It's now the case that if a node is completely
-        pure, we clear NodeMustGenerate and the node becomes a DCE candidate.
-
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::AbstractInterpreter::executeEffects):
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::performNodeCSE):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGDCEPhase.cpp:
-        (JSC::DFG::DCEPhase::cleanVariables):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGGraph.h:
-        (JSC::DFG::Graph::clobbersWorld):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileAdd):
-        * dfg/DFGSpeculativeJIT.h:
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileValueAdd):
-
-2014-01-02  Benjamin Poulain  
-
-        Attempt to fix the build of WebCore's code generator on CMake based system
-        https://bugs.webkit.org/show_bug.cgi?id=126271
-
-        Reviewed by Sam Weinig.
-
-        * CMakeLists.txt:
-
-2013-12-30  Commit Queue  
-
-        Unreviewed, rolling out r161157, r161158, r161160, r161161,
-        r161163, and r161165.
-        http://trac.webkit.org/changeset/161157
-        http://trac.webkit.org/changeset/161158
-        http://trac.webkit.org/changeset/161160
-        http://trac.webkit.org/changeset/161161
-        http://trac.webkit.org/changeset/161163
-        http://trac.webkit.org/changeset/161165
-        https://bugs.webkit.org/show_bug.cgi?id=126332
-
-        Broke WebKit2 on Mountain Lion (Requested by ap on #webkit).
-
-        * heap/BlockAllocator.cpp:
-        (JSC::BlockAllocator::~BlockAllocator):
-        (JSC::BlockAllocator::waitForRelativeTimeWhileHoldingLock):
-        (JSC::BlockAllocator::waitForRelativeTime):
-        (JSC::BlockAllocator::blockFreeingThreadMain):
-        * heap/BlockAllocator.h:
-        (JSC::BlockAllocator::deallocate):
-
-2013-12-30  Anders Carlsson  
-
-        Fix build.
-
-        * heap/BlockAllocator.h:
-
-2013-12-30  Anders Carlsson  
-
-        Stop using ThreadCondition in BlockAllocator
-        https://bugs.webkit.org/show_bug.cgi?id=126313
-
-        Reviewed by Sam Weinig.
-
-        * heap/BlockAllocator.cpp:
-        (JSC::BlockAllocator::~BlockAllocator):
-        (JSC::BlockAllocator::waitForDuration):
-        (JSC::BlockAllocator::blockFreeingThreadMain):
-        * heap/BlockAllocator.h:
-        (JSC::BlockAllocator::deallocate):
-
-2013-12-30  Anders Carlsson  
-
-        Stop using ThreadCondition in jsc.cpp
-        https://bugs.webkit.org/show_bug.cgi?id=126311
-
-        Reviewed by Sam Weinig.
-
-        * jsc.cpp:
-        (timeoutThreadMain):
-        (main):
-
-2013-12-30  Anders Carlsson  
-
-        Replace WTF::ThreadingOnce with std::call_once
-        https://bugs.webkit.org/show_bug.cgi?id=126215
-
-        Reviewed by Sam Weinig.
-
-        * dfg/DFGWorklist.cpp:
-        (JSC::DFG::globalWorklist):
-        * runtime/InitializeThreading.cpp:
-        (JSC::initializeThreading):
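-
-        The std::call_once pattern that replaces WTF::ThreadingOnce (a sketch;
-        the flag name is illustrative):
-
-            #include <mutex>
-
-            static std::once_flag initializeThreadingOnceFlag;
-
-            void initializeThreading()
-            {
-                std::call_once(initializeThreadingOnceFlag, [] {
-                    // One-time setup; runs exactly once even if several
-                    // threads race to call initializeThreading().
-                });
-            }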
-
-2013-12-30  Martin Robinson  
-
-        [CMake] [GTK] Add support for GObject introspection
-        https://bugs.webkit.org/show_bug.cgi?id=126162
-
-        Reviewed by Daniel Bates.
-
-        * PlatformGTK.cmake: Add the GIR targets.
-
-2013-12-28  Filip Pizlo  
-
-        Get rid of DFG forward exiting
-        https://bugs.webkit.org/show_bug.cgi?id=125531
-
-        Reviewed by Oliver Hunt.
-        
-        This finally gets rid of forward exiting. Forward exiting was always a fragile concept
-        since it involved the compiler trying to figure out how to "roll forward" the
-        execution from some DFG node to the next bytecode index. It was always easy to find
-        counterexamples where it broke, and it has always served as an obstacle to adding
-        compiler improvements - the latest being http://webkit.org/b/125523, which tried to
-        make DCE work for more things.
-        
-        This change finishes the work of removing forward exiting. A lot of forward exiting
-        was already removed in some other bugs, but SetLocal still did forward exits. SetLocal
-        is in many ways the hardest to remove, since the forward exiting of SetLocal also
-        implied that any conversion nodes inserted before the SetLocal would then also be
-        marked as forward-exiting. Hence SetLocal's forward-exiting made a bunch of other
-        things also forward-exiting, and this was always a source of weirdo bugs.
-        
-        SetLocal must be able to exit in case it performs a hoisted type speculation. Nodes
-        inserted just before SetLocal must also be able to exit - for example type check
-        hoisting may insert a CheckStructure, or fixup phase may insert something like
-        Int32ToDouble. But if any of those nodes tried to backward exit, then this could lead
-        to the reexecution of a side-effecting operation, for example:
-        
-            a: Call(...)
-            b: SetLocal(@a, r1)
-        
-        For a long time it seemed like SetLocal *had* to exit forward because of this. But
-        this change side-steps the problem by changing the ByteCodeParser to always emit a
-        kind of "two-phase commit" for stores to local variables. Now when the ByteCodeParser
-        wishes to store to a local, it first emits a MovHint and then enqueues a SetLocal.
-        The SetLocal isn't actually emitted until the beginning of the next bytecode
-        instruction (with the exception of op_enter and op_ret, which emit theirs immediately
-        since it's always safe to reexecute those bytecode instructions and since deferring
-        SetLocals would be weird there - op_enter has many SetLocals and op_ret is a set
-        followed by a jump in case of inlining, so we'd have to emit the SetLocal "after" the
-        jump and that would be awkward). This means that the above IR snippet would look
-        something like:
-        
-            a: Call(..., bc#42)
-            b: MovHint(@a, r1, bc#42)
-            c: SetLocal(@a, r1, bc#47)
-        
-        Where the SetLocal exits "backwards" but appears at the beginning of the next bytecode
-        instruction. This means that by the time we get to that SetLocal, the OSR exit
-        analysis already knows that r1 is associated with @a, and it means that the SetLocal
-        or anything hoisted above it can exit backwards as normal.
-        
-        This change also means that the "forward rewiring" can be killed. Previously, we might
-        have inserted a conversion node on SetLocal and then the SetLocal died (i.e. turned
-        into a MovHint) and the conversion node either died completely or had its lifetime
-        truncated to be less than the actual value's bytecode lifetime. This no longer happens
-        since conversion nodes are only inserted at SetLocals.
-        
-        More precisely, this change introduces two laws that we were basically already
-        following anyway:
-        
-        1) A MovHint's child should never be changed except if all other uses of that child
-           are also replaced. Specifically, this prohibits insertion of conversion nodes at
-           MovHints.
-        
-        2) Anytime any child is replaced with something else, and all other uses aren't also
-           replaced, we must insert a Phantom use of the original child.
-
-        This is a slight compile-time regression but has no effect on code-gen. It unlocks a
-        bunch of optimization opportunities so I think it's worth it.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::dumpAssumingJITType):
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::instructionCount):
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::AbstractInterpreter::executeEffects):
-        * dfg/DFGArgumentsSimplificationPhase.cpp:
-        (JSC::DFG::ArgumentsSimplificationPhase::run):
-        * dfg/DFGArrayifySlowPathGenerator.h:
-        (JSC::DFG::ArrayifySlowPathGenerator::ArrayifySlowPathGenerator):
-        * dfg/DFGBackwardsPropagationPhase.cpp:
-        (JSC::DFG::BackwardsPropagationPhase::propagate):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::setDirect):
-        (JSC::DFG::ByteCodeParser::DelayedSetLocal::DelayedSetLocal):
-        (JSC::DFG::ByteCodeParser::DelayedSetLocal::execute):
-        (JSC::DFG::ByteCodeParser::handleInlining):
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::eliminate):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGCommon.h:
-        * dfg/DFGConstantFoldingPhase.cpp:
-        (JSC::DFG::ConstantFoldingPhase::foldConstants):
-        * dfg/DFGDCEPhase.cpp:
-        (JSC::DFG::DCEPhase::run):
-        (JSC::DFG::DCEPhase::fixupBlock):
-        (JSC::DFG::DCEPhase::cleanVariables):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        (JSC::DFG::FixupPhase::fixEdge):
-        (JSC::DFG::FixupPhase::injectInt32ToDoubleNode):
-        * dfg/DFGLICMPhase.cpp:
-        (JSC::DFG::LICMPhase::run):
-        (JSC::DFG::LICMPhase::attemptHoist):
-        * dfg/DFGMinifiedNode.cpp:
-        (JSC::DFG::MinifiedNode::fromNode):
-        * dfg/DFGMinifiedNode.h:
-        (JSC::DFG::belongsInMinifiedGraph):
-        (JSC::DFG::MinifiedNode::constantNumber):
-        (JSC::DFG::MinifiedNode::weakConstant):
-        * dfg/DFGNode.cpp:
-        (JSC::DFG::Node::hasVariableAccessData):
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::convertToPhantom):
-        (JSC::DFG::Node::convertToPhantomUnchecked):
-        (JSC::DFG::Node::convertToIdentity):
-        (JSC::DFG::Node::containsMovHint):
-        (JSC::DFG::Node::hasUnlinkedLocal):
-        (JSC::DFG::Node::willHaveCodeGenOrOSR):
-        * dfg/DFGNodeFlags.cpp:
-        (JSC::DFG::dumpNodeFlags):
-        * dfg/DFGNodeFlags.h:
-        * dfg/DFGNodeType.h:
-        * dfg/DFGOSRAvailabilityAnalysisPhase.cpp:
-        (JSC::DFG::OSRAvailabilityAnalysisPhase::run):
-        * dfg/DFGOSREntrypointCreationPhase.cpp:
-        (JSC::DFG::OSREntrypointCreationPhase::run):
-        * dfg/DFGOSRExit.cpp:
-        * dfg/DFGOSRExit.h:
-        * dfg/DFGOSRExitBase.cpp:
-        * dfg/DFGOSRExitBase.h:
-        (JSC::DFG::OSRExitBase::considerAddingAsFrequentExitSite):
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        (JSC::DFG::PredictionPropagationPhase::doDoubleVoting):
-        * dfg/DFGSSAConversionPhase.cpp:
-        (JSC::DFG::SSAConversionPhase::run):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::speculationCheck):
-        (JSC::DFG::SpeculativeJIT::emitInvalidationPoint):
-        (JSC::DFG::SpeculativeJIT::typeCheck):
-        (JSC::DFG::SpeculativeJIT::compileMovHint):
-        (JSC::DFG::SpeculativeJIT::compileCurrentBlock):
-        (JSC::DFG::SpeculativeJIT::checkArgumentTypes):
-        (JSC::DFG::SpeculativeJIT::compileInt32ToDouble):
-        * dfg/DFGSpeculativeJIT.h:
-        (JSC::DFG::SpeculativeJIT::detectPeepHoleBranch):
-        (JSC::DFG::SpeculativeJIT::needsTypeCheck):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGTypeCheckHoistingPhase.cpp:
-        (JSC::DFG::TypeCheckHoistingPhase::run):
-        (JSC::DFG::TypeCheckHoistingPhase::identifyRedundantStructureChecks):
-        (JSC::DFG::TypeCheckHoistingPhase::identifyRedundantArrayChecks):
-        * dfg/DFGValidate.cpp:
-        (JSC::DFG::Validate::validateCPS):
-        * dfg/DFGVariableAccessData.h:
-        (JSC::DFG::VariableAccessData::VariableAccessData):
-        * dfg/DFGVariableEventStream.cpp:
-        (JSC::DFG::VariableEventStream::reconstruct):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileGetArgument):
-        (JSC::FTL::LowerDFGToLLVM::compileSetLocal):
-        (JSC::FTL::LowerDFGToLLVM::compileMovHint):
-        (JSC::FTL::LowerDFGToLLVM::compileZombieHint):
-        (JSC::FTL::LowerDFGToLLVM::compileInt32ToDouble):
-        (JSC::FTL::LowerDFGToLLVM::speculate):
-        (JSC::FTL::LowerDFGToLLVM::typeCheck):
-        (JSC::FTL::LowerDFGToLLVM::appendTypeCheck):
-        (JSC::FTL::LowerDFGToLLVM::appendOSRExit):
-        (JSC::FTL::LowerDFGToLLVM::emitOSRExitCall):
-        * ftl/FTLOSRExit.cpp:
-        * ftl/FTLOSRExit.h:
-        * tests/stress/dead-int32-to-double.js: Added.
-        (foo):
-        * tests/stress/dead-uint32-to-number.js: Added.
-        (foo):
-
-2013-12-25  Commit Queue  
-
-        Unreviewed, rolling out r161033 and r161074.
-        http://trac.webkit.org/changeset/161033
-        http://trac.webkit.org/changeset/161074
-        https://bugs.webkit.org/show_bug.cgi?id=126240
-
-        Oliver says that a rollout would be better (Requested by ap on
-        #webkit).
-
-        * API/JSObjectRef.cpp:
-        (JSObjectSetProperty):
-        * dfg/DFGOperations.cpp:
-        (JSC::DFG::operationPutByValInternal):
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::execute):
-        * jit/JITOperations.cpp:
-        * llint/LLIntSlowPaths.cpp:
-        (JSC::LLInt::LLINT_SLOW_PATH_DECL):
-        * runtime/Arguments.cpp:
-        (JSC::Arguments::putByIndex):
-        * runtime/ArrayPrototype.cpp:
-        (JSC::putProperty):
-        (JSC::arrayProtoFuncPush):
-        * runtime/JSCJSValue.cpp:
-        (JSC::JSValue::putToPrimitiveByIndex):
-        * runtime/JSCell.cpp:
-        (JSC::JSCell::putByIndex):
-        * runtime/JSFunction.cpp:
-        (JSC::JSFunction::put):
-        * runtime/JSGenericTypedArrayViewInlines.h:
-        (JSC::JSGenericTypedArrayView::putByIndex):
-        * runtime/JSONObject.cpp:
-        (JSC::Walker::walk):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::putByIndex):
-        (JSC::JSObject::putDirectNonIndexAccessor):
-        (JSC::JSObject::deleteProperty):
-        * runtime/JSObject.h:
-        (JSC::JSObject::putDirect):
-        * runtime/Lookup.h:
-        (JSC::putEntry):
-        (JSC::lookupPut):
-        * runtime/PutPropertySlot.h:
-        (JSC::PutPropertySlot::PutPropertySlot):
-        (JSC::PutPropertySlot::setNewProperty):
-        (JSC::PutPropertySlot::isCacheable):
-
-2013-12-25  Filip Pizlo  
-
-        DFG PhantomArguments shouldn't rely on a dead Phi graph
-        https://bugs.webkit.org/show_bug.cgi?id=126218
-
-        Reviewed by Oliver Hunt.
-        
-        This change dramatically rationalizes our handling of PhantomArguments (i.e.
-        speculative elision of arguments object allocation).
-        
-        It's now the case that if we decide that we can elide arguments allocation, we just
-        turn the arguments-creating node into a PhantomArguments and mark all locals that
-        it's stored to as being arguments aliases. Being an arguments alias and being a
-        PhantomArguments means basically the same thing: in DFG execution you have the empty
-        value, on OSR exit an arguments object is allocated in your place, and all operations
-        that use the value now just refer directly to the actual arguments in the call frame
-        header (or the arguments we know that we passed to the call, in case of inlining).
-        
-        This means that we no longer have arguments simplification creating a dead Phi graph
-        that then has to be interpreted by the OSR exit logic. That sort of never made any
-        sense.
-        
-        This means that PhantomArguments now has a clear story in SSA: basically SSA just
-        gets rid of the "locals" but everything else is the same.
-        
-        Finally, this means that we can more easily get rid of forward exiting. As I was
-        working on the code to get rid of forward exiting, I realized that I'd have to
-        carefully preserve the special meanings of MovHint and SetLocal in the case of
-        PhantomArguments. It was really bizarre: even the semantics of MovHint were tied to
-        our specific treatment of PhantomArguments. After this change this is no longer the
-        case.
-        
-        One of the really cool things about this change is that arguments reification now
-        just becomes a special kind of FlushFormat. This further unifies things: it means
-        that a MovHint(PhantomArguments) and a SetLocal(PhantomArguments) both have the same
-        meaning, since both of them dictate that the way we recover the local on exit is by
-        reifying arguments. Previously, the SetLocal(PhantomArguments) case needed some
-        special handling to accomplish this.
-        
-        A downside of this approach is that we will now emit code to store the empty value
-        into aliased arguments variables, and we will even emit code to load that empty value
-        as well. As far as I can tell this doesn't cost anything, since PhantomArguments are
-        most profitable in cases where it allows us to simplify control flow and kill the
-        arguments locals entirely. Of course, this isn't an issue in SSA form since SSA form
-        also eliminates the locals.
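-
-        A minimal illustrative example (not a test from this patch): in a function like
-        
-            function numArgs() { return arguments.length; }
-        
-        the arguments object never escapes, so its allocation can be elided entirely;
-        an actual arguments object is only reified if we OSR exit.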
-
-        * dfg/DFGArgumentsSimplificationPhase.cpp:
-        (JSC::DFG::ArgumentsSimplificationPhase::run):
-        (JSC::DFG::ArgumentsSimplificationPhase::detypeArgumentsReferencingPhantomChild):
-        * dfg/DFGFlushFormat.cpp:
-        (WTF::printInternal):
-        * dfg/DFGFlushFormat.h:
-        (JSC::DFG::resultFor):
-        (JSC::DFG::useKindFor):
-        (JSC::DFG::dataFormatFor):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileCurrentBlock):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGValueSource.h:
-        (JSC::DFG::ValueSource::ValueSource):
-        (JSC::DFG::ValueSource::forFlushFormat):
-        * dfg/DFGVariableAccessData.h:
-        (JSC::DFG::VariableAccessData::flushFormat):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::buildExitArguments):
-
-2013-12-23  Oliver Hunt  
-
-        Refactor PutPropertySlot to be aware of custom properties
-        https://bugs.webkit.org/show_bug.cgi?id=126187
-
-        Reviewed by Michael Saboff.
-
-        Refactor PutPropertySlot, making the constructor take the thisValue
-        used as a target.  This results in a wide range of boilerplate changes
-        to pass the new parameter.
-
-        * API/JSObjectRef.cpp:
-        (JSObjectSetProperty):
-        * dfg/DFGOperations.cpp:
-        (JSC::DFG::operationPutByValInternal):
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::execute):
-        * jit/JITOperations.cpp:
-        * llint/LLIntSlowPaths.cpp:
-        (JSC::LLInt::LLINT_SLOW_PATH_DECL):
-        * runtime/Arguments.cpp:
-        (JSC::Arguments::putByIndex):
-        * runtime/ArrayPrototype.cpp:
-        (JSC::putProperty):
-        (JSC::arrayProtoFuncPush):
-        * runtime/JSCJSValue.cpp:
-        (JSC::JSValue::putToPrimitiveByIndex):
-        * runtime/JSCell.cpp:
-        (JSC::JSCell::putByIndex):
-        * runtime/JSFunction.cpp:
-        (JSC::JSFunction::put):
-        * runtime/JSGenericTypedArrayViewInlines.h:
-        (JSC::JSGenericTypedArrayView::putByIndex):
-        * runtime/JSONObject.cpp:
-        (JSC::Walker::walk):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::putByIndex):
-        (JSC::JSObject::putDirectNonIndexAccessor):
-        (JSC::JSObject::deleteProperty):
-        * runtime/JSObject.h:
-        (JSC::JSObject::putDirect):
-        * runtime/Lookup.h:
-        (JSC::putEntry):
-        (JSC::lookupPut):
-        * runtime/PutPropertySlot.h:
-        (JSC::PutPropertySlot::PutPropertySlot):
-        (JSC::PutPropertySlot::setCustomProperty):
-        (JSC::PutPropertySlot::thisValue):
-        (JSC::PutPropertySlot::isCacheable):
-
-2013-12-23  Benjamin Poulain  
-
-        Add class matching to the Selector Code Generator
-        https://bugs.webkit.org/show_bug.cgi?id=126176
-
-        Reviewed by Antti Koivisto and Oliver Hunt.
-
-        Add test and branch based on BaseIndex addressing for x86_64.
-        Fast loops are needed to compete with clang on tight loops.
-
-        * assembler/MacroAssembler.h:
-        * assembler/MacroAssemblerX86_64.h:
-        (JSC::MacroAssemblerX86_64::branch64):
-        (JSC::MacroAssemblerX86_64::branchPtr):
-        * assembler/X86Assembler.h:
-        (JSC::X86Assembler::cmpq_rm):
-
-2013-12-23  Oliver Hunt  
-
-        Update custom setter implementations to perform type checks
-        https://bugs.webkit.org/show_bug.cgi?id=126171
-
-        Reviewed by Daniel Bates.
-
-        Modify the setter function signature to take encoded values
-        as we're changing the setter usage everywhere anyway.
-
-        * runtime/Lookup.h:
-        (JSC::putEntry):
-
-2013-12-23  Lucas Forschler  
-
-        Update copyright strings
-        
-        Reviewed by Dan Bernstein.
-
-        * Info.plist:
-        * JavaScriptCore.vcxproj/JavaScriptCore.resources/Info.plist:
-
-2013-12-23  Zan Dobersek  
-
-        [GTK] Clean up compiler optimization flags for libWTF, libJSC
-        https://bugs.webkit.org/show_bug.cgi?id=126157
-
-        Reviewed by Gustavo Noronha Silva.
-
-        * GNUmakefile.am: Remove the -fstrict-aliasing and -O3 compiler flags for libWTF.la. -O3 gets
-        overridden by -O2 that's listed in CXXFLAGS (or -O0 in case of debug builds) and -fstrict-aliasing
-        is enabled when -O2 is used (and shouldn't be enabled in debug builds anyway).
-
-2013-12-22  Martin Robinson  
-
-        [CMake] Fix typo from r160812
-        https://bugs.webkit.org/show_bug.cgi?id=126145
-
-        Reviewed by Gustavo Noronha Silva.
-
-        * CMakeLists.txt: Fix typo when detecting the type of library.
-
-2013-12-22  Martin Robinson  
-
-        [GTK][CMake] libtool-compatible soversion calculation
-        https://bugs.webkit.org/show_bug.cgi?id=125511
-
-        Reviewed by Gustavo Noronha Silva.
-
-        * CMakeLists.txt: Use the POPULATE_LIBRARY_VERSION macro and the
-        library-specific version information.
-
-2013-12-23  Gustavo Noronha Silva  
-
-        [GTK] [CMake] Generate pkg-config files
-        https://bugs.webkit.org/show_bug.cgi?id=125685
-
-        Reviewed by Martin Robinson.
-
-        * PlatformGTK.cmake: Added. Generate javascriptcoregtk-3.0.pc.
-
-2013-12-22  Benjamin Poulain  
-
-        Create a skeleton for CSS Selector code generation
-        https://bugs.webkit.org/show_bug.cgi?id=126044
-
-        Reviewed by Antti Koivisto and Gavin Barraclough.
-
-        * assembler/LinkBuffer.h:
-        Add a new owner UID for code compiled for CSS.
-        Export the symbols needed to link code from WebCore.
-
-2013-12-19  Mark Hahnenberg  
-
-        Clean up DFG write barriers
-        https://bugs.webkit.org/show_bug.cgi?id=126047
-
-        Reviewed by Filip Pizlo.
-
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::storeToWriteBarrierBuffer): Use the register allocator to 
-        determine which registers need saving instead of saving every single one of them.
-        (JSC::DFG::SpeculativeJIT::osrWriteBarrier): We don't need to save live register state 
-        because the write barriers during OSR execute when there are no live registers. Also we  
-        don't need to use pushes to pad the stack pointer for pokes on x86; we can just use an add.
-        (JSC::DFG::SpeculativeJIT::writeBarrier):
-        * dfg/DFGSpeculativeJIT.h:
-        * jit/Repatch.cpp:
-        (JSC::emitPutReplaceStub):
-        (JSC::emitPutTransitionStub):
-        * runtime/VM.h: Get rid of writeBarrierRegisterBuffer since it's no longer used.
-
-2013-12-20  Balazs Kilvady  
-
-        [MIPS] Missing MacroAssemblerMIPS::branchTest8(ResultCondition, BaseIndex, TrustedImm32)
-        https://bugs.webkit.org/show_bug.cgi?id=126062
-
-        Reviewed by Mark Hahnenberg.
-
-        * assembler/MacroAssemblerMIPS.h:
-        (JSC::MacroAssemblerMIPS::branchTest8):
-
-2013-12-20  Julien Brianceau  
-
-        [sh4] Add missing implementation in MacroAssembler to fix build.
-        https://bugs.webkit.org/show_bug.cgi?id=126063
-
-        Reviewed by Mark Hahnenberg.
-
-        * assembler/MacroAssemblerSH4.h:
-        (JSC::MacroAssemblerSH4::branchTest8):
-
-2013-12-20  Julien Brianceau  
-
-        [arm] Add missing implementation in MacroAssembler to fix CPU(ARM_TRADITIONAL) build.
-        https://bugs.webkit.org/show_bug.cgi?id=126064
-
-        Reviewed by Mark Hahnenberg.
-
-        * assembler/MacroAssemblerARM.h:
-        (JSC::MacroAssemblerARM::branchTest8):
-
-2013-12-19  Joseph Pecoraro  
-
-        Web Inspector: Add InspectorFrontendHost.debuggableType to let the frontend know its backend is JavaScript or Web
-        https://bugs.webkit.org/show_bug.cgi?id=126016
-
-        Reviewed by Timothy Hatcher.
-
-        * inspector/remote/RemoteInspector.mm:
-        (Inspector::RemoteInspector::listingForDebuggable):
-        * inspector/remote/RemoteInspectorConstants.h:
-        Include a debuggable type identifier in the debuggable listing,
-        so the remote frontend can know if it is debugging a Web Page
-        or JS Context.
-
-2013-12-19  Benjamin Poulain  
-
-        Add a utility class to simplify generating function calls
-        https://bugs.webkit.org/show_bug.cgi?id=125972
-
-        Reviewed by Geoffrey Garen.
-
-        Split branchTest32 into two functions: test32AndSetFlags and branchOnFlags.
-        This is done to allow code where the flags are set, multiple operations that
-        do not modify the flags occur, and then the flags are used.
-
-        This is used for function calls to test the return value while discarding the
-        return register.
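-
-        Schematically (illustrative pseudocode, not code from this patch):
-        
-            test32AndSetFlags(returnValueGPR)      // set the flags once
-            ... restore registers; flag-preserving ops only ...
-            branchOnFlags(NonZero) -> slow path    // consume the flags later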
-
-        * assembler/MacroAssemblerX86Common.h:
-        (JSC::MacroAssemblerX86Common::test32AndSetFlags):
-        (JSC::MacroAssemblerX86Common::branchOnFlags):
-        (JSC::MacroAssemblerX86Common::branchTest32):
-
-2013-12-19  Mark Hahnenberg  
-
-        Put write barriers in the right places in the baseline JIT
-        https://bugs.webkit.org/show_bug.cgi?id=125975
-
-        Reviewed by Filip Pizlo.
-
-        * jit/JIT.cpp:
-        (JSC::JIT::privateCompileSlowCases):
-        * jit/JIT.h:
-        * jit/JITInlines.h:
-        (JSC::JIT::callOperation):
-        (JSC::JIT::emitArrayProfilingSite):
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_enter):
-        (JSC::JIT::emitSlow_op_enter):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emit_op_enter):
-        (JSC::JIT::emitSlow_op_enter):
-        * jit/JITPropertyAccess.cpp:
-        (JSC::JIT::emit_op_put_by_val):
-        (JSC::JIT::emitGenericContiguousPutByVal):
-        (JSC::JIT::emitArrayStoragePutByVal):
-        (JSC::JIT::emit_op_put_by_id):
-        (JSC::JIT::emitPutGlobalProperty):
-        (JSC::JIT::emitPutGlobalVar):
-        (JSC::JIT::emitPutClosureVar):
-        (JSC::JIT::emit_op_init_global_const):
-        (JSC::JIT::checkMarkWord):
-        (JSC::JIT::emitWriteBarrier):
-        (JSC::JIT::privateCompilePutByVal):
-        * jit/JITPropertyAccess32_64.cpp:
-        (JSC::JIT::emitGenericContiguousPutByVal):
-        (JSC::JIT::emitArrayStoragePutByVal):
-        (JSC::JIT::emit_op_put_by_id):
-        (JSC::JIT::emitSlow_op_put_by_id):
-        (JSC::JIT::emitPutGlobalProperty):
-        (JSC::JIT::emitPutGlobalVar):
-        (JSC::JIT::emitPutClosureVar):
-        (JSC::JIT::emit_op_init_global_const):
-        * jit/Repatch.cpp:
-        (JSC::emitPutReplaceStub):
-        (JSC::emitPutTransitionStub):
-        (JSC::repatchPutByID):
-        * runtime/CommonSlowPaths.cpp:
-        (JSC::SLOW_PATH_DECL):
-        * runtime/CommonSlowPaths.h:
-
-2013-12-19  Brent Fulgham  
-
-        Implement ArrayBuffer.isView
-        https://bugs.webkit.org/show_bug.cgi?id=126004
-
-        Reviewed by Filip Pizlo.
-
-        Test coverage in webgl/1.0.2/resources/webgl_test_files/conformance/typedarrays/array-unit-tests.html
-
-        * runtime/JSArrayBufferConstructor.cpp:
-        (JSC::JSArrayBufferConstructor::finishCreation): Add 'isView' to object constructor.
-        (JSC::arrayBufferFuncIsView): New method.
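-
-        For reference, the expected behavior (a summary of the spec'd semantics, not
-        code from this patch):
-        
-            ArrayBuffer.isView(new Uint8Array(8));                 // true
-            ArrayBuffer.isView(new DataView(new ArrayBuffer(8)));  // true
-            ArrayBuffer.isView(new ArrayBuffer(8));                // false
-            ArrayBuffer.isView([]);                                // false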
-
-2013-12-19  Mark Lam  
-
-        Fix broken C loop LLINT build.
-        https://bugs.webkit.org/show_bug.cgi?id=126024.
-
-        Reviewed by Oliver Hunt.
-
-        * runtime/VM.h:
-
-2013-12-18  Mark Hahnenberg  
-
-        DelayedReleaseScope is in the wrong place
-        https://bugs.webkit.org/show_bug.cgi?id=125876
-
-        Reviewed by Geoffrey Garen.
-
-        The DelayedReleaseScope needs to be around the free list sweeping in MarkedAllocator::tryAllocateHelper. 
-        This location gives us a good safe point between getting ready to allocate  (i.e. identifying a non-empty 
-        free list) and doing the actual allocation (popping the free list).
-
-        * heap/MarkedAllocator.cpp:
-        (JSC::MarkedAllocator::tryAllocateHelper):
-        (JSC::MarkedAllocator::allocateSlowCase):
-        (JSC::MarkedAllocator::addBlock):
-        * runtime/JSCellInlines.h:
-        (JSC::allocateCell):
-
-2013-12-18  Gustavo Noronha Silva  
-
-        [GTK][CMake] make libjavascriptcoregtk a public shared library again
-        https://bugs.webkit.org/show_bug.cgi?id=125512
-
-        Reviewed by Martin Robinson.
-
-        * CMakeLists.txt: use target type instead of SHARED_CORE to decide whether
-        JavaScriptCore is a shared library, since it's always shared for GTK+ regardless
-        of SHARED_CORE.
-
-2013-12-18  Benjamin Poulain  
-
-        Add a simple stack abstraction for x86_64
-        https://bugs.webkit.org/show_bug.cgi?id=125908
-
-        Reviewed by Geoffrey Garen.
-
-        * assembler/MacroAssemblerX86_64.h:
-        (JSC::MacroAssemblerX86_64::addPtrNoFlags):
-        Add an explicit abstraction for the "lea" instruction. This is needed
-        by the experimental JIT to have add and subtract without changing the flags.
-
-        This is useful for function calls to test the return value, restore the registers,
-        then branch on the flags from the return value.
-
-2013-12-18  Mark Hahnenberg  
-
-        DFG should have a separate StoreBarrier node
-        https://bugs.webkit.org/show_bug.cgi?id=125530
-
-        Reviewed by Filip Pizlo.
-
-        This is in preparation for GenGC. We use a separate StoreBarrier node instead of making them implicitly 
-        part of other nodes so that it's easier to run analyses on them, e.g. for the StoreBarrierElisionPhase. 
-        They are inserted during the fixup phase. Initially they do not generate any code.
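-
-        Schematically (illustrative IR, not taken from this patch), a heap store gets
-        an explicit barrier node that analyses can see:
-        
-            a: NewObject(...)
-            b: StoreBarrier(@a)
-            c: PutByOffset(@a, ..., @value)
-        
-        and StoreBarrierElisionPhase can then kill @b, because @a is known to be
-        freshly allocated within the current block.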
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * dfg/DFGAbstractHeap.h:
-        * dfg/DFGAbstractInterpreter.h:
-        (JSC::DFG::AbstractInterpreter::isKnownNotCell):
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberizeForAllocation):
-        (JSC::DFG::clobberize):
-        * dfg/DFGConstantFoldingPhase.cpp:
-        (JSC::DFG::ConstantFoldingPhase::foldConstants): Whenever we insert new nodes that require StoreBarriers,
-        we have to add those new StoreBarriers too. It's important to note that AllocatePropertyStorage and 
-        ReallocatePropertyStorage nodes require their StoreBarriers to come after them since they allocate first,
-        which could cause a GC, and then store the resulting buffer into their JSCell, which requires the barrier.
-        If we ever require that write barriers occur before stores, we'll have to split these nodes into 
-        AllocatePropertyStorage + StoreBarrier + PutPropertyStorage.
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        (JSC::DFG::FixupPhase::insertStoreBarrier):
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::isStoreBarrier):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGOSRExitCompiler32_64.cpp:
-        (JSC::DFG::OSRExitCompiler::compileExit):
-        * dfg/DFGOSRExitCompiler64.cpp:
-        (JSC::DFG::OSRExitCompiler::compileExit):
-        * dfg/DFGPlan.cpp:
-        (JSC::DFG::Plan::compileInThreadImpl):
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileAllocatePropertyStorage):
-        (JSC::DFG::SpeculativeJIT::compileReallocatePropertyStorage):
-        (JSC::DFG::SpeculativeJIT::compileStoreBarrier):
-        (JSC::DFG::SpeculativeJIT::genericWriteBarrier): The fast path write barrier check. It loads the 
-        byte that contains the mark bit of the object. 
-        (JSC::DFG::SpeculativeJIT::storeToWriteBarrierBuffer): If the fast path check fails we try to store the 
-        cell in the WriteBarrierBuffer so as to avoid frequently flushing all registers in order to make a C call.
-        (JSC::DFG::SpeculativeJIT::writeBarrier):
-        (JSC::DFG::SpeculativeJIT::osrWriteBarrier): More barebones version of the write barrier to be executed 
-        during an OSR exit into baseline code. We must do this so that the baseline JIT object and array profiles 
-        are properly cleared during GC.
-        * dfg/DFGSpeculativeJIT.h:
-        (JSC::DFG::SpeculativeJIT::callOperation):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::cachedPutById):
-        (JSC::DFG::SpeculativeJIT::compileBaseValueStoreBarrier):
-        (JSC::DFG::SpeculativeJIT::compile):
-        (JSC::DFG::SpeculativeJIT::writeBarrier):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::cachedPutById):
-        (JSC::DFG::SpeculativeJIT::compileBaseValueStoreBarrier):
-        (JSC::DFG::SpeculativeJIT::compile):
-        (JSC::DFG::SpeculativeJIT::writeBarrier):
-        * dfg/DFGStoreBarrierElisionPhase.cpp: Added. New DFG phase that does block-local elision of redundant
-        StoreBarriers. Every time a StoreBarrier on a particular object is executed, a bit is set indicating that 
-        that object doesn't need any more StoreBarriers. 
-        (JSC::DFG::StoreBarrierElisionPhase::StoreBarrierElisionPhase):
-        (JSC::DFG::StoreBarrierElisionPhase::couldCauseGC): Nodes that could cause a GC reset the bits for all of the 
-        objects known in the current block. 
-        (JSC::DFG::StoreBarrierElisionPhase::allocatesFreshObject): A node that creates a new object automatically 
-        sets the bit for that object: even if a GC occurred as the result of that object's allocation, the 
-        object would not need a barrier, because it is guaranteed to be a young generation object until the 
-        next GC point.
-        (JSC::DFG::StoreBarrierElisionPhase::noticeFreshObject):
-        (JSC::DFG::StoreBarrierElisionPhase::getBaseOfStore):
-        (JSC::DFG::StoreBarrierElisionPhase::shouldBeElided):
-        (JSC::DFG::StoreBarrierElisionPhase::elideBarrier):
-        (JSC::DFG::StoreBarrierElisionPhase::handleNode):
-        (JSC::DFG::StoreBarrierElisionPhase::handleBlock):
-        (JSC::DFG::StoreBarrierElisionPhase::run):
-        (JSC::DFG::performStoreBarrierElision):
-        * dfg/DFGStoreBarrierElisionPhase.h: Added.
-        * heap/Heap.cpp:
-        (JSC::Heap::Heap):
-        (JSC::Heap::flushWriteBarrierBuffer):
-        * heap/Heap.h:
-        (JSC::Heap::writeBarrier):
-        * heap/MarkedBlock.h:
-        (JSC::MarkedBlock::offsetOfMarks):
-        * heap/WriteBarrierBuffer.cpp: Added. The WriteBarrierBuffer buffers a set of JSCells with pending 
-        write barriers. This buffer is used by the DFG to avoid the overhead of calling out to C repeatedly
-        to invoke a write barrier on a single JSCell. Instead the DFG has inline code to fill the WriteBarrierBuffer
-        until it's full, and then to call out to C to flush it. The WriteBarrierBuffer will also be flushed prior to 
-        each EdenCollection.
-        (JSC::WriteBarrierBuffer::WriteBarrierBuffer):
-        (JSC::WriteBarrierBuffer::~WriteBarrierBuffer):
-        (JSC::WriteBarrierBuffer::flush):
-        (JSC::WriteBarrierBuffer::reset):
-        (JSC::WriteBarrierBuffer::add):
-        * heap/WriteBarrierBuffer.h: Added.
-        (JSC::WriteBarrierBuffer::currentIndexOffset):
-        (JSC::WriteBarrierBuffer::capacityOffset):
-        (JSC::WriteBarrierBuffer::bufferOffset):
-        * jit/JITOperations.cpp:
-        * jit/JITOperations.h:
-        * runtime/VM.h:
-
-2013-12-18  Carlos Garcia Campos  
-
-        Unreviewed. Fix make distcheck.
-
-        * GNUmakefile.am:
-
-2013-12-17  Julien Brianceau  
-
-        Fix armv7 and sh4 builds.
-        https://bugs.webkit.org/show_bug.cgi?id=125848
-
-        Reviewed by Csaba Osztrogonác.
-
-        * assembler/ARMv7Assembler.h: Include limits.h for INT_MIN.
-        * assembler/SH4Assembler.h: Include limits.h for INT_MIN.
-
-2013-12-16  Joseph Pecoraro  
-
-        Fix some whitespace issues in inspector code
-        https://bugs.webkit.org/show_bug.cgi?id=125814
-
-        Reviewed by Darin Adler.
-
-        * inspector/protocol/Debugger.json:
-        * inspector/protocol/Runtime.json:
-        * inspector/scripts/CodeGeneratorInspector.py:
-        (Generator.process_command):
-
-2013-12-16  Mark Hahnenberg  
-
-        Add some missing functions to MacroAssembler
-        https://bugs.webkit.org/show_bug.cgi?id=125809
-
-        Reviewed by Oliver Hunt.
-
-        * assembler/AbstractMacroAssembler.h:
-        * assembler/AssemblerBuffer.h:
-        * assembler/LinkBuffer.cpp:
-        * assembler/MacroAssembler.h:
-        (JSC::MacroAssembler::storePtr):
-        (JSC::MacroAssembler::andPtr):
-        * assembler/MacroAssemblerARM64.h:
-        (JSC::MacroAssemblerARM64::and64):
-        (JSC::MacroAssemblerARM64::branchTest8):
-        * assembler/MacroAssemblerARMv7.h:
-        (JSC::MacroAssemblerARMv7::branchTest8):
-        * assembler/X86Assembler.h:
-
-2013-12-16  Brent Fulgham  
-
-        [Win] Remove dead code after conversion to VS2013
-        https://bugs.webkit.org/show_bug.cgi?id=125795
-
-        Reviewed by Darin Adler.
-
-        * API/tests/testapi.c: Remove local nan implementation
-
-2013-12-16  Oliver Hunt  
-
-        Cache getters and custom accessors on the prototype chain
-        https://bugs.webkit.org/show_bug.cgi?id=125602
-
-        Reviewed by Michael Saboff.
-
-        Support caching of custom getters and accessors on the prototype chain.
-        This is relatively trivial and just requires a little more work than the
-        direct access mode, as we're under more register pressure.
-
-        * bytecode/StructureStubInfo.h:
-          Removed the unused initGetByIdProto as it was confusing to still have it present.
-        * jit/Repatch.cpp:
-        (JSC::generateProtoChainAccessStub):
-        (JSC::tryCacheGetByID):
-        (JSC::tryBuildGetByIDList):
-
-2013-12-16  Mark Lam  
-
-        Change slow path result to take a void* instead of a ExecState*.
-        https://bugs.webkit.org/show_bug.cgi?id=125802.
-
-        Reviewed by Filip Pizlo.
-
-        This is in preparation for C Stack OSR entry work that is coming soon.
-        In the OSR entry case, we'll be returning a topOfFrame pointer value
-        instead of the ExecState*.
-
-        * offlineasm/cloop.rb:
-        * runtime/CommonSlowPaths.h:
-        (JSC::encodeResult):
-        (JSC::decodeResult):
-
-2013-12-16  Alex Christensen  
-
-        Fixed Win64 build on VS2013.
-        https://bugs.webkit.org/show_bug.cgi?id=125753
-
-        Reviewed by Brent Fulgham.
-
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCoreGenerated.vcxproj:
-        * JavaScriptCore.vcxproj/LLInt/LLIntAssembly/LLIntAssembly.vcxproj:
-        * JavaScriptCore.vcxproj/LLInt/LLIntDesiredOffsets/LLIntDesiredOffsets.vcxproj:
-        * JavaScriptCore.vcxproj/LLInt/LLIntOffsetsExtractor/LLIntOffsetsExtractor.vcxproj:
-        * JavaScriptCore.vcxproj/jsc/jsc.vcxproj:
-        * JavaScriptCore.vcxproj/testRegExp/testRegExp.vcxproj:
-        * JavaScriptCore.vcxproj/testapi/testapi.vcxproj:
-        Added correct PlatformToolset for 64-bit builds.
-
-2013-12-16  Peter Szanka  
-
-        Delete RVCT related code parts.
-        https://bugs.webkit.org/show_bug.cgi?id=125626
-
-        Reviewed by Darin Adler.
-
-        * assembler/ARMAssembler.cpp:
-        * assembler/ARMAssembler.h:
-        (JSC::ARMAssembler::cacheFlush):
-        * assembler/MacroAssemblerARM.cpp:
-        (JSC::isVFPPresent):
-        * jit/JITStubsARM.h:
-        * jit/JITStubsARMv7.h:
-
-2013-12-15  Ryosuke Niwa  
-
-        REGRESSION: 2x regression on Dromaeo DOM query tests
-        https://bugs.webkit.org/show_bug.cgi?id=125377
-
-        Reviewed by Filip Pizlo.
-
-        The bug was caused by JSC not JIT'ing property access on "document" due to its type info having
-        the HasImpureGetOwnPropertySlot flag.
-
-        Fixed the bug by adding a new type info flag, NewImpurePropertyFiresWatchpoints, which allows the
-        baseline JIT to generate code for accessing properties on an object with named properties (a.k.a. a
-        custom name getter) in the DOM. When a new named property appears on the object, the VM is notified
-        via VM::addImpureProperty and fires the StructureStubClearingWatchpoints added during repatching.
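-
-        An illustrative example of the affected pattern (not a test from this patch):
-        a hot loop over DOM queries such as
-        
-            for (var i = 0; i < 100000; ++i)
-                elt = document.getElementById("foo");
-        
-        never used to cache the "getElementById" lookup on document, because document
-        has impure property access.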
-
-        * bytecode/GetByIdStatus.cpp:
-        (JSC::GetByIdStatus::computeFromLLInt): Take the slow path if we have any object with impure
-        properties in the prototype chain.
-        (JSC::GetByIdStatus::computeForChain): Ditto.
-
-        * jit/Repatch.cpp:
-        (JSC::repatchByIdSelfAccess): Throw away the byte code when a new impure property is added on any
-        object in the prototype chain via StructureStubClearingWatchpoint.
-        (JSC::generateProtoChainAccessStub): Ditto.
-        (JSC::tryCacheGetByID):
-        (JSC::tryBuildGetByIDList):
-        (JSC::tryRepatchIn): Ditto.
-
-        * runtime/JSTypeInfo.h: Added NewImpurePropertyFiresWatchpoints.
-        (JSC::TypeInfo::newImpurePropertyFiresWatchpoints): Added.
-
-        * runtime/Operations.h:
-        (JSC::normalizePrototypeChainForChainAccess): Don't exit early if the VM will be notified of new
-        impure properties, even if the object has impure properties.
-
-        * runtime/Structure.h:
-        (JSC::Structure::takesSlowPathInDFGForImpureProperty): Added. Wraps hasImpureGetOwnPropertySlot and
-        asserts that newImpurePropertyFiresWatchpoints is true whenever hasImpureGetOwnPropertySlot is true.
-
-        * runtime/VM.cpp:
-        (JSC::VM::registerWatchpointForImpureProperty): Added.
-        (JSC::VM::addImpureProperty): Added. HTMLDocument calls it to notify JSC of a new impure property.
-
-        * runtime/VM.h:
-
-2013-12-15  Andy Estes  
-
-        [iOS] Upstream changes to FeatureDefines.xcconfig
-        https://bugs.webkit.org/show_bug.cgi?id=125742
-
-        Reviewed by Dan Bernstein.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2013-12-14  Filip Pizlo  
-
-        FTL should *really* know when things are flushed
-        https://bugs.webkit.org/show_bug.cgi?id=125747
-
-        Reviewed by Sam Weinig.
-        
-        Fix more codegen badness. This makes V8v7's crypto am3() function run faster in the FTL
-        than in the DFG. This means that even if we just compile those functions in V8v7 that don't
-        make calls, the FTL gives us a 2% speed-up over the DFG. That's pretty good considering
-        that we have still more optimizations to fix and we can make calls work.
-
-        * dfg/DFGSSAConversionPhase.cpp:
-        (JSC::DFG::SSAConversionPhase::run):
-        * ftl/FTLCompile.cpp:
-        (JSC::FTL::fixFunctionBasedOnStackMaps):
-
-2013-12-14  Andy Estes  
-
-        Unify FeatureDefines.xcconfig
-        https://bugs.webkit.org/show_bug.cgi?id=125741
-
-        Rubber-stamped by Dan Bernstein.
-
-        * Configurations/FeatureDefines.xcconfig: Enable ENABLE_MEDIA_SOURCE.
-
-2013-12-14  Mark Rowe  
-
-        Build fix after r160557.
-
-        r160557 added the first generated header to JavaScriptCore that needs to be installed into
-        the framework wrapper. Sadly JavaScriptCore's Derived Sources target was not set to generate
-        headers when invoked as part of the installhdrs action. This resulted in the build failing
-        due to Xcode being unable to find the header file to install. The fix for this is to configure
-        the Derived Sources target to use JavaScriptCore.xcconfig, which sets INSTALLHDRS_SCRIPT_PHASE
-        to YES and allows Xcode to generate derived sources during the installhdrs action.
-
-        Enabling INSTALLHDRS_SCRIPT_PHASE required tweaking the Generate Derived Sources script build
-        phase to skip running code related to offlineasm that depends on JSCLLIntOffsetExtractor
-        having been compiled, which isn't the case at installhdrs time.
-
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-
-2013-12-13  Joseph Pecoraro  
-
-        Some Set and Map prototype functions have incorrect function lengths
-        https://bugs.webkit.org/show_bug.cgi?id=125732
-
-        Reviewed by Oliver Hunt.
-
-        * runtime/MapPrototype.cpp:
-        (JSC::MapPrototype::finishCreation):
-        * runtime/SetPrototype.cpp:
-        (JSC::SetPrototype::finishCreation):
-
-2013-12-13  Joseph Pecoraro  
-
-        Web Inspector: Move Inspector and Debugger protocol domains into JavaScriptCore
-        https://bugs.webkit.org/show_bug.cgi?id=125707
-
-        Reviewed by Timothy Hatcher.
-
-        * CMakeLists.txt:
-        * DerivedSources.make:
-        * GNUmakefile.am:
-        * inspector/protocol/Debugger.json: Renamed from Source/WebCore/inspector/protocol/Debugger.json.
-        * inspector/protocol/GenericTypes.json: Added.
-        * inspector/protocol/InspectorDomain.json: Renamed from Source/WebCore/inspector/protocol/InspectorDomain.json.
-        Add new files to inspector generation.
-
-        * inspector/scripts/CodeGeneratorInspector.py:
-        (Generator.go):
-        Only build TypeBuilder output if the domain only has types. Avoid
-        backend/frontend dispatchers and backend commands.
-
-        (TypeBindings.create_type_declaration_.EnumBinding.get_setter_value_expression_pattern):
-        (format_setter_value_expression):
-        (Generator.process_command):
-        (Generator.generate_send_method):
-        * inspector/scripts/CodeGeneratorInspectorStrings.py:
-        Export and name the get{JS,Web}EnumConstant function.
-
-2013-12-11  Filip Pizlo  
-
-        Get rid of forward exit on UInt32ToNumber by adding an op_unsigned bytecode instruction
-        https://bugs.webkit.org/show_bug.cgi?id=125553
-
-        Reviewed by Oliver Hunt.
-        
-        UInt32ToNumber was a super complicated node because it had to do a speculation, but it
-        would do it after we had already computed the urshift. It couldn't just exit back to the
-        beginning of the urshift because the inputs to the urshift weren't necessarily live
-        anymore. We couldn't jump forward to the beginning of the next instruction because the
-        result of the urshift was not yet unsigned-converted.
-        
-        For a while we solved this by forward-exiting in UInt32ToNumber. But that's really
-        gross and I want to get rid of all forward exits. They cause a lot of bugs.
-        
-        We could also have turned UInt32ToNumber into a backwards exit by forcing the inputs to
-        the urshift to be live. I figured that this might be a bit too extreme.
-        
-        So, I just created a new place that we can exit to: I split op_urshift into op_urshift
-        followed by op_unsigned. op_unsigned is an "unsigned cast" along the lines of what
-        UInt32ToNumber does. This allows me to get rid of all of the nastyness in the DFG for
-        forward exiting in UInt32ToNumber.
-        
-        This patch enables massive code carnage in the DFG and FTL, and brings us closer to
-        eliminating one of the DFG's most confusing concepts. On the flipside, it does make the
-        bytecode slightly more complex (one new instruction). This is a profitable trade. We
-        want the DFG and FTL to trend towards simplicity, since they are both currently too
-        complicated.
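-
-        Schematically (illustrative, not an actual bytecode dump), for
-        
-            x = a >>> b;
-        
-        we now emit something like
-        
-            urshift  loc0, arg1, arg2
-            unsigned loc0, loc0
-        
-        so the unsigned "cast" has its own exit site and no longer needs to exit
-        forward.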
-
-        * bytecode/BytecodeUseDef.h:
-        (JSC::computeUsesForBytecodeOffset):
-        (JSC::computeDefsForBytecodeOffset):
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::dumpBytecode):
-        * bytecode/Opcode.h:
-        (JSC::padOpcodeName):
-        * bytecode/ValueRecovery.cpp:
-        (JSC::ValueRecovery::dumpInContext):
-        * bytecode/ValueRecovery.h:
-        (JSC::ValueRecovery::gpr):
-        * bytecompiler/NodesCodegen.cpp:
-        (JSC::BinaryOpNode::emitBytecode):
-        (JSC::emitReadModifyAssignment):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::toInt32):
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGOSRExitCompiler32_64.cpp:
-        (JSC::DFG::OSRExitCompiler::compileExit):
-        * dfg/DFGOSRExitCompiler64.cpp:
-        (JSC::DFG::OSRExitCompiler::compileExit):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileMovHint):
-        (JSC::DFG::SpeculativeJIT::compileUInt32ToNumber):
-        * dfg/DFGSpeculativeJIT.h:
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        * dfg/DFGSpeculativeJIT64.cpp:
-        * dfg/DFGStrengthReductionPhase.cpp:
-        (JSC::DFG::StrengthReductionPhase::handleNode):
-        (JSC::DFG::StrengthReductionPhase::convertToIdentityOverChild):
-        (JSC::DFG::StrengthReductionPhase::convertToIdentityOverChild1):
-        (JSC::DFG::StrengthReductionPhase::convertToIdentityOverChild2):
-        * ftl/FTLFormattedValue.h:
-        (JSC::FTL::int32Value):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileUInt32ToNumber):
-        * ftl/FTLValueFormat.cpp:
-        (JSC::FTL::reboxAccordingToFormat):
-        (WTF::printInternal):
-        * ftl/FTLValueFormat.h:
-        * jit/JIT.cpp:
-        (JSC::JIT::privateCompileMainPass):
-        (JSC::JIT::privateCompileSlowCases):
-        * jit/JIT.h:
-        * jit/JITArithmetic.cpp:
-        (JSC::JIT::emit_op_urshift):
-        (JSC::JIT::emitSlow_op_urshift):
-        (JSC::JIT::emit_op_unsigned):
-        (JSC::JIT::emitSlow_op_unsigned):
-        * jit/JITArithmetic32_64.cpp:
-        (JSC::JIT::emitRightShift):
-        (JSC::JIT::emitRightShiftSlowCase):
-        (JSC::JIT::emit_op_unsigned):
-        (JSC::JIT::emitSlow_op_unsigned):
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * runtime/CommonSlowPaths.cpp:
-        (JSC::SLOW_PATH_DECL):
-        * runtime/CommonSlowPaths.h:
-
-2013-12-13  Mark Hahnenberg  
-
-        LLInt should not conditionally branch to to labels outside of its function
-        https://bugs.webkit.org/show_bug.cgi?id=125713
-
-        Reviewed by Geoffrey Garen.
-
-        Conditional branches are insufficient for jumping to out-of-function labels.
-        The fix is to use an unconditional jmp to the label combined with a conditional branch around the jmp.
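-
-        Schematically (illustrative offlineasm, not the patch's actual code):
-        
-            # before: conditional branch straight to the out-of-function label
-            bieq t0, t1, _outOfFunctionLabel
-        
-            # after: conditional branch around an unconditional jmp
-            bineq t0, t1, .continue
-            jmp _outOfFunctionLabel
-            .continue: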
-
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-
-2013-12-13  Joseph Pecoraro  
-
-        [GTK] Remove Warnings in building about duplicate INSPECTOR variables
-        https://bugs.webkit.org/show_bug.cgi?id=125710
-
-        Reviewed by Tim Horton.
-
-        * GNUmakefile.am:
-
-2013-12-13  Joseph Pecoraro  
-
-        Cleanup CodeGeneratorInspectorStrings a bit
-        https://bugs.webkit.org/show_bug.cgi?id=125705
-
-        Reviewed by Timothy Hatcher.
-
-        * inspector/scripts/CodeGeneratorInspectorStrings.py:
-        Use ${foo} variable syntax and add an ASCIILiteral.
-
-2013-12-13  Brent Fulgham  
-
-        [Win] Unreviewed build fix after r160563
-
-        * JavaScriptCore.vcxproj/JavaScriptCoreGenerated.vcxproj: Missed the Debug
-        target in my last patch.
-
-2013-12-13  Brent Fulgham  
-
-        [Win] Unreviewed build fix after r160548
-
-        * JavaScriptCore.vcxproj/JavaScriptCoreGenerated.vcxproj: Specify
-        that we are using the vs12_xp target for Makefile-based projects.
-        * JavaScriptCore.vcxproj/LLInt/LLIntAssembly/LLIntAssembly.vcxproj: Ditto
-        * JavaScriptCore.vcxproj/LLInt/LLIntDesiredOffsets/LLIntDesiredOffsets.vcxproj: Ditto.
-
-2013-12-13  Joseph Pecoraro  
-
-        Make inspector folder groups smarter in JavaScriptCore.xcodeproj
-        https://bugs.webkit.org/show_bug.cgi?id=125663
-
-        Reviewed by Darin Adler.
-
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-
-2013-12-13  Joseph Pecoraro  
-
-        Web Inspector: Add Inspector Code Generation to JavaScriptCore for Runtime Domain
-        https://bugs.webkit.org/show_bug.cgi?id=125595
-
-        Reviewed by Timothy Hatcher.
-
-          - Move CodeGeneration scripts from WebCore into JavaScriptCore/inspector/scripts
-          - For ports that build WebKit frameworks separately, export the scripts as PrivateHeaders
-          - Update CodeGeneratorInspector.py in a few ways:
-            - output dynamic filenames, so JavaScriptCore generates InspectorJSFoo.* and WebCore generates InspectorWebFoo.*
-            - take in more than one protocol JSON file. The first contains domains to generate, the others are dependencies
-              that are generated elsewhere that we can depend on for Types.
-          - Add DerivedSources build step to generate the Inspector Interfaces
-
-        * CMakeLists.txt:
-        * DerivedSources.make:
-        * GNUmakefile.am:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.vcxproj/copy-files.cmd:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        Add scripts and code generation.
-
-        * inspector/protocol/Runtime.json: Renamed from Source/WebCore/inspector/protocol/Runtime.json.
-        Move protocol file into JavaScriptCore so its types will be generated in JavaScriptCore.
-
-        * inspector/scripts/CodeGeneratorInspector.py: Renamed from Source/WebCore/inspector/CodeGeneratorInspector.py.
-        Updates to the script as listed above.
-
-        * inspector/scripts/CodeGeneratorInspectorStrings.py: Renamed from Source/WebCore/inspector/CodeGeneratorInspectorStrings.py.
-        * inspector/scripts/generate-combined-inspector-json.py: Renamed from Source/WebCore/inspector/Scripts/generate-combined-inspector-json.py.
-        Moved from WebCore into JavaScriptCore for code generation.
-
-2013-12-13  Peter Szanka  
-
-        Delete INTEL C compiler related code parts.
-        https://bugs.webkit.org/show_bug.cgi?id=125625
-
-        Reviewed by Darin Adler.
-
-        * jsc.cpp:
-        * testRegExp.cpp:
-
-2013-12-13  Brent Fulgham  
-
-        [Win] Switch WebKit solution to Visual Studio 2013
-        https://bugs.webkit.org/show_bug.cgi?id=125192
-
-        Reviewed by Anders Carlsson.
-
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj: Update for VS2013
-        * JavaScriptCore.vcxproj/LLInt/LLIntOffsetsExtractor/LLIntOffsetsExtractor.vcxproj:
-        Ditto
-        * JavaScriptCore.vcxproj/jsc/jsc.vcxproj: Ditto
-        * JavaScriptCore.vcxproj/testRegExp/testRegExp.vcxproj: Ditto
-        * JavaScriptCore.vcxproj/testapi/testapi.vcxproj: Ditto
-
-2013-12-12  Joseph Pecoraro  
-
-        Add a few more ASCIILiterals
-        https://bugs.webkit.org/show_bug.cgi?id=125662
-
-        Reviewed by Darin Adler.
-
-        * inspector/InspectorBackendDispatcher.cpp:
-        (Inspector::InspectorBackendDispatcher::dispatch):
-
-2013-12-12  Joseph Pecoraro  
-
-        Test new JSContext name APIs
-        https://bugs.webkit.org/show_bug.cgi?id=125607
-
-        Reviewed by Darin Adler.
-
-        * API/JSContext.h:
-        * API/JSContextRef.h:
-        Fix whitespace issues.
-
-        * API/tests/testapi.c:
-        (globalContextNameTest):
-        (main):
-        * API/tests/testapi.mm:
-        Add tests for JSContext set/get name APIs.
-
-2013-12-11  Filip Pizlo  
-
-        ARM64: Hang running pdfjs test, suspect DFG generated code for "in"
-        https://bugs.webkit.org/show_bug.cgi?id=124727
-
-        Reviewed by Michael Saboff.
-        
-        Get rid of In's hackish use of StructureStubInfo. Previously it was using hotPathBegin,
-        and it was the only IC that used that field, which was wasteful. Moreover, it used it
-        to store two separate locations: the label for patching the jump and the label right
-        after the jump. The code was relying on those two being the same label, which is true
-        on X86 and some other platforms, but it isn't true on ARM64.
-        
-        This gets rid of hotPathBegin and makes In express those two locations as offsets from
-        the callReturnLocation, which is analogous to what the other IC's do.
-        
-        This fixes a bug where any successful In patching would result in a trivially infinite
-        loop - and hence a hang - on ARM64.
-
-        * bytecode/StructureStubInfo.h:
-        * dfg/DFGJITCompiler.cpp:
-        (JSC::DFG::JITCompiler::link):
-        * dfg/DFGJITCompiler.h:
-        (JSC::DFG::InRecord::InRecord):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileIn):
-        * jit/JITInlineCacheGenerator.cpp:
-        (JSC::JITByIdGenerator::finalize):
-        * jit/Repatch.cpp:
-        (JSC::replaceWithJump):
-        (JSC::patchJumpToGetByIdStub):
-        (JSC::tryCachePutByID):
-        (JSC::tryBuildPutByIdList):
-        (JSC::tryRepatchIn):
-        (JSC::resetGetByID):
-        (JSC::resetPutByID):
-        (JSC::resetIn):
-
-2013-12-11  Joseph Pecoraro  
-
-        Web Inspector: Push More Inspector Required Classes Down into JavaScriptCore
-        https://bugs.webkit.org/show_bug.cgi?id=125324
-
-        Reviewed by Timothy Hatcher.
-
-        * CMakeLists.txt:
-        * GNUmakefile.am:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.vcxproj/JavaScriptCoreCommon.props:
-        * JavaScriptCore.vcxproj/copy-files.cmd:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bindings/ScriptFunctionCall.cpp: Renamed from Source/WebCore/bindings/js/ScriptFunctionCall.cpp.
-        * bindings/ScriptFunctionCall.h: Renamed from Source/WebCore/bindings/js/ScriptFunctionCall.h.
-        * bindings/ScriptObject.cpp: Copied from Source/WebCore/inspector/WorkerConsoleAgent.cpp.
-        * bindings/ScriptObject.h: Renamed from Source/WebCore/inspector/InspectorBaseAgent.h.
-        * bindings/ScriptValue.cpp: Renamed from Source/WebCore/bindings/js/ScriptValue.cpp.
-        * bindings/ScriptValue.h: Renamed from Source/WebCore/bindings/js/ScriptValue.h.
-        * inspector/InspectorAgentBase.h: Copied from Source/WebCore/inspector/InspectorAgentRegistry.h.
-        * inspector/InspectorAgentRegistry.cpp: Renamed from Source/WebCore/inspector/InspectorAgentRegistry.cpp.
-        * inspector/InspectorBackendDispatcher.h: Renamed from Source/WebCore/inspector/InspectorBackendDispatcher.h.
-        (Inspector::InspectorSupplementalBackendDispatcher::InspectorSupplementalBackendDispatcher):
-        (Inspector::InspectorSupplementalBackendDispatcher::~InspectorSupplementalBackendDispatcher):
-        * inspector/InspectorValues.cpp: Renamed from Source/WebCore/inspector/InspectorValues.cpp.
-        * inspector/InspectorValues.h: Renamed from Source/WebCore/inspector/InspectorValues.h.
-
-2013-12-11  Laszlo Vidacs  
-
-        Store SHA1 hash in std::array
-        https://bugs.webkit.org/show_bug.cgi?id=125446
-
-        Reviewed by Darin Adler.
-
-        Change Vector to std::array and use typedef.
-
-        * bytecode/CodeBlockHash.cpp:
-        (JSC::CodeBlockHash::CodeBlockHash):
-
-2013-12-11  Mark Rowe  
-
-        Modernize the JavaScriptCore API headers
-
-        This consists of three main changes:
-        1) Converting the return type of initializer methods to instancetype.
-        2) Declaring properties rather than getters and setters.
-        3) Tagging C API methods with information about their memory management semantics.
-
-        Changing the declarations from getters and setters to properties also required
-        updating the headerdoc in a number of places.
-
-        Reviewed by Anders Carlsson.
-
-        * API/JSContext.h:
-        * API/JSContext.mm:
-        * API/JSManagedValue.h:
-        * API/JSManagedValue.mm:
-        * API/JSStringRefCF.h:
-        * API/JSValue.h:
-        * API/JSVirtualMachine.h:
-        * API/JSVirtualMachine.mm:
-
-2013-12-11  Mark Rowe  
-
-        Move JavaScriptCore off the legacy WebKit availability macros
-
-        The legacy WebKit availability macros are verbose, confusing, and provide no benefit over
-        using the system availability macros directly. The original vision was that they'd serve
-        a cross-platform purpose but that never came to be.
-
-        Map from WebKit version to OS X version based on the mapping in WebKitAvailability.h.
-        All iOS versions are specified as 7.0 as that is when the JavaScriptCore C API was made
-        public.
-
-        Part of .
-
-        Reviewed by Anders Carlsson.
-
-        * API/JSBasePrivate.h:
-        * API/JSContextRef.h:
-        * API/JSContextRefPrivate.h:
-        * API/JSObjectRef.h:
-        * API/JSValueRef.h:
-
-2013-12-10  Filip Pizlo  
-
-        Get rid of forward exit on DoubleAsInt32
-        https://bugs.webkit.org/show_bug.cgi?id=125552
-
-        Reviewed by Oliver Hunt.
-        
-        The forward exit was just there so that we wouldn't have to keep the inputs alive up to
-        the DoubleAsInt32. That's dumb. Forward exits are a complicated piece of machinery, and
-        we shouldn't keep that machinery just for a bit of liveness micro-optimization.
-        
-        Also add a bunch of machinery to test this case on X86.
-
-        * assembler/AbstractMacroAssembler.h:
-        (JSC::optimizeForARMv7s):
-        (JSC::optimizeForARM64):
-        (JSC::optimizeForX86):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileDoubleAsInt32):
-        * runtime/Options.h:
-        * tests/stress/double-as-int32.js: Added.
-        (foo):
-        (test):
-
-2013-12-10  Filip Pizlo  
-
-        Simplify CSE's treatment of NodeRelevantToOSR
-        https://bugs.webkit.org/show_bug.cgi?id=125538
-
-        Reviewed by Oliver Hunt.
-        
-        Make the NodeRelevantToOSR thing obvious: if there is any MovHint on a node then the
-        node is relevant to OSR.
-
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::run):
-        (JSC::DFG::CSEPhase::performNodeCSE):
-        (JSC::DFG::CSEPhase::performBlockCSE):
-
-2013-12-10  Filip Pizlo  
-
-        Get rid of forward exit in GetByVal on Uint32Array
-        https://bugs.webkit.org/show_bug.cgi?id=125543
-
-        Reviewed by Oliver Hunt.
-
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileGetByValOnIntTypedArray):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileGetByVal):
-
-2013-12-10  Balazs Kilvady  
-
-        [MIPS] Redundant instructions in code generated from offlineasm.
-        https://bugs.webkit.org/show_bug.cgi?id=125528
-
-        Reviewed by Michael Saboff.
-
-        Optimize lowering of offlineasm BaseIndex Addresses.
-
-        * offlineasm/mips.rb:
-
-2013-12-10  Oliver Hunt  
-
-        Reduce the mass templatizing of the JS parser
-        https://bugs.webkit.org/show_bug.cgi?id=125535
-
-        Reviewed by Michael Saboff.
-
-        The various caches we now have remove the need for many of
-        the template vs. regular parameters.  This patch converts those
-        template parameters to regular parameters and updates the call
-        sites.  This reduces the code size of the parser by around 15%.
-
-        * parser/ASTBuilder.h:
-        (JSC::ASTBuilder::createGetterOrSetterProperty):
-        (JSC::ASTBuilder::createProperty):
-        * parser/Parser.cpp:
-        (JSC::::parseInner):
-        (JSC::::parseSourceElements):
-        (JSC::::parseVarDeclarationList):
-        (JSC::::createBindingPattern):
-        (JSC::::tryParseDeconstructionPatternExpression):
-        (JSC::::parseDeconstructionPattern):
-        (JSC::::parseSwitchClauses):
-        (JSC::::parseSwitchDefaultClause):
-        (JSC::::parseBlockStatement):
-        (JSC::::parseFormalParameters):
-        (JSC::::parseFunctionInfo):
-        (JSC::::parseFunctionDeclaration):
-        (JSC::::parseProperty):
-        (JSC::::parseObjectLiteral):
-        (JSC::::parseStrictObjectLiteral):
-        (JSC::::parseMemberExpression):
-        * parser/Parser.h:
-        * parser/SyntaxChecker.h:
-        (JSC::SyntaxChecker::createProperty):
-        (JSC::SyntaxChecker::createGetterOrSetterProperty):
-
-2013-12-10  Mark Hahnenberg  
-
-        ASSERT !heap.vm()->isInitializingObject() when finishing DFG compilation at beginning of GC
-        https://bugs.webkit.org/show_bug.cgi?id=125472
-
-        Reviewed by Geoff Garen.
-
-        This patch makes it look like it's okay to allocate so that the DFG plan finalization stuff 
-        can do what it needs to do. We already expected that we might do allocation during plan 
-        finalization and we increased the deferral depth to handle this, but we need to fix this other 
-        ASSERT stuff too.
-
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * heap/Heap.cpp:
-        (JSC::Heap::collect):
-        * heap/Heap.h:
-        * heap/RecursiveAllocationScope.h: Added.
-        (JSC::RecursiveAllocationScope::RecursiveAllocationScope):
-        (JSC::RecursiveAllocationScope::~RecursiveAllocationScope):
-        * runtime/VM.h:
-
-2013-12-09  Filip Pizlo  
-
-        Impose and enforce some basic rules of sanity for where Phi functions are allowed to occur and where their (optional) corresponding MovHints can be
-        https://bugs.webkit.org/show_bug.cgi?id=125480
-
-        Reviewed by Geoffrey Garen.
-        
-        Previously, if you wanted to insert some speculation right after where a value was
-        produced, you'd get super confused if that value was produced by a Phi node.  You can't
-        necessarily insert speculations after a Phi node because Phi nodes appear in this
-        special sequence of Phis and MovHints that establish the OSR exit state for a block.
-        So, you'd probably want to search for the next place where it's safe to insert things.
-        We already do this "search for beginning of next bytecode instruction" search by
-        looking at the next node that has a different CodeOrigin.  But this would be hard for a
-        Phi because those Phis and MovHints have basically random CodeOrigins and they can all
-        have different CodeOrigins.
-
-        This change imposes some sanity for this situation:
-
-        - Phis must have unset CodeOrigins.
-
-        - In each basic block, all nodes that have unset CodeOrigins must come before all nodes
-          that have set CodeOrigins.
-
-        This all ends up working out just great because prior to this change we didn't have a 
-        use for unset CodeOrigins.  I think it's appropriate to make "unset CodeOrigin" mean
-        that we're in the prologue of a basic block.
-
-        It's interesting what this means for block merging, which we don't yet do in SSA.
-        Consider merging the edge A->B.  One possibility is that the block merger is now
-        required to clean up Phi/Upsilons, and reascribe the MovHints to have the CodeOrigin of
-        the A's block terminal.  But an answer that might be better is that the originless
-        nodes at the top of the B are just given the origin of the terminal and we keep the
-        Phis.  That would require changing the above rules.  We'll see how it goes, and what we
-        end up picking...
-
-        Overall, this special-things-at-the-top rule is analogous to what other SSA-based
-        compilers do.  For example, LLVM has rules mandating that Phis appear at the top of a
-        block.
-
-        * bytecode/CodeOrigin.cpp:
-        (JSC::CodeOrigin::dump):
-        * dfg/DFGOSRExitBase.h:
-        (JSC::DFG::OSRExitBase::OSRExitBase):
-        * dfg/DFGSSAConversionPhase.cpp:
-        (JSC::DFG::SSAConversionPhase::run):
-        * dfg/DFGValidate.cpp:
-        (JSC::DFG::Validate::validate):
-        (JSC::DFG::Validate::validateSSA):
-
-2013-12-08  Filip Pizlo  
-
-        Reveal array bounds checks in DFG IR
-        https://bugs.webkit.org/show_bug.cgi?id=125253
-
-        Reviewed by Oliver Hunt and Mark Hahnenberg.
-        
-        In SSA mode, this reveals array bounds checks and the load of array length in DFG IR,
-        making this a candidate for LICM.
-
-        This also fixes a long-standing performance bug where the JSObject slow paths would
-        always create contiguous storage, rather than type-specialized storage, when doing a
-        "storage creating" storage, like:
-        
-            var o = {};
-            o[0] = 42;
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bytecode/ExitKind.cpp:
-        (JSC::exitKindToString):
-        (JSC::exitKindIsCountable):
-        * bytecode/ExitKind.h:
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGArrayMode.cpp:
-        (JSC::DFG::permitsBoundsCheckLowering):
-        (JSC::DFG::ArrayMode::permitsBoundsCheckLowering):
-        * dfg/DFGArrayMode.h:
-        (JSC::DFG::ArrayMode::lengthNeedsStorage):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGConstantFoldingPhase.cpp:
-        (JSC::DFG::ConstantFoldingPhase::foldConstants):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGPlan.cpp:
-        (JSC::DFG::Plan::compileInThreadImpl):
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSSALoweringPhase.cpp: Added.
-        (JSC::DFG::SSALoweringPhase::SSALoweringPhase):
-        (JSC::DFG::SSALoweringPhase::run):
-        (JSC::DFG::SSALoweringPhase::handleNode):
-        (JSC::DFG::SSALoweringPhase::lowerBoundsCheck):
-        (JSC::DFG::performSSALowering):
-        * dfg/DFGSSALoweringPhase.h: Added.
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileDoublePutByVal):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compileContiguousPutByVal):
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileCheckInBounds):
-        (JSC::FTL::LowerDFGToLLVM::compileGetByVal):
-        (JSC::FTL::LowerDFGToLLVM::compilePutByVal):
-        (JSC::FTL::LowerDFGToLLVM::contiguousPutByValOutOfBounds):
-        * runtime/JSObject.cpp:
-        (JSC::JSObject::convertUndecidedForValue):
-        (JSC::JSObject::createInitialForValueAndSet):
-        (JSC::JSObject::putByIndexBeyondVectorLength):
-        (JSC::JSObject::putDirectIndexBeyondVectorLength):
-        * runtime/JSObject.h:
-        * tests/stress/float32array-out-of-bounds.js: Added.
-        (make):
-        (foo):
-        (test):
-        * tests/stress/int32-object-out-of-bounds.js: Added.
-        (make):
-        (foo):
-        (test):
-        * tests/stress/int32-out-of-bounds.js: Added.
-        (foo):
-        (test):
-
-2013-12-09  Sam Weinig  
-
-        Replace use of WTF::FixedArray with std::array
-        https://bugs.webkit.org/show_bug.cgi?id=125475
-
-        Reviewed by Anders Carlsson.
-
-        * bytecode/CodeBlockHash.cpp:
-        (JSC::CodeBlockHash::dump):
-        * bytecode/Opcode.cpp:
-        (JSC::OpcodeStats::~OpcodeStats):
-        * dfg/DFGCSEPhase.cpp:
-        * ftl/FTLAbstractHeap.h:
-        * heap/MarkedSpace.h:
-        * parser/ParserArena.h:
-        * runtime/CodeCache.h:
-        * runtime/DateInstanceCache.h:
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::reset):
-        * runtime/JSGlobalObject.h:
-        * runtime/JSString.h:
-        * runtime/LiteralParser.h:
-        * runtime/NumericStrings.h:
-        * runtime/RegExpCache.h:
-        * runtime/SmallStrings.h:
-
-2013-12-09  Joseph Pecoraro  
-
-        Remove miscellaneous unnecessary build statements
-        https://bugs.webkit.org/show_bug.cgi?id=125466
-
-        Reviewed by Darin Adler.
-
-        * DerivedSources.make:
-        * JavaScriptCore.vcxproj/build-generated-files.sh:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * make-generated-sources.sh:
-
-2013-12-08  Filip Pizlo  
-
-        CSE should work in SSA
-        https://bugs.webkit.org/show_bug.cgi?id=125430
-
-        Reviewed by Oliver Hunt and Mark Hahnenberg.
-
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::run):
-        (JSC::DFG::CSEPhase::performNodeCSE):
-        * dfg/DFGPlan.cpp:
-        (JSC::DFG::Plan::compileInThreadImpl):
-
-2013-12-09  Joseph Pecoraro  
-
-        Remove docs/make-bytecode-docs.pl
-        https://bugs.webkit.org/show_bug.cgi?id=125462
-
-        This script is very old and no longer outputs useful data since the
-        op code definitions have moved from Interpreter.cpp.
-
-        Reviewed by Darin Adler.
-
-        * DerivedSources.make:
-        * docs/make-bytecode-docs.pl: Removed.
-
-2013-12-09  Julien Brianceau  
-
-        Fix sh4 LLINT build.
-        https://bugs.webkit.org/show_bug.cgi?id=125454
-
-        Reviewed by Michael Saboff.
-
-        In LLINT, the sh4 backend implementation didn't properly handle conditional jumps
-        using a LabelReference instance. This patch fixes it through the
-        sh4LowerMisplacedLabels phase. Also, to avoid the need for a 4th temporary gpr,
-        this phase is triggered later in getModifiedListSH4.
-
-        * offlineasm/sh4.rb:
-
-2013-12-08  Filip Pizlo  
-
-        Add the notion of ConstantStoragePointer to DFG IR
-        https://bugs.webkit.org/show_bug.cgi?id=125395
-
-        Reviewed by Oliver Hunt.
-        
-        This pushes more typed array folding into StrengthReductionPhase, and enables CSE on
-        storage pointers. Previously, you might have separate nodes for the same storage
-        pointer and this would cause some bad register pressure in the DFG. Note that this
-        was really a theoretical problem and not, to my knowledge, a practical one - so this
-        patch is basically just a clean-up.
-
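-        A sketch of the kind of code this helps (illustrative, not from the
-        patch): with 'array' a compile-time constant typed array, both accesses
-        below can share a single ConstantStoragePointer node via CSE:
-
-            var array = new Int32Array(100);
-            function foo(i, j) {
-                return array[i] + array[j];
-            }
-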
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::constantStoragePointerCSE):
-        (JSC::DFG::CSEPhase::performNodeCSE):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGGraph.cpp:
-        (JSC::DFG::Graph::dump):
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::convertToConstantStoragePointer):
-        (JSC::DFG::Node::hasStoragePointer):
-        (JSC::DFG::Node::storagePointer):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileConstantStoragePointer):
-        (JSC::DFG::SpeculativeJIT::compileGetIndexedPropertyStorage):
-        * dfg/DFGSpeculativeJIT.h:
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGStrengthReductionPhase.cpp:
-        (JSC::DFG::StrengthReductionPhase::handleNode):
-        (JSC::DFG::StrengthReductionPhase::foldTypedArrayPropertyToConstant):
-        (JSC::DFG::StrengthReductionPhase::prepareToFoldTypedArray):
-        * dfg/DFGWatchpointCollectionPhase.cpp:
-        (JSC::DFG::WatchpointCollectionPhase::handle):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileConstantStoragePointer):
-        (JSC::FTL::LowerDFGToLLVM::compileGetIndexedPropertyStorage):
-
-2013-12-08  Filip Pizlo  
-
-        FTL should support UntypedUse versions of Compare nodes
-        https://bugs.webkit.org/show_bug.cgi?id=125426
-
-        Reviewed by Oliver Hunt.
-        
-        This adds UntypedUse versions of all comparisons except CompareStrictEq, which is
-        sufficiently different that I thought I'd do it in another patch.
-        
-        This also extends our ability to abstract over comparison kind and removes a bunch of
-        copy-paste code.
-
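-        For example (a sketch along the lines of the added stress tests), a
-        comparison whose operands have mixed types ends up as an UntypedUse
-        compare:
-
-            function foo(a, b) {
-                return a < b;
-            }
-            foo(1, 2);      // int32 inputs
-            foo(1.5, "2");  // double vs. string pushes us to the untyped path
-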
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompare):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLIntrinsicRepository.h:
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileCompareEq):
-        (JSC::FTL::LowerDFGToLLVM::compileCompareLess):
-        (JSC::FTL::LowerDFGToLLVM::compileCompareLessEq):
-        (JSC::FTL::LowerDFGToLLVM::compileCompareGreater):
-        (JSC::FTL::LowerDFGToLLVM::compileCompareGreaterEq):
-        (JSC::FTL::LowerDFGToLLVM::compare):
-        (JSC::FTL::LowerDFGToLLVM::nonSpeculativeCompare):
-        * ftl/FTLOutput.h:
-        (JSC::FTL::Output::icmp):
-        (JSC::FTL::Output::equal):
-        (JSC::FTL::Output::notEqual):
-        (JSC::FTL::Output::above):
-        (JSC::FTL::Output::aboveOrEqual):
-        (JSC::FTL::Output::below):
-        (JSC::FTL::Output::belowOrEqual):
-        (JSC::FTL::Output::greaterThan):
-        (JSC::FTL::Output::greaterThanOrEqual):
-        (JSC::FTL::Output::lessThan):
-        (JSC::FTL::Output::lessThanOrEqual):
-        (JSC::FTL::Output::fcmp):
-        (JSC::FTL::Output::doubleEqual):
-        (JSC::FTL::Output::doubleNotEqualOrUnordered):
-        (JSC::FTL::Output::doubleLessThan):
-        (JSC::FTL::Output::doubleLessThanOrEqual):
-        (JSC::FTL::Output::doubleGreaterThan):
-        (JSC::FTL::Output::doubleGreaterThanOrEqual):
-        (JSC::FTL::Output::doubleEqualOrUnordered):
-        (JSC::FTL::Output::doubleNotEqual):
-        (JSC::FTL::Output::doubleLessThanOrUnordered):
-        (JSC::FTL::Output::doubleLessThanOrEqualOrUnordered):
-        (JSC::FTL::Output::doubleGreaterThanOrUnordered):
-        (JSC::FTL::Output::doubleGreaterThanOrEqualOrUnordered):
-        * tests/stress/untyped-equality.js: Added.
-        (foo):
-        * tests/stress/untyped-less-than.js: Added.
-        (foo):
-
-2013-12-07  Filip Pizlo  
-
-        Fold typedArray.length if typedArray is constant
-        https://bugs.webkit.org/show_bug.cgi?id=125252
-
-        Reviewed by Sam Weinig.
-        
-        This was meant to be easy. The problem is that there was no good place for putting
-        the folding of typedArray.length to a constant. You can't quite do it in the
-        bytecode parser because at that point you don't yet know if typedArray is really
-        a typed array. You can't do it as part of constant folding because the folder
-        assumes that it can opportunistically forward-flow a constant value without changing
-        the IR; this doesn't work since we need to first change the IR to register a
-        desired watchpoint and only after that can we introduce that constant. We could have
-        done it in Fixup but that would have been awkward since Fixup's code for turning a
-        GetById of "length" into GetArrayLength is already somewhat complex. We could have
-        done it in CSE but CSE is already fairly gnarly and will probably get rewritten.
-        
-        So I introduced a new phase, called StrengthReduction. This phase should have any
-        transformations that don't require CFA or CSE and that it would be weird to put into
-        those other phases.
-        
-        I also took the opportunity to refactor some of the other folding code.
-        
-        This also adds a test, but the test couldn't quite be a LayoutTests/js/regress so I
-        introduced the notion of JavaScriptCore/tests/stress.
-        
-        The goal of this patch isn't really to improve performance or anything like that.
-        It adds an optimization for completeness, and in doing so it unlocks a bunch of new
-        possibilities. The one that I'm most excited about is revealing array length checks
-        in DFG IR, which will allow for array bounds check hoisting and elimination.
-
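-        A sketch of the folding opportunity (not the actual added test): if
-        'array' is inferred to be a compile-time constant typed array, the
-        length load below can become a constant:
-
-            var array = new Int8Array(100);
-            function foo() {
-                return array.length; // foldable to 100 under a watchpoint
-            }
-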
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGGraph.cpp:
-        (JSC::DFG::Graph::tryGetFoldableView):
-        (JSC::DFG::Graph::tryGetFoldableViewForChild1):
-        * dfg/DFGGraph.h:
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::hasTypedArray):
-        (JSC::DFG::Node::typedArray):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGPlan.cpp:
-        (JSC::DFG::Plan::compileInThreadImpl):
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::jumpForTypedArrayOutOfBounds):
-        (JSC::DFG::SpeculativeJIT::compileConstantIndexedPropertyStorage):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGStrengthReductionPhase.cpp: Added.
-        (JSC::DFG::StrengthReductionPhase::StrengthReductionPhase):
-        (JSC::DFG::StrengthReductionPhase::run):
-        (JSC::DFG::StrengthReductionPhase::handleNode):
-        (JSC::DFG::StrengthReductionPhase::foldTypedArrayPropertyToConstant):
-        (JSC::DFG::performStrengthReduction):
-        * dfg/DFGStrengthReductionPhase.h: Added.
-        * dfg/DFGWatchpointCollectionPhase.cpp:
-        (JSC::DFG::WatchpointCollectionPhase::handle):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileGetIndexedPropertyStorage):
-        (JSC::FTL::LowerDFGToLLVM::compilePutByVal):
-        (JSC::FTL::LowerDFGToLLVM::typedArrayLength):
-        * jsc.cpp:
-        (GlobalObject::finishCreation):
-        (functionTransferArrayBuffer):
-        * runtime/ArrayBufferView.h:
-        * tests/stress: Added.
-        * tests/stress/fold-typed-array-properties.js: Added.
-        (foo):
-
-2013-12-07  peavo@outlook.com  
-
-        [Win][64-bit] Hitting breakpoint assembler instruction in callToJavaScript.
-        https://bugs.webkit.org/show_bug.cgi?id=125382
-
-        Reviewed by Michael Saboff.
-
-        With these breakpoints removed, the WinCairo results from run-javascriptcore-tests are the same as the WinCairo 32-bit results.
-
-        * jit/JITStubsMSVC64.asm: Remove breakpoint instructions.
-
-2013-12-06  Filip Pizlo  
-
-        FTL should support all of Branch/LogicalNot
-        https://bugs.webkit.org/show_bug.cgi?id=125370
-
-        Reviewed by Mark Hahnenberg.
-
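-        Illustrative only: code like the following exercises Branch and
-        LogicalNot on arbitrary (boolified) values:
-
-            function foo(x) {
-                if (x)        // Branch on an untyped value
-                    return 1;
-                return !x;    // LogicalNot on an untyped value
-            }
-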
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLIntrinsicRepository.h:
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::boolify):
-
-2013-12-06  Roger Fong  and Brent Fulgham  
-
-        [Win] Support compiling with VS2013
-        https://bugs.webkit.org/show_bug.cgi?id=125353
-
-        Reviewed by Anders Carlsson.
-
-        * API/tests/testapi.c: Use C99 defines if available.
-        * jit/JITOperations.cpp: Don't attempt to define C linkage when
-        returning a C++ object.
-
-2013-12-06  Filip Pizlo  
-
-        FTL should support generic ByVal accesses
-        https://bugs.webkit.org/show_bug.cgi?id=125368
-
-        Reviewed by Mark Hahnenberg.
-
-        * dfg/DFGGraph.h:
-        (JSC::DFG::Graph::isStrictModeFor):
-        (JSC::DFG::Graph::ecmaModeFor):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLIntrinsicRepository.h:
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileGetByVal):
-        (JSC::FTL::LowerDFGToLLVM::compilePutByVal):
-
-2013-12-06  Filip Pizlo  
-
-        FTL should support hole/OOB array accesses
-        https://bugs.webkit.org/show_bug.cgi?id=118077
-
-        Reviewed by Oliver Hunt and Mark Hahnenberg.
-
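-        Illustrative only: an access like the one below, which reads a hole or
-        goes out of bounds and yields undefined, is the kind of case this lets
-        the FTL handle:
-
-            function foo(a, i) {
-                return a[i];
-            }
-            foo([1, 2, 3], 10); // out-of-bounds read, returns undefined
-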
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLIntrinsicRepository.h:
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileGetByVal):
-        (JSC::FTL::LowerDFGToLLVM::baseIndex):
-
-2013-12-06  Michael Saboff  
-
-        Split sizing of VarArgs frames from loading arguments for the frame
-        https://bugs.webkit.org/show_bug.cgi?id=125331
-
-        Reviewed by Filip Pizlo.
-
-        Split loadVarargs into sizeAndAllocFrameForVarargs() and loadVarargs() in
-        preparation for moving onto the C stack.  sizeAndAllocFrameForVarargs() will
-        compute the size of the callee frame and allocate it, while loadVarargs()
-        actually loads the argument values.
-
-        As part of moving onto the C stack, sizeAndAllocFrameForVarargs() will be
-        changed to a function that just computes the size.  The caller will use that
-        size to allocate the new frame on the stack before calling loadVarargs() and
-        actually making the call.
-
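-        For reference, the varargs path being split here is the one taken by
-        call shapes like this (illustrative only):
-
-            function sum() {
-                var total = 0;
-                for (var i = 0; i < arguments.length; ++i)
-                    total += arguments[i];
-                return total;
-            }
-            sum.apply(null, [1, 2, 3]); // varargs call: size, alloc, then load
-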
-        * interpreter/Interpreter.cpp:
-        (JSC::sizeAndAllocFrameForVarargs):
-        (JSC::loadVarargs):
-        * interpreter/Interpreter.h:
-        * jit/JIT.h:
-        * jit/JITCall.cpp:
-        (JSC::JIT::compileLoadVarargs):
-        * jit/JITCall32_64.cpp:
-        (JSC::JIT::compileLoadVarargs):
-        * jit/JITInlines.h:
-        (JSC::JIT::callOperation):
-        * jit/JITOperations.cpp:
-        * jit/JITOperations.h:
-        * llint/LLIntSlowPaths.cpp:
-        (JSC::LLInt::LLINT_SLOW_PATH_DECL):
-        * llint/LLIntSlowPaths.h:
-        * llint/LowLevelInterpreter.asm:
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * runtime/VM.h:
-
-2013-12-06  Filip Pizlo  
-
-        FTL should support all of ValueToInt32
-        https://bugs.webkit.org/show_bug.cgi?id=125283
-
-        Reviewed by Mark Hahnenberg.
-
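-        Illustrative only: ValueToInt32 covers conversions like the one below
-        when 'x' can be any JS value:
-
-            function foo(x) {
-                return x | 0; // ValueToInt32 on an untyped input
-            }
-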
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileValueToInt32):
-        (JSC::FTL::LowerDFGToLLVM::compilePutByVal):
-        (JSC::FTL::LowerDFGToLLVM::lowCell):
-        (JSC::FTL::LowerDFGToLLVM::isCell):
-
-2013-12-06  Filip Pizlo  
-
-        FTL shouldn't have a doubleToUInt32 path
-        https://bugs.webkit.org/show_bug.cgi?id=125360
-
-        Reviewed by Mark Hahnenberg.
-        
-        This code existed because I incorrectly thought it was necessary. It's now basically
-        dead.
-
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compilePutByVal):
-
-2013-12-06  Laszlo Vidacs  
-
-        Define SHA1 hash size in SHA1.h and use it at various places.
-        https://bugs.webkit.org/show_bug.cgi?id=125345
-
-        Reviewed by Darin Adler.
-
-        Use SHA1::hashSize instead of local variables.
-
-        * bytecode/CodeBlockHash.cpp:
-        (JSC::CodeBlockHash::CodeBlockHash): use SHA1::hashSize
-
-2013-12-05  Michael Saboff  
-
-        REGRESSION(r160213): Crash in js/dom/JSON-parse.html
-        https://bugs.webkit.org/show_bug.cgi?id=125335
-
-        Reviewed by Mark Lam.
-
-        Changed _llint_op_catch to materialize the VM via the scope chain instead of 
-        the CodeBlock.  CallFrames always have a scope chain, but may have a null CodeBlock.
-
-        * llint/LowLevelInterpreter32_64.asm:
-        (_llint_op_catch):
-        * llint/LowLevelInterpreter64.asm:
-        (_llint_op_catch):
-
-2013-12-05  Michael Saboff  
-
-        JSC: Simplify interface between throw and catch handler
-        https://bugs.webkit.org/show_bug.cgi?id=125328
-
-        Reviewed by Geoffrey Garen.
-
-        Simplified the throw - catch interface.  The throw side is only responsible for
-        jumping to the appropriate op_catch handler or returnFromJavaScript for uncaught
-        exceptions.  The handler uses the exception values like VM.callFrameForThrow
-        as appropriate and no longer relies on the throw side putting anything in
-        registers.
-
-        * jit/CCallHelpers.h:
-        (JSC::CCallHelpers::jumpToExceptionHandler):
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_catch):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emit_op_catch):
-        * llint/LowLevelInterpreter32_64.asm:
-        (_llint_op_catch):
-        (_llint_throw_from_slow_path_trampoline):
-        * llint/LowLevelInterpreter64.asm:
-        (_llint_op_catch):
-        (_llint_throw_from_slow_path_trampoline):
-
-2013-12-04  Oliver Hunt  
-
-        Refactor static getter function prototype to include thisValue in addition to the base object
-        https://bugs.webkit.org/show_bug.cgi?id=124461
-
-        Reviewed by Geoffrey Garen.
-
-        Add thisValue parameter to static getter prototype, and switch
-        from JSValue to EncodedJSValue for parameters and return value.
-
-        Currently none of the static getters use the thisValue, but
-        separating out the refactoring will prevent future changes
-        from getting lost in the noise of refactoring.  This means
-        that this patch does not result in any change in behaviour.
-
-        * API/JSCallbackObject.h:
-        * API/JSCallbackObjectFunctions.h:
-        (JSC::::asCallbackObject):
-        (JSC::::staticFunctionGetter):
-        (JSC::::callbackGetter):
-        * jit/JITOperations.cpp:
-        * runtime/JSActivation.cpp:
-        (JSC::JSActivation::argumentsGetter):
-        * runtime/JSActivation.h:
-        * runtime/JSFunction.cpp:
-        (JSC::JSFunction::argumentsGetter):
-        (JSC::JSFunction::callerGetter):
-        (JSC::JSFunction::lengthGetter):
-        (JSC::JSFunction::nameGetter):
-        * runtime/JSFunction.h:
-        * runtime/JSObject.h:
-        (JSC::PropertySlot::getValue):
-        * runtime/NumberConstructor.cpp:
-        (JSC::numberConstructorNaNValue):
-        (JSC::numberConstructorNegInfinity):
-        (JSC::numberConstructorPosInfinity):
-        (JSC::numberConstructorMaxValue):
-        (JSC::numberConstructorMinValue):
-        * runtime/PropertySlot.h:
-        * runtime/RegExpConstructor.cpp:
-        (JSC::asRegExpConstructor):
-        (JSC::regExpConstructorDollar1):
-        (JSC::regExpConstructorDollar2):
-        (JSC::regExpConstructorDollar3):
-        (JSC::regExpConstructorDollar4):
-        (JSC::regExpConstructorDollar5):
-        (JSC::regExpConstructorDollar6):
-        (JSC::regExpConstructorDollar7):
-        (JSC::regExpConstructorDollar8):
-        (JSC::regExpConstructorDollar9):
-        (JSC::regExpConstructorInput):
-        (JSC::regExpConstructorMultiline):
-        (JSC::regExpConstructorLastMatch):
-        (JSC::regExpConstructorLastParen):
-        (JSC::regExpConstructorLeftContext):
-        (JSC::regExpConstructorRightContext):
-        * runtime/RegExpObject.cpp:
-        (JSC::asRegExpObject):
-        (JSC::regExpObjectGlobal):
-        (JSC::regExpObjectIgnoreCase):
-        (JSC::regExpObjectMultiline):
-        (JSC::regExpObjectSource):
-
-2013-12-04  Filip Pizlo  
-
-        FTL should use cvttsd2si directly for double-to-int32 conversions
-        https://bugs.webkit.org/show_bug.cgi?id=125275
-
-        Reviewed by Michael Saboff.
-        
-        Wow. This was an ordeal. Using cvttsd2si was actually easy, but I learned, and
-        sometimes even fixed, some interesting things:
-        
-        - The llvm.x86.sse2.cvttsd2si intrinsic can actually result in LLVM emitting a
-          vcvttsd2si. I guess the intrinsic doesn't actually imply the instruction.
-        
-        - That whole thing about branchTruncateDoubleToUint32? Yeah we don't need that. It's
-          better to use branchTruncateDoubleToInt32 instead. It has the right semantics for
-          all of its callers (err, its one-and-only caller), and it's more likely to take
-          fast path. This patch kills branchTruncateDoubleToUint32.
-        
-        - "a[i] = v; v = a[i]". Does this change v? OK, assume that 'a[i]' is a pure-ish
-          operation - like an array access with 'i' being an integer index and we're not
-          having a bad time. Now does this change v? CSE assumes that it doesn't. That's
-          wrong. If 'a' is a typed array - the most sensible and pure kind of array - then
-          this can be a truncating cast. For example 'v' could be a double and 'a' could be
-          an integer array.
-        
-        - "v1 = a[i]; v2 = a[i]". Is v1 === v2 assuming that 'a[i]' is pure-ish? The answer
-          is no. You could have a different arrayMode in each access. I know this sounds
-          weird, but with concurrent JIT that might happen.
-        
-        This patch adds tests for all of this stuff, except for the first issue (it's weird
-        but probably doesn't matter) and the last issue (it's too much of a freakshow).
-
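-        A sketch of the CSE hazard described above (illustrative, not the
-        actual added test):
-
-            var a = new Int32Array(1);
-            function foo(v) {
-                a[0] = v;     // truncating store if v is a double
-                v = a[0];     // CSE must not forward the pre-store value of v
-                return v;
-            }
-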
-        * assembler/MacroAssemblerARM64.h:
-        * assembler/MacroAssemblerARMv7.h:
-        * assembler/MacroAssemblerX86Common.h:
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::getByValLoadElimination):
-        (JSC::DFG::CSEPhase::performNodeCSE):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compilePutByValForIntTypedArray):
-        * ftl/FTLAbbreviations.h:
-        (JSC::FTL::vectorType):
-        (JSC::FTL::getUndef):
-        (JSC::FTL::buildInsertElement):
-        * ftl/FTLIntrinsicRepository.h:
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::doubleToInt32):
-        (JSC::FTL::LowerDFGToLLVM::doubleToUInt32):
-        (JSC::FTL::LowerDFGToLLVM::sensibleDoubleToInt32):
-        * ftl/FTLOutput.h:
-        (JSC::FTL::Output::insertElement):
-        (JSC::FTL::Output::hasSensibleDoubleToInt):
-        (JSC::FTL::Output::sensibleDoubleToInt):
-
-2013-12-05  Commit Queue  
-
-        Unreviewed, rolling out r160133.
-        http://trac.webkit.org/changeset/160133
-        https://bugs.webkit.org/show_bug.cgi?id=125325
-
-        broke bindings tests on all the bots (Requested by thorton on
-        #webkit).
-
-        * API/JSCallbackObject.h:
-        * API/JSCallbackObjectFunctions.h:
-        (JSC::::staticFunctionGetter):
-        (JSC::::callbackGetter):
-        * jit/JITOperations.cpp:
-        * runtime/JSActivation.cpp:
-        (JSC::JSActivation::argumentsGetter):
-        * runtime/JSActivation.h:
-        * runtime/JSFunction.cpp:
-        (JSC::JSFunction::argumentsGetter):
-        (JSC::JSFunction::callerGetter):
-        (JSC::JSFunction::lengthGetter):
-        (JSC::JSFunction::nameGetter):
-        * runtime/JSFunction.h:
-        * runtime/JSObject.h:
-        (JSC::PropertySlot::getValue):
-        * runtime/NumberConstructor.cpp:
-        (JSC::numberConstructorNaNValue):
-        (JSC::numberConstructorNegInfinity):
-        (JSC::numberConstructorPosInfinity):
-        (JSC::numberConstructorMaxValue):
-        (JSC::numberConstructorMinValue):
-        * runtime/PropertySlot.h:
-        * runtime/RegExpConstructor.cpp:
-        (JSC::regExpConstructorDollar1):
-        (JSC::regExpConstructorDollar2):
-        (JSC::regExpConstructorDollar3):
-        (JSC::regExpConstructorDollar4):
-        (JSC::regExpConstructorDollar5):
-        (JSC::regExpConstructorDollar6):
-        (JSC::regExpConstructorDollar7):
-        (JSC::regExpConstructorDollar8):
-        (JSC::regExpConstructorDollar9):
-        (JSC::regExpConstructorInput):
-        (JSC::regExpConstructorMultiline):
-        (JSC::regExpConstructorLastMatch):
-        (JSC::regExpConstructorLastParen):
-        (JSC::regExpConstructorLeftContext):
-        (JSC::regExpConstructorRightContext):
-        * runtime/RegExpObject.cpp:
-        (JSC::regExpObjectGlobal):
-        (JSC::regExpObjectIgnoreCase):
-        (JSC::regExpObjectMultiline):
-        (JSC::regExpObjectSource):
-
-2013-12-05  Mark Lam  
-
-        Make the C Loop LLINT work with callToJavaScript.
-        https://bugs.webkit.org/show_bug.cgi?id=125294.
-
-        Reviewed by Michael Saboff.
-
-        1. Changed the C Loop LLINT to dispatch to an Executable via its JITCode
-           instance, which is consistent with how the ASM LLINT works.
-        2. Changed CLoop::execute() to take an Opcode instead of an OpcodeID.
-           This makes it play nice with the use of JITCode for dispatching.
-        3. Introduce a callToJavaScript and callToNativeFunction for the C Loop
-           LLINT. These will call JSStack::pushFrame() and popFrame() to setup
-           and teardown the CallFrame.
-        4. Also introduced a C Loop returnFromJavaScript which is just a
-           replacement for ctiOpThrowNotCaught which had the same function.
-        5. Remove a lot of #if ENABLE(LLINT_C_LOOP) code now that the dispatch
-           mechanism is consistent.
-
-        This patch has been tested with both configurations of COMPUTED_GOTOs
-        on and off.
-
-        * interpreter/CachedCall.h:
-        (JSC::CachedCall::CachedCall):
-        (JSC::CachedCall::call):
-        (JSC::CachedCall::setArgument):
-        * interpreter/CallFrameClosure.h:
-        (JSC::CallFrameClosure::setThis):
-        (JSC::CallFrameClosure::setArgument):
-        (JSC::CallFrameClosure::resetCallFrame):
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::execute):
-        (JSC::Interpreter::executeCall):
-        (JSC::Interpreter::executeConstruct):
-        (JSC::Interpreter::prepareForRepeatCall):
-        * interpreter/Interpreter.h:
-        * interpreter/JSStack.h:
-        * interpreter/JSStackInlines.h:
-        (JSC::JSStack::pushFrame):
-        * interpreter/ProtoCallFrame.h:
-        (JSC::ProtoCallFrame::scope):
-        (JSC::ProtoCallFrame::callee):
-        (JSC::ProtoCallFrame::thisValue):
-        (JSC::ProtoCallFrame::argument):
-        (JSC::ProtoCallFrame::setArgument):
-        * jit/JITCode.cpp:
-        (JSC::JITCode::execute):
-        * jit/JITCode.h:
-        * jit/JITExceptions.cpp:
-        (JSC::genericUnwind):
-        * llint/LLIntCLoop.cpp:
-        (JSC::LLInt::CLoop::initialize):
-        * llint/LLIntCLoop.h:
-        * llint/LLIntEntrypoint.cpp:
-        (JSC::LLInt::setFunctionEntrypoint):
-        (JSC::LLInt::setEvalEntrypoint):
-        (JSC::LLInt::setProgramEntrypoint):
-        - Inverted the check for vm.canUseJIT(). This allows the JIT case to be
-          #if'd out nicely when building the C Loop LLINT.
-        * llint/LLIntOpcode.h:
-        * llint/LLIntThunks.cpp:
-        (JSC::doCallToJavaScript):
-        (JSC::executeJS):
-        (JSC::callToJavaScript):
-        (JSC::executeNative):
-        (JSC::callToNativeFunction):
-        * llint/LLIntThunks.h:
-        * llint/LowLevelInterpreter.cpp:
-        (JSC::CLoop::execute):
-        * runtime/Executable.h:
-        (JSC::ExecutableBase::offsetOfNumParametersFor):
-        (JSC::ExecutableBase::hostCodeEntryFor):
-        (JSC::ExecutableBase::jsCodeEntryFor):
-        (JSC::ExecutableBase::jsCodeWithArityCheckEntryFor):
-        (JSC::NativeExecutable::create):
-        (JSC::NativeExecutable::finishCreation):
-        (JSC::ProgramExecutable::generatedJITCode):
-        * runtime/JSArray.cpp:
-        (JSC::AVLTreeAbstractorForArrayCompare::compare_key_key):
-        * runtime/StringPrototype.cpp:
-        (JSC::replaceUsingRegExpSearch):
-        * runtime/VM.cpp:
-        (JSC::VM::getHostFunction):
-
-2013-12-05  Laszlo Vidacs  
-
-        Fix JavaScriptCore build if cloop is enabled after r160094
-        https://bugs.webkit.org/show_bug.cgi?id=125292
-
-        Reviewed by Michael Saboff.
-
-        Move ProtoCallFrame outside the JIT guard.
-
-        * jit/JITCode.h:
-
-2013-12-04  Filip Pizlo  
-
-        Fold constant typed arrays
-        https://bugs.webkit.org/show_bug.cgi?id=125205
-
-        Reviewed by Oliver Hunt and Mark Hahnenberg.
-        
-        If by some other mechanism we have a typed array access on a compile-time constant
-        typed array pointer, then fold:
-        
-        - Array bounds checks. Specifically, fold the load of length.
-        
-        - Loading the vector.
-        
-        This needs to install a watchpoint on the array itself because of the possibility of
-        neutering. Neutering is ridiculous. We do this without bloating the size of
-        ArrayBuffer or JSArrayBufferView in the common case (i.e. the case where you
-        allocated an array that didn't end up becoming a compile-time constant). To install
-        the watchpoint, we slowDownAndWasteMemory and then create an incoming reference to
-        the ArrayBuffer, where that incoming reference is from a watchpoint object. The
-        ArrayBuffer already knows about such incoming references and can fire the
-        watchpoints that way.
-        
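-        Illustrative sketch: with 'array' a compile-time constant typed array,
-        the bounds check and vector load below can fold, guarded by a
-        neutering watchpoint on the buffer:
-
-            var array = new Int8Array(100);
-            function foo(i) {
-                return array[i]; // length and vector become compile-time constants
-            }
-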
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * dfg/DFGDesiredWatchpoints.cpp:
-        (JSC::DFG::ArrayBufferViewWatchpointAdaptor::add):
-        (JSC::DFG::DesiredWatchpoints::addLazily):
-        * dfg/DFGDesiredWatchpoints.h:
-        (JSC::DFG::GenericSetAdaptor::add):
-        (JSC::DFG::GenericSetAdaptor::hasBeenInvalidated):
-        (JSC::DFG::ArrayBufferViewWatchpointAdaptor::hasBeenInvalidated):
-        (JSC::DFG::GenericDesiredWatchpoints::reallyAdd):
-        (JSC::DFG::GenericDesiredWatchpoints::areStillValid):
-        (JSC::DFG::GenericDesiredWatchpoints::isStillValid):
-        (JSC::DFG::GenericDesiredWatchpoints::shouldAssumeMixedState):
-        (JSC::DFG::DesiredWatchpoints::isStillValid):
-        (JSC::DFG::DesiredWatchpoints::shouldAssumeMixedState):
-        (JSC::DFG::DesiredWatchpoints::isValidOrMixed):
-        * dfg/DFGGraph.cpp:
-        (JSC::DFG::Graph::tryGetFoldableView):
-        * dfg/DFGGraph.h:
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::jumpForTypedArrayOutOfBounds):
-        (JSC::DFG::SpeculativeJIT::emitTypedArrayBoundsCheck):
-        (JSC::DFG::SpeculativeJIT::compileGetByValOnIntTypedArray):
-        (JSC::DFG::SpeculativeJIT::compilePutByValForIntTypedArray):
-        (JSC::DFG::SpeculativeJIT::compileGetByValOnFloatTypedArray):
-        (JSC::DFG::SpeculativeJIT::compilePutByValForFloatTypedArray):
-        (JSC::DFG::SpeculativeJIT::compileConstantIndexedPropertyStorage):
-        (JSC::DFG::SpeculativeJIT::compileGetIndexedPropertyStorage):
-        * dfg/DFGSpeculativeJIT.h:
-        * dfg/DFGWatchpointCollectionPhase.cpp:
-        (JSC::DFG::WatchpointCollectionPhase::handle):
-        (JSC::DFG::WatchpointCollectionPhase::addLazily):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileGetIndexedPropertyStorage):
-        (JSC::FTL::LowerDFGToLLVM::compileGetByVal):
-        (JSC::FTL::LowerDFGToLLVM::compilePutByVal):
-        (JSC::FTL::LowerDFGToLLVM::typedArrayLength):
-        * runtime/ArrayBuffer.cpp:
-        (JSC::ArrayBuffer::transfer):
-        * runtime/ArrayBufferNeuteringWatchpoint.cpp: Added.
-        (JSC::ArrayBufferNeuteringWatchpoint::ArrayBufferNeuteringWatchpoint):
-        (JSC::ArrayBufferNeuteringWatchpoint::~ArrayBufferNeuteringWatchpoint):
-        (JSC::ArrayBufferNeuteringWatchpoint::finishCreation):
-        (JSC::ArrayBufferNeuteringWatchpoint::destroy):
-        (JSC::ArrayBufferNeuteringWatchpoint::create):
-        (JSC::ArrayBufferNeuteringWatchpoint::createStructure):
-        * runtime/ArrayBufferNeuteringWatchpoint.h: Added.
-        (JSC::ArrayBufferNeuteringWatchpoint::set):
-        * runtime/VM.cpp:
-        (JSC::VM::VM):
-        * runtime/VM.h:
-
-2013-12-04  Commit Queue  
-
-        Unreviewed, rolling out r160116.
-        http://trac.webkit.org/changeset/160116
-        https://bugs.webkit.org/show_bug.cgi?id=125264
-
-        Change doesn't work as intended. See bug comments for details.
-        (Requested by bfulgham on #webkit).
-
-        * runtime/InitializeThreading.cpp:
-        (JSC::initializeThreading):
-
-2013-12-04  Oliver Hunt  
-
-        Refactor static getter function prototype to include thisValue in addition to the base object
-        https://bugs.webkit.org/show_bug.cgi?id=124461
-
-        Reviewed by Geoffrey Garen.
-
-        Add thisValue parameter to static getter prototype, and switch
-        from JSValue to EncodedJSValue for parameters and return value.
-
-        Currently none of the static getters use the thisValue, but
-        separating out the refactoring will prevent future changes
-        from getting lost in the noise of refactoring.  This means
-        that this patch does not result in any change in behaviour.
-
-        * API/JSCallbackObject.h:
-        * API/JSCallbackObjectFunctions.h:
-        (JSC::::asCallbackObject):
-        (JSC::::staticFunctionGetter):
-        (JSC::::callbackGetter):
-        * jit/JITOperations.cpp:
-        * runtime/JSActivation.cpp:
-        (JSC::JSActivation::argumentsGetter):
-        * runtime/JSActivation.h:
-        * runtime/JSFunction.cpp:
-        (JSC::JSFunction::argumentsGetter):
-        (JSC::JSFunction::callerGetter):
-        (JSC::JSFunction::lengthGetter):
-        (JSC::JSFunction::nameGetter):
-        * runtime/JSFunction.h:
-        * runtime/JSObject.h:
-        (JSC::PropertySlot::getValue):
-        * runtime/NumberConstructor.cpp:
-        (JSC::numberConstructorNaNValue):
-        (JSC::numberConstructorNegInfinity):
-        (JSC::numberConstructorPosInfinity):
-        (JSC::numberConstructorMaxValue):
-        (JSC::numberConstructorMinValue):
-        * runtime/PropertySlot.h:
-        * runtime/RegExpConstructor.cpp:
-        (JSC::asRegExpConstructor):
-        (JSC::regExpConstructorDollar1):
-        (JSC::regExpConstructorDollar2):
-        (JSC::regExpConstructorDollar3):
-        (JSC::regExpConstructorDollar4):
-        (JSC::regExpConstructorDollar5):
-        (JSC::regExpConstructorDollar6):
-        (JSC::regExpConstructorDollar7):
-        (JSC::regExpConstructorDollar8):
-        (JSC::regExpConstructorDollar9):
-        (JSC::regExpConstructorInput):
-        (JSC::regExpConstructorMultiline):
-        (JSC::regExpConstructorLastMatch):
-        (JSC::regExpConstructorLastParen):
-        (JSC::regExpConstructorLeftContext):
-        (JSC::regExpConstructorRightContext):
-        * runtime/RegExpObject.cpp:
-        (JSC::asRegExpObject):
-        (JSC::regExpObjectGlobal):
-        (JSC::regExpObjectIgnoreCase):
-        (JSC::regExpObjectMultiline):
-        (JSC::regExpObjectSource):
-
-2013-12-04  Daniel Bates  
-
-        [iOS] Enable Objective-C ARC when building JSC tools for iOS simulator
-        https://bugs.webkit.org/show_bug.cgi?id=125170
-
-        Reviewed by Geoffrey Garen.
-
-        * API/tests/testapi.mm:
-        * Configurations/ToolExecutable.xcconfig:
-
-2013-12-04  peavo@outlook.com  
-
-        Use ThreadingOnce class to encapsulate pthread_once functionality.
-        https://bugs.webkit.org/show_bug.cgi?id=125228
-
-        Reviewed by Brent Fulgham.
-
-        * runtime/InitializeThreading.cpp:
-        (JSC::initializeThreading):
-
-2013-12-04  Mark Lam  
-
-        Remove unneeded semicolons.
-        https://bugs.webkit.org/show_bug.cgi?id=125083.
-
-        Rubber-stamped by Filip Pizlo.
-
-        * debugger/Debugger.h:
-        (JSC::Debugger::detach):
-        (JSC::Debugger::sourceParsed):
-        (JSC::Debugger::exception):
-        (JSC::Debugger::atStatement):
-        (JSC::Debugger::callEvent):
-        (JSC::Debugger::returnEvent):
-        (JSC::Debugger::willExecuteProgram):
-        (JSC::Debugger::didExecuteProgram):
-        (JSC::Debugger::didReachBreakpoint):
-
-2013-12-04  Andy Estes  
-
-        [iOS] Build projects with $(ARCHS_STANDARD_32_64_BIT)
-        https://bugs.webkit.org/show_bug.cgi?id=125236
-
-        Reviewed by Sam Weinig.
-
-        $(ARCHS_STANDARD_32_64_BIT) is what we want for both device and simulator builds.
-
-        * Configurations/DebugRelease.xcconfig:
-
-2013-12-03  Filip Pizlo  
-
-        Infer constant closure variables
-        https://bugs.webkit.org/show_bug.cgi?id=124630
-
-        Reviewed by Geoffrey Garen.
-        
-        Captured variables that are assigned once (not counting op_enter's Undefined
-        initialization) and that are contained within a function that has thus far only been
-        entered once are now constant folded. It's pretty awesome.
-        
-        This involves a watchpoint on the assignment to variables and a watchpoint on entry
-        into the function. The former is reused from global variable constant inference and the
-        latter is reused from one-time closure inference.
-
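-        A sketch of what now folds (illustrative only): if makeAdder has only
-        been entered once, 'x' is assigned exactly once and can be
-        constant-folded inside the closure:
-
-            function makeAdder(x) {
-                return function(y) { return x + y; };
-            }
-            var add5 = makeAdder(5); // 'x' folds to 5 under watchpoints
-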
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::dumpBytecode):
-        (JSC::CodeBlock::CodeBlock):
-        * bytecode/Instruction.h:
-        (JSC::Instruction::Instruction):
-        * bytecode/Opcode.h:
-        (JSC::padOpcodeName):
-        * bytecode/UnlinkedCodeBlock.h:
-        (JSC::UnlinkedInstruction::UnlinkedInstruction):
-        * bytecode/VariableWatchpointSet.h:
-        (JSC::VariableWatchpointSet::invalidate):
-        * bytecode/Watchpoint.h:
-        (JSC::WatchpointSet::invalidate):
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::addVar):
-        (JSC::BytecodeGenerator::BytecodeGenerator):
-        (JSC::BytecodeGenerator::emitInitLazyRegister):
-        (JSC::BytecodeGenerator::emitMove):
-        (JSC::BytecodeGenerator::emitNewFunctionInternal):
-        (JSC::BytecodeGenerator::createArgumentsIfNecessary):
-        * bytecompiler/BytecodeGenerator.h:
-        (JSC::BytecodeGenerator::addVar):
-        (JSC::BytecodeGenerator::watchableVariable):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::getLocal):
-        (JSC::DFG::ByteCodeParser::inferredConstant):
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        (JSC::DFG::ByteCodeParser::parse):
-        * dfg/DFGGraph.cpp:
-        (JSC::DFG::Graph::tryGetActivation):
-        (JSC::DFG::Graph::tryGetRegisters):
-        * dfg/DFGGraph.h:
-        * jit/JIT.cpp:
-        (JSC::JIT::privateCompileMainPass):
-        (JSC::JIT::privateCompileSlowCases):
-        * jit/JIT.h:
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_mov):
-        (JSC::JIT::emit_op_captured_mov):
-        (JSC::JIT::emit_op_new_captured_func):
-        (JSC::JIT::emitSlow_op_captured_mov):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emit_op_mov):
-        (JSC::JIT::emit_op_captured_mov):
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * runtime/CommonSlowPaths.cpp:
-        (JSC::SLOW_PATH_DECL):
-        * runtime/CommonSlowPaths.h:
-        * runtime/ConstantMode.h: Added.
-        * runtime/JSGlobalObject.h:
-        * runtime/JSScope.cpp:
-        (JSC::abstractAccess):
-        * runtime/SymbolTable.cpp:
-        (JSC::SymbolTableEntry::prepareToWatch):
-
-2013-12-04  Brent Fulgham  
-
-        [Win] Unreviewed project file gardening.
-
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj: Remove deleted files from project.
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters: Put files in proper directory
-        folders to match the directory structure of the source code.
-
-2013-12-04  Joseph Pecoraro  
-
-        Unreviewed Windows Build Fix attempt after r160099.
-
-        * JavaScriptCore.vcxproj/copy-files.cmd:
-
-2013-12-04  Julien Brianceau  
-
-        REGRESSION (r160094): Fix lots of crashes for sh4 architecture.
-        https://bugs.webkit.org/show_bug.cgi?id=125227
-
-        Reviewed by Michael Saboff.
-
-        * llint/LowLevelInterpreter32_64.asm: Do not use t4 and t5 as they match a0 and a1.
-        * offlineasm/registers.rb: Add t7, t8 and t9 in register list for sh4 port.
-        * offlineasm/sh4.rb: Rearrange RegisterID list and add the missing ones.
-
-2013-12-03  Joseph Pecoraro  
-
-        Web Inspector: Push Remote Inspector debugging connection management into JavaScriptCore
-        https://bugs.webkit.org/show_bug.cgi?id=124613
-
-        Reviewed by Timothy Hatcher.
-
-        Move the ENABLE(REMOTE_INSPECTOR) remote debugger connection management
-        into JavaScriptCore (originally from WebKit/mac). Include enhancements:
-
-          * allow for different types of remote debuggable targets,
-            eventually at least a JSContext, WebView, WKView.
-          * allow debuggables to be registered and debugged on any thread. Unlike
-            WebViews, JSContexts may be run entirely off of the main thread.
-          * move the remote connection (XPC connection) itself off of the main thread,
-            it doesn't need to be on the main thread.
-
-        Make JSContext @class and JavaScriptCore::JSContextRef
-        "JavaScript" Remote Debuggables.
-
-        * inspector/remote/RemoteInspectorDebuggable.h: Added.
-        * inspector/remote/RemoteInspectorDebuggable.cpp: Added.
-        (Inspector::RemoteInspectorDebuggable::RemoteInspectorDebuggable):
-        (Inspector::RemoteInspectorDebuggable::~RemoteInspectorDebuggable):
-        (Inspector::RemoteInspectorDebuggable::init):
-        (Inspector::RemoteInspectorDebuggable::update):
-        (Inspector::RemoteInspectorDebuggable::setRemoteDebuggingAllowed):
-        (Inspector::RemoteInspectorDebuggable::info):
-        RemoteInspectorDebuggable defines a debuggable target. As long as
-        something creates a debuggable and is set to allow remote inspection
-        it will be listed in remote debuggers. For the different types of
-        debuggables (JavaScript and Web) there is different basic information
-        that may be listed.
-
-        * inspector/InspectorFrontendChannel.h: Added.
-        (Inspector::InspectorFrontendChannel::~InspectorFrontendChannel):
-        The only thing a debuggable needs for remote debugging is an
-        InspectorFrontendChannel, a way to send messages to a remote frontend.
-        This class provides that method, and is vended to the
-        RemoteInspectorDebuggable when a remote connection is setup.
-
-        * inspector/remote/RemoteInspector.h: Added.
-        * inspector/remote/RemoteInspector.mm: Added.
-        Singleton, created at least when the first Debuggable is created.
-        This class manages the list of debuggables and any connection to a
-        remote debugger proxy (XPC service "com.apple.webinspector").
-
-        (Inspector::dispatchAsyncOnQueueSafeForAnyDebuggable):
-        (Inspector::RemoteInspector::shared):
-        (Inspector::RemoteInspector::RemoteInspector):
-        (Inspector::RemoteInspector::nextAvailableIdentifier):
-        (Inspector::RemoteInspector::registerDebuggable):
-        (Inspector::RemoteInspector::unregisterDebuggable):
-        (Inspector::RemoteInspector::updateDebuggable):
-        Debuggable management. When debuggables are added, removed, or updated
-        we stash a copy of the debuggable information and push an update to
-        debuggers. Stashing a copy of the information in the RemoteInspector
-        is a thread safe way to avoid walking over all debuggables to gather
-        the information when it is needed.
-
-        (Inspector::RemoteInspector::start):
-        (Inspector::RemoteInspector::stop):
-        Runtime API to enable / disable the feature.
-
-        (Inspector::RemoteInspector::listingForDebuggable):
-        (Inspector::RemoteInspector::pushListingNow):
-        (Inspector::RemoteInspector::pushListingSoon):
-        Pushing a listing to remote debuggers.
-
-        (Inspector::RemoteInspector::sendMessageToRemoteFrontend):
-        (Inspector::RemoteInspector::setupXPCConnectionIfNeeded):
-        (Inspector::RemoteInspector::xpcConnectionReceivedMessage):
-        (Inspector::RemoteInspector::xpcConnectionFailed):
-        (Inspector::RemoteInspector::xpcConnectionUnhandledMessage):
-        XPC setup, send, and receive handling.
-
-        (Inspector::RemoteInspector::updateHasActiveDebugSession):
-        Applications being debugged may want to know when a debug
-        session is active. This provides that notification.
-
-        (Inspector::RemoteInspector::receivedSetupMessage):
-        (Inspector::RemoteInspector::receivedDataMessage):
-        (Inspector::RemoteInspector::receivedDidCloseMessage):
-        (Inspector::RemoteInspector::receivedGetListingMessage):
-        (Inspector::RemoteInspector::receivedIndicateMessage):
-        (Inspector::RemoteInspector::receivedConnectionDiedMessage):
-        Dispatching incoming remote debugging protocol messages.
-        These are wrapping above the inspector protocol messages.
-
-        * inspector/remote/RemoteInspectorConstants.h: Added.
-        Protocol messages and dictionary keys inside the messages.
-
-        (Inspector::RemoteInspectorDebuggableInfo::RemoteInspectorDebuggableInfo):
-        * inspector/remote/RemoteInspectorDebuggableConnection.h: Added.
-        * inspector/remote/RemoteInspectorDebuggableConnection.mm: Added.
-        This is a connection between the RemoteInspector singleton and a RemoteInspectorDebuggable.
-
-        (Inspector::RemoteInspectorDebuggableConnection::RemoteInspectorDebuggableConnection):
-        (Inspector::RemoteInspectorDebuggableConnection::~RemoteInspectorDebuggableConnection):
-        Allow for dispatching messages on JavaScript debuggables on a dispatch_queue
-        instead of the main queue.
-
-        (Inspector::RemoteInspectorDebuggableConnection::destination):
-        (Inspector::RemoteInspectorDebuggableConnection::connectionIdentifier):
-        Needed in the remote debugging protocol to identify the remote debugger.
-
-        (Inspector::RemoteInspectorDebuggableConnection::dispatchSyncOnDebuggable):
-        (Inspector::RemoteInspectorDebuggableConnection::dispatchAsyncOnDebuggable):
-        (Inspector::RemoteInspectorDebuggableConnection::setup):
-        (Inspector::RemoteInspectorDebuggableConnection::closeFromDebuggable):
-        (Inspector::RemoteInspectorDebuggableConnection::close):
-        (Inspector::RemoteInspectorDebuggableConnection::sendMessageToBackend):
-        (Inspector::RemoteInspectorDebuggableConnection::sendMessageToFrontend):
-        The connection is a thin channel between the two sides that can be closed
-        from either side, so there is some logic around multi-threaded access.
-
-        * inspector/remote/RemoteInspectorXPCConnection.h: Added.
-        (Inspector::RemoteInspectorXPCConnection::Client::~Client):
-        * inspector/remote/RemoteInspectorXPCConnection.mm: Added.
-        (Inspector::RemoteInspectorXPCConnection::RemoteInspectorXPCConnection):
-        (Inspector::RemoteInspectorXPCConnection::~RemoteInspectorXPCConnection):
-        (Inspector::RemoteInspectorXPCConnection::close):
-        (Inspector::RemoteInspectorXPCConnection::deserializeMessage):
-        (Inspector::RemoteInspectorXPCConnection::handleEvent):
-        (Inspector::RemoteInspectorXPCConnection::sendMessage):
-        This is a connection between the RemoteInspector singleton and an XPC service
-        named "com.apple.webinspector". This handles serialization of the dictionary
-        messages to and from the service. Receiving is done on a non-main queue.
-
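-        Sending a serialized message is conceptually like this (a sketch using
-        the C XPC API; the m_connection member name is an assumption):
-
-            xpc_object_t message = xpc_dictionary_create(0, 0, 0);
-            // ... encode the dictionary payload into 'message' ...
-            xpc_connection_send_message(m_connection, message);
-            xpc_release(message);
-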
-        * API/JSContext.h:
-        * API/JSContext.mm:
-        (-[JSContext name]):
-        (-[JSContext setName:]):
-        ObjC API to enable/disable JSContext remote inspection and give a name.
-
-        * API/JSContextRef.h:
-        * API/JSContextRef.cpp:
-        (JSGlobalContextGetName):
-        (JSGlobalContextSetName):
-        C API to give a JSContext a name.
-
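-        For example, naming a context via the new C API looks like this
-        (a sketch; error handling omitted):
-
-            JSGlobalContextRef context = JSGlobalContextCreate(0);
-            JSStringRef name = JSStringCreateWithUTF8CString("Worker Context");
-            JSGlobalContextSetName(context, name);
-            JSStringRelease(name);
-            JSGlobalContextRelease(context);
-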
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::setName):
-        * runtime/JSGlobalObject.h:
-        (JSC::JSGlobalObject::name):
-        Shared handling of the APIs above.
-
-        * runtime/JSGlobalObjectDebuggable.cpp: Added.
-        (JSC::JSGlobalObjectDebuggable::JSGlobalObjectDebuggable):
-        (JSC::JSGlobalObjectDebuggable::name):
-        (JSC::JSGlobalObjectDebuggable::connect):
-        (JSC::JSGlobalObjectDebuggable::disconnect):
-        (JSC::JSGlobalObjectDebuggable::dispatchMessageFromRemoteFrontend):
-        * runtime/JSGlobalObjectDebuggable.h: Added.
-        Stub for the actual remote debugging implementation. We will push
-        down the appropriate WebCore/inspector pieces suitable for debugging
-        just a JavaScript context.
-
-        * CMakeLists.txt:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * GNUmakefile.am:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        Update build files.
-
-2013-12-04  Michael Saboff  
-
-        Move the setting up of callee's callFrame from pushFrame to callToJavaScript thunk
-        https://bugs.webkit.org/show_bug.cgi?id=123999
-
-        Reviewed by Filip Pizlo.
-
-        Changed LLInt and/or JIT enabled ports to allocate the stack frame in the
-        callToJavaScript stub.  Added an additional stub, callToNativeFunction, which
-        allocates a stack frame in a similar way for calling native entry points
-        that take a single ExecState* argument.  These stubs are implemented
-        using common macros in LowLevelInterpreter{32_64,64}.asm.  There are also
-        Windows X86 and X86-64 versions in the corresponding JitStubsXX.h.
-        The stubs allocate and create a sentinel frame, then create the callee's
-        frame, populating the header and arguments from the passed-in ProtoCallFrame*.
-        It is assumed that the caller of either stub does a check for enough stack space
-        via JSStack::entryCheck().
-
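-        Conceptually, a caller now describes the call in a ProtoCallFrame and
-        hands it to the stub (a sketch with assumed parameter order and
-        signatures, not the verbatim code):
-
-            ProtoCallFrame protoCallFrame;
-            protoCallFrame.init(codeBlock, scope, callee, thisValue,
-                argumentCountIncludingThis, args);
-            // the stub allocates the sentinel and callee frames, filling the
-            // header and arguments from the ProtoCallFrame
-            JSValue result = JSValue::decode(callToJavaScript(entryPoint, &protoCallFrame));
-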
-        For ports using the C-Loop interpreter, the prior method for allocating stack
-        frame and invoking functions is used, namely with JSStack::pushFrame() and
-        ::popFrame().
-
-        Made spelling changes "sentinal" -> "sentinel".
-
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * interpreter/CachedCall.h:
-        (JSC::CachedCall::CachedCall):
-        (JSC::CachedCall::setThis):
-        (JSC::CachedCall::setArgument):
-        * interpreter/CallFrameClosure.h:
-        (JSC::CallFrameClosure::resetCallFrame):
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::execute):
-        (JSC::Interpreter::executeCall):
-        (JSC::Interpreter::executeConstruct):
-        (JSC::Interpreter::prepareForRepeatCall):
-        * interpreter/Interpreter.h:
-        * interpreter/JSStack.h:
-        * interpreter/JSStackInlines.h:
-        (JSC::JSStack::entryCheck):
-        (JSC::JSStack::pushFrame):
-        (JSC::JSStack::popFrame):
-        * interpreter/ProtoCallFrame.cpp: Added.
-        (JSC::ProtoCallFrame::init):
-        * interpreter/ProtoCallFrame.h: Added.
-        (JSC::ProtoCallFrame::codeBlock):
-        (JSC::ProtoCallFrame::setCodeBlock):
-        (JSC::ProtoCallFrame::setScope):
-        (JSC::ProtoCallFrame::setCallee):
-        (JSC::ProtoCallFrame::argumentCountIncludingThis):
-        (JSC::ProtoCallFrame::argumentCount):
-        (JSC::ProtoCallFrame::setArgumentCountIncludingThis):
-        (JSC::ProtoCallFrame::setPaddedArgsCount):
-        (JSC::ProtoCallFrame::clearCurrentVPC):
-        (JSC::ProtoCallFrame::setThisValue):
-        (JSC::ProtoCallFrame::setArgument):
-        * jit/JITCode.cpp:
-        (JSC::JITCode::execute):
-        * jit/JITCode.h:
-        * jit/JITOperations.cpp:
-        * jit/JITStubs.h:
-        * jit/JITStubsMSVC64.asm:
-        * jit/JITStubsX86.h:
-        * llint/LLIntOffsetsExtractor.cpp:
-        * llint/LLIntThunks.h:
-        * llint/LowLevelInterpreter.asm:
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * runtime/ArgList.h:
-        (JSC::ArgList::data):
-        * runtime/JSArray.cpp:
-        (JSC::AVLTreeAbstractorForArrayCompare::compare_key_key):
-        * runtime/StringPrototype.cpp:
-        (JSC::replaceUsingRegExpSearch):
-
-2013-12-04  László Langó  
-
-        Remove stdio.h from JSC files.
-        https://bugs.webkit.org/show_bug.cgi?id=125220
-
-        Reviewed by Michael Saboff.
-
-        * interpreter/VMInspector.cpp:
-        * jit/JITArithmetic.cpp:
-        * jit/JITArithmetic32_64.cpp:
-        * jit/JITCall.cpp:
-        * jit/JITCall32_64.cpp:
-        * jit/JITPropertyAccess.cpp:
-        * jit/JITPropertyAccess32_64.cpp:
-        * runtime/Completion.cpp:
-        * runtime/IndexingType.cpp:
-        * runtime/Lookup.h:
-        * runtime/Operations.cpp:
-        * runtime/Options.cpp:
-        * runtime/RegExp.cpp:
-
-2013-12-04  László Langó  
-
-        Avoid adding a zero offset in BaseIndex.
-        https://bugs.webkit.org/show_bug.cgi?id=125215
-
-        Reviewed by Michael Saboff.
-
-        When using the cloop, do not generate offset additions for BaseIndex if the offset is zero.
-
-        * offlineasm/cloop.rb:
-
-2013-12-04  Peter Molnar  
-
-        Fix !ENABLE(JAVASCRIPT_DEBUGGER) build.
-        https://bugs.webkit.org/show_bug.cgi?id=125083
-
-        Reviewed by Mark Lam.
-
-        * debugger/Debugger.cpp:
-        * debugger/Debugger.h:
-        (JSC::Debugger::Debugger):
-        (JSC::Debugger::needsOpDebugCallbacks):
-        (JSC::Debugger::needsExceptionCallbacks):
-        (JSC::Debugger::detach):
-        (JSC::Debugger::sourceParsed):
-        (JSC::Debugger::exception):
-        (JSC::Debugger::atStatement):
-        (JSC::Debugger::callEvent):
-        (JSC::Debugger::returnEvent):
-        (JSC::Debugger::willExecuteProgram):
-        (JSC::Debugger::didExecuteProgram):
-        (JSC::Debugger::didReachBreakpoint):
-        * debugger/DebuggerPrimitives.h:
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_debug):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emit_op_debug):
-        * llint/LLIntOfflineAsmConfig.h:
-        * llint/LowLevelInterpreter.asm:
-
-2013-12-03  Mark Lam  
-
-        testapi test crashes on Windows in WTF::Vector::size().
-        https://bugs.webkit.org/show_bug.cgi?id=121972.
-
-        Reviewed by Brent Fulgham.
-
-        * interpreter/JSStack.cpp:
-        (JSC::JSStack::~JSStack):
-        - Reverting the change from r160004 since it's better to fix OSAllocatorWin
-          to be consistent with OSAllocatorPosix.
-
-2013-12-03  Mark Lam  
-
-        Fix LLINT_C_LOOP build for Win64.
-        https://bugs.webkit.org/show_bug.cgi?id=125186.
-
-        Reviewed by Michael Saboff.
-
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * jit/JITOperationsMSVC64.cpp: Added.
-        (JSC::getHostCallReturnValueWithExecState):
-        - Win64 will build JITStubMSVC64.asm even when !ENABLE(JIT). This results
-          in a linkage error due to a missing getHostCallReturnValueWithExecState().
-          So, we add a stub getHostCallReturnValueWithExecState() here to satisfy
-          that linkage. This function will never be called.
-          The alternative to providing such a stub is to make the MSVC project
-          recognize whether the JIT is enabled, and exclude JITStubMSVC64.asm
-          if it's not enabled. We don't currently set ENABLE(JIT) via the MSVC
-          project and the work to do that is too much trouble for what we're trying
-          to achieve here. So, we're opting for this simpler workaround instead.
-
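-          One way to satisfy that linkage, given that the function is never
-          called, is a stub along these lines (a sketch, not necessarily the
-          verbatim code):
-
-              extern "C" EncodedJSValue getHostCallReturnValueWithExecState(ExecState*)
-              {
-                  // never reached; exists only to satisfy the linker
-                  RELEASE_ASSERT_NOT_REACHED();
-                  return JSValue::encode(JSValue());
-              }
-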
-        * llint/LowLevelInterpreter.asm:
-        * llint/LowLevelInterpreter.cpp:
-        (JSC::CLoop::execute):
-        - Don't build callToJavaScript if we're building the C loop. Otherwise,
-          the C loop won't build if !ENABLE(COMPUTE_GOTO_OPCODES). 
-
-2013-12-03  Michael Saboff  
-
-        ARM64: Crash in JIT code due to improper reuse of cached memory temp register
-        https://bugs.webkit.org/show_bug.cgi?id=125181
-
-        Reviewed by Geoffrey Garen.
-
-        Changed load8() and load() to invalidate the memory temp CachedTempRegister when the
-        destination of an absolute load is the memory temp register since the source address
-        is also the memory temp register.  Changed branch{8,32,64} of an AbsoluteAddress with
-        a register to use the dataTempRegister as the destination of the absolute load to
-        reduce the chance that we need to invalidate the memory temp register cache.
-        In the process, found and fixed an outright bug in branch8() where we'd load into
-        the data temp register and then compare and branch on the memory temp register.
-
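-        The corrected branch8() pattern is roughly this (a sketch of the idea,
-        not the verbatim patch):
-
-            Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
-            {
-                // load through the data temp so the memory temp cache stays
-                // usable, then branch on the register we actually loaded into
-                load8(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
-                return branch32(cond, dataTempRegister, right);
-            }
-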
-        * assembler/MacroAssemblerARM64.h:
-        (JSC::MacroAssemblerARM64::load8):
-        (JSC::MacroAssemblerARM64::branch32):
-        (JSC::MacroAssemblerARM64::branch64):
-        (JSC::MacroAssemblerARM64::branch8):
-        (JSC::MacroAssemblerARM64::load):
-
-2013-12-03  Michael Saboff  
-
-        jit/JITArithmetic.cpp doesn't build for non-X86 ports
-        https://bugs.webkit.org/show_bug.cgi?id=125185
-
-        Rubber stamped by Mark Hahnenberg.
-
-        Removed unused declarations and related UNUSED_PARAM().
-
-        * jit/JITArithmetic.cpp:
-        (JSC::JIT::emit_op_mod):
-
-2013-12-03  Filip Pizlo  
-
-        ObjectAllocationProfile is racy and the DFG should be cool with that
-        https://bugs.webkit.org/show_bug.cgi?id=125172
-        
-
-        Reviewed by Mark Hahnenberg.
-        
-        We would previously sometimes get a null Structure because checking if the profile is non-null and loading
-        the structure from it were two separate operations.
-
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGAbstractValue.cpp:
-        (JSC::DFG::AbstractValue::setFuturePossibleStructure):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * runtime/JSFunction.h:
-        (JSC::JSFunction::allocationProfile):
-        (JSC::JSFunction::allocationStructure):
-
-2013-12-03  peavo@outlook.com  
-
-        testapi test crashes on Windows in WTF::Vector::size()
-        https://bugs.webkit.org/show_bug.cgi?id=121972
-
-        Reviewed by Michael Saboff.
-
-        The reason for the crash is that the wrong memory block is decommitted.
-        This can happen if no memory has been committed in the reserved block before the JSStack object is destroyed.
-        In the JSStack destructor, the pointer to decommit then points to the end of the block (or the start of the next), and the decommit size is zero.
-        If there is a block just after the one we are trying to decommit, that neighboring block
-        will be decommitted instead, since Windows decommits the whole block when the decommit
-        size is zero (see VirtualFree). When somebody later reads from or writes to that block, we crash.
-
-        * interpreter/JSStack.cpp:
-        (JSC::JSStack::~JSStack): Don't decommit memory if nothing has been committed.
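-
-        The guarded destructor is conceptually like this (the member names
-        here are assumptions for illustration):
-
-            JSStack::~JSStack()
-            {
-                // only decommit when something was committed; a zero-sized
-                // decommit makes VirtualFree release the neighboring block
-                if (committedSize)
-                    m_reservation.decommit(commitBase, committedSize);
-                m_reservation.deallocate();
-            }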
-
-2013-12-03  László Langó  
-
-        Guard JIT include.
-        https://bugs.webkit.org/show_bug.cgi?id=125063
-
-        Reviewed by Filip Pizlo.
-
-        * llint/LLIntThunks.cpp:
-
-2013-12-03  Julien Brianceau  
-
-        Merge mips and arm/sh4 paths in nativeForGenerator and privateCompileCTINativeCall functions.
-        https://bugs.webkit.org/show_bug.cgi?id=125067
-
-        Reviewed by Michael Saboff.
-
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::privateCompileCTINativeCall):
-        * jit/ThunkGenerators.cpp:
-        (JSC::nativeForGenerator):
-
-2013-12-02  Mark Lam  
-
-        Build failure when disabling JIT, YARR_JIT, and ASSEMBLER.
-        https://bugs.webkit.org/show_bug.cgi?id=123809.
-
-        Reviewed by Geoffrey Garen.
-
-        Also fixed the build when disabling the DISASSEMBLER.
-        Added some needed #if's and some comments.
-
-        * assembler/LinkBuffer.cpp:
-        (JSC::LinkBuffer::finalizeCodeWithDisassembly):
-        * dfg/DFGDisassembler.cpp:
-        * dfg/DFGDisassembler.h:
-        (JSC::DFG::Disassembler::Disassembler):
-        (JSC::DFG::Disassembler::setStartOfCode):
-        (JSC::DFG::Disassembler::setForBlockIndex):
-        (JSC::DFG::Disassembler::setForNode):
-        (JSC::DFG::Disassembler::setEndOfMainPath):
-        (JSC::DFG::Disassembler::setEndOfCode):
-        (JSC::DFG::Disassembler::dump):
-        (JSC::DFG::Disassembler::reportToProfiler):
-        * disassembler/Disassembler.cpp:
-        * disassembler/X86Disassembler.cpp:
-        * jit/FPRInfo.h:
-        * jit/GPRInfo.h:
-        * jit/JITDisassembler.cpp:
-        * jit/JITDisassembler.h:
-        (JSC::JITDisassembler::JITDisassembler):
-        (JSC::JITDisassembler::setStartOfCode):
-        (JSC::JITDisassembler::setForBytecodeMainPath):
-        (JSC::JITDisassembler::setForBytecodeSlowPath):
-        (JSC::JITDisassembler::setEndOfSlowPath):
-        (JSC::JITDisassembler::setEndOfCode):
-        (JSC::JITDisassembler::dump):
-        (JSC::JITDisassembler::reportToProfiler):
-
-2013-12-02  Filip Pizlo  
-
-        Baseline JIT calls to CommonSlowPaths shouldn't restore the last result
-        https://bugs.webkit.org/show_bug.cgi?id=125107
-
-        Reviewed by Mark Hahnenberg.
-
-        Just killing dead code.
-
-        * jit/JITArithmetic.cpp:
-        (JSC::JIT::emitSlow_op_negate):
-        (JSC::JIT::emitSlow_op_lshift):
-        (JSC::JIT::emitSlow_op_rshift):
-        (JSC::JIT::emitSlow_op_urshift):
-        (JSC::JIT::emitSlow_op_bitand):
-        (JSC::JIT::emitSlow_op_inc):
-        (JSC::JIT::emitSlow_op_dec):
-        (JSC::JIT::emitSlow_op_mod):
-        (JSC::JIT::emit_op_mod):
-        (JSC::JIT::compileBinaryArithOpSlowCase):
-        (JSC::JIT::emitSlow_op_div):
-        * jit/JITArithmetic32_64.cpp:
-        (JSC::JIT::emitSlow_op_negate):
-        (JSC::JIT::emitSlow_op_lshift):
-        (JSC::JIT::emitRightShiftSlowCase):
-        (JSC::JIT::emitSlow_op_bitand):
-        (JSC::JIT::emitSlow_op_bitor):
-        (JSC::JIT::emitSlow_op_bitxor):
-        (JSC::JIT::emitSlow_op_inc):
-        (JSC::JIT::emitSlow_op_dec):
-        (JSC::JIT::emitSlow_op_add):
-        (JSC::JIT::emitSlow_op_sub):
-        (JSC::JIT::emitSlow_op_mul):
-        (JSC::JIT::emitSlow_op_div):
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_strcat):
-        (JSC::JIT::emitSlow_op_get_callee):
-        (JSC::JIT::emitSlow_op_create_this):
-        (JSC::JIT::emitSlow_op_to_this):
-        (JSC::JIT::emitSlow_op_to_primitive):
-        (JSC::JIT::emitSlow_op_not):
-        (JSC::JIT::emitSlow_op_bitxor):
-        (JSC::JIT::emitSlow_op_bitor):
-        (JSC::JIT::emitSlow_op_stricteq):
-        (JSC::JIT::emitSlow_op_nstricteq):
-        (JSC::JIT::emitSlow_op_to_number):
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::emitSlow_op_to_primitive):
-        (JSC::JIT::emitSlow_op_not):
-        (JSC::JIT::emitSlow_op_stricteq):
-        (JSC::JIT::emitSlow_op_nstricteq):
-        (JSC::JIT::emitSlow_op_to_number):
-        (JSC::JIT::emitSlow_op_get_callee):
-        (JSC::JIT::emitSlow_op_create_this):
-        (JSC::JIT::emitSlow_op_to_this):
-
-2013-12-01  Filip Pizlo  
-
-        Stores to local captured variables should be intercepted
-        https://bugs.webkit.org/show_bug.cgi?id=124883
-
-        Reviewed by Mark Hahnenberg.
-        
-        Previously, in bytecode, you could assign to a captured variable just as you would
-        assign to any other kind of variable. This complicates closure variable constant
-        inference because we don't have any place where we can intercept stores to captured
-        variables in the LLInt.
-        
-        This patch institutes a policy that only certain instructions can store to captured
-        variables. If you interpret those instructions and you are required to notifyWrite()
-        then you need to check if the relevant variable is captured. Those instructions are
-        tracked in CodeBlock.cpp's VerifyCapturedDef. The main one is simply op_captured_mov.
-        In the future, we'll probably modify those instructions to have a pointer directly to
-        the VariableWatchpointSet; but for now we just introduce the captured instructions as
-        placeholders.
-        
-        In order to validate that the placeholders are inserted correctly, this patch improves
-        the CodeBlock validation to be able to inspect every def in the bytecode. To do that,
-        this patch refactors the liveness analysis' use/def calculator to be reusable; it now
-        takes a functor for each use or def.
-        
-        In the process of refactoring the liveness analysis, I noticed that op_enter was
-        claiming to def all callee registers. That's wrong; it only defs the non-temporary
-        variables. Making that change revealed preexisting bugs in the liveness analysis, since
-        now the validator would pick up cases where the bytecode claimed to use a temporary and
-        the def calculator never noticed the definition (or the converse - where the bytecode
-        was actually not using a temporary but the liveness analysis thought that it was a
-        use). This patch fixes a few of those bugs.
-
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bytecode/BytecodeLivenessAnalysis.cpp:
-        (JSC::stepOverInstruction):
-        * bytecode/BytecodeUseDef.h: Added.
-        (JSC::computeUsesForBytecodeOffset):
-        (JSC::computeDefsForBytecodeOffset):
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::dumpBytecode):
-        (JSC::CodeBlock::isCaptured):
-        (JSC::CodeBlock::validate):
-        * bytecode/CodeBlock.h:
-        * bytecode/Opcode.h:
-        (JSC::padOpcodeName):
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::BytecodeGenerator):
-        (JSC::BytecodeGenerator::resolveCallee):
-        (JSC::BytecodeGenerator::emitMove):
-        (JSC::BytecodeGenerator::isCaptured):
-        (JSC::BytecodeGenerator::local):
-        (JSC::BytecodeGenerator::constLocal):
-        (JSC::BytecodeGenerator::emitNewFunction):
-        (JSC::BytecodeGenerator::emitLazyNewFunction):
-        (JSC::BytecodeGenerator::emitNewFunctionInternal):
-        * bytecompiler/BytecodeGenerator.h:
-        (JSC::Local::Local):
-        (JSC::Local::isCaptured):
-        (JSC::Local::captureMode):
-        (JSC::BytecodeGenerator::captureMode):
-        (JSC::BytecodeGenerator::emitNode):
-        (JSC::BytecodeGenerator::pushOptimisedForIn):
-        * bytecompiler/NodesCodegen.cpp:
-        (JSC::PostfixNode::emitResolve):
-        (JSC::PrefixNode::emitResolve):
-        (JSC::ReadModifyResolveNode::emitBytecode):
-        (JSC::AssignResolveNode::emitBytecode):
-        (JSC::ConstDeclNode::emitCodeSingle):
-        (JSC::ForInNode::emitBytecode):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGCapabilities.cpp:
-        (JSC::DFG::capabilityLevel):
-        * jit/JIT.cpp:
-        (JSC::JIT::privateCompileMainPass):
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * runtime/SymbolTable.h:
-        (JSC::SymbolTable::isCaptured):
-
-2013-12-02  Filip Pizlo  
-
-        Instead of watchpointing activation allocation, we should watchpoint entry into functions that have captured variables
-        https://bugs.webkit.org/show_bug.cgi?id=125052
-
-        Reviewed by Mark Hahnenberg.
-        
-        This makes us watch function entry rather than activation creation. We only incur the
-        costs of doing so for functions that have captured variables, and only on the first two
-        entries into the function. This means that closure variable constant inference will
-        naturally work even for local uses of the captured variable, like:
-        
-            (function(){
-                var blah = 42;
-                ... // stuff
-                function () { ... blah /* we can fold this to 42 */ }
-                ... blah // we can also fold this to 42.
-            })();
-        
-        Previously, only the nested use would have been foldable.
-
-        * bytecode/BytecodeLivenessAnalysis.cpp:
-        (JSC::computeUsesForBytecodeOffset):
-        (JSC::computeDefsForBytecodeOffset):
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::dumpBytecode):
-        * bytecode/Opcode.h:
-        (JSC::padOpcodeName):
-        * bytecode/Watchpoint.h:
-        (JSC::WatchpointSet::touch):
-        (JSC::InlineWatchpointSet::touch):
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::BytecodeGenerator):
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGCapabilities.cpp:
-        (JSC::DFG::capabilityLevel):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::hasSymbolTable):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGWatchpointCollectionPhase.cpp:
-        (JSC::DFG::WatchpointCollectionPhase::handle):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        * jit/JIT.cpp:
-        (JSC::JIT::privateCompileMainPass):
-        * jit/JIT.h:
-        * jit/JITOpcodes.cpp:
-        (JSC::JIT::emit_op_touch_entry):
-        * llint/LowLevelInterpreter.asm:
-        * runtime/CommonSlowPaths.cpp:
-        (JSC::SLOW_PATH_DECL):
-        * runtime/CommonSlowPaths.h:
-        * runtime/JSActivation.h:
-        (JSC::JSActivation::create):
-        * runtime/SymbolTable.cpp:
-        (JSC::SymbolTable::SymbolTable):
-        * runtime/SymbolTable.h:
-
-2013-12-02  Nick Diego Yamane  
-
-        [JSC] Get rid of some unused parameters in LLIntSlowPaths.cpp macros
-        https://bugs.webkit.org/show_bug.cgi?id=125075
-
-        Reviewed by Michael Saboff.
-
-        * llint/LLIntSlowPaths.cpp:
-        (JSC::LLInt::handleHostCall): added UNUSED_PARAM(pc).
-        (JSC::LLInt::setUpCall): Doesn't pass 'pc' to LLINT_CALL macros.
-        (JSC::LLInt::LLINT_SLOW_PATH_DECL): Ditto.
-
-2013-12-02  László Langó  
-
-        Remove stdio.h from JSC files.
-        https://bugs.webkit.org/show_bug.cgi?id=125066
-
-        Reviewed by Michael Saboff.
-
-        Remove stdio.h where it does not need to be included.
-
-        * bytecode/CodeBlock.cpp:
-        * bytecode/StructureSet.h:
-        * profiler/LegacyProfiler.cpp:
-        * profiler/Profile.cpp:
-        * profiler/ProfileNode.cpp:
-        * yarr/YarrInterpreter.cpp:
-
-2013-12-02  László Langó  
-
-        Unused include files when building without JIT.
-        https://bugs.webkit.org/show_bug.cgi?id=125062
-
-        Reviewed by Michael Saboff.
-
-        We should organize the includes, and guard JIT methods
-        in ValueRecovery.
-
-        * bytecode/ValueRecovery.cpp: Guard include files.
-        * bytecode/ValueRecovery.h: Guard JIT methods.
-
-2013-12-02  Balazs Kilvady  
-
-        [MIPS] Small stack frame causes regressions.
-        https://bugs.webkit.org/show_bug.cgi?id=124945
-
-        Reviewed by Michael Saboff.
-
-        Fix stack space for LLInt on MIPS.
-
-        * llint/LowLevelInterpreter32_64.asm:
-
-2013-12-02  Brian J. Burg  
-
-        jsc: implement a native readFile function
-        https://bugs.webkit.org/show_bug.cgi?id=125059
-
-        Reviewed by Filip Pizlo.
-
-        This adds a native readFile() function to jsc, used to slurp
-        an entire file into a JavaScript string.
-
-        * jsc.cpp:
-        (GlobalObject::finishCreation): Add readFile() to globals.
-        (functionReadFile): Added.
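-
-        The new global is roughly shaped like this (a sketch; the helper
-        names are assumptions modeled on existing jsc.cpp utilities):
-
-            static EncodedJSValue JSC_HOST_CALL functionReadFile(ExecState* exec)
-            {
-                String fileName = exec->argument(0).toString(exec)->value(exec);
-                Vector<char> contents;
-                if (!fillBufferWithContentsOfFile(fileName, contents))
-                    return JSValue::encode(throwVMError(exec, createError(exec, "Could not open file.")));
-                return JSValue::encode(jsString(exec, String::fromUTF8(contents.data(), contents.size())));
-            }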
-
-2013-12-02  László Langó  
-
-        JSC does not build if OPCODE_STATS is enabled.
-        https://bugs.webkit.org/show_bug.cgi?id=125011
-
-        Reviewed by Filip Pizlo.
-
-        * bytecode/Opcode.cpp:
-
-2013-11-29  Filip Pizlo  
-
-        Finally remove those DFG_ENABLE things
-        https://bugs.webkit.org/show_bug.cgi?id=125025
-
-        Rubber stamped by Sam Weinig.
-        
-        This removes a bunch of unused and untested insanity.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::tallyFrequentExitSites):
-        * dfg/DFGArgumentsSimplificationPhase.cpp:
-        (JSC::DFG::ArgumentsSimplificationPhase::run):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::injectLazyOperandSpeculation):
-        (JSC::DFG::ByteCodeParser::getArrayModeConsideringSlowPath):
-        (JSC::DFG::ByteCodeParser::makeSafe):
-        (JSC::DFG::ByteCodeParser::makeDivSafe):
-        (JSC::DFG::ByteCodeParser::handleCall):
-        (JSC::DFG::ByteCodeParser::handleInlining):
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        (JSC::DFG::ByteCodeParser::linkBlock):
-        (JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
-        (JSC::DFG::ByteCodeParser::parseCodeBlock):
-        (JSC::DFG::ByteCodeParser::parse):
-        (JSC::DFG::parse):
-        * dfg/DFGCFGSimplificationPhase.cpp:
-        (JSC::DFG::CFGSimplificationPhase::run):
-        (JSC::DFG::CFGSimplificationPhase::convertToJump):
-        (JSC::DFG::CFGSimplificationPhase::fixJettisonedPredecessors):
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::endIndexForPureCSE):
-        (JSC::DFG::CSEPhase::eliminateIrrelevantPhantomChildren):
-        (JSC::DFG::CSEPhase::setReplacement):
-        (JSC::DFG::CSEPhase::eliminate):
-        (JSC::DFG::CSEPhase::performNodeCSE):
-        * dfg/DFGCommon.h:
-        (JSC::DFG::verboseCompilationEnabled):
-        (JSC::DFG::logCompilationChanges):
-        (JSC::DFG::shouldDumpGraphAtEachPhase):
-        * dfg/DFGConstantFoldingPhase.cpp:
-        (JSC::DFG::ConstantFoldingPhase::foldConstants):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        (JSC::DFG::FixupPhase::injectInt32ToDoubleNode):
-        * dfg/DFGInPlaceAbstractState.cpp:
-        (JSC::DFG::InPlaceAbstractState::initialize):
-        (JSC::DFG::InPlaceAbstractState::endBasicBlock):
-        (JSC::DFG::InPlaceAbstractState::mergeStateAtTail):
-        (JSC::DFG::InPlaceAbstractState::mergeToSuccessors):
-        * dfg/DFGJITCompiler.cpp:
-        (JSC::DFG::JITCompiler::compileBody):
-        (JSC::DFG::JITCompiler::link):
-        * dfg/DFGOSRExitCompiler.cpp:
-        * dfg/DFGOSRExitCompiler32_64.cpp:
-        (JSC::DFG::OSRExitCompiler::compileExit):
-        * dfg/DFGOSRExitCompiler64.cpp:
-        (JSC::DFG::OSRExitCompiler::compileExit):
-        * dfg/DFGOSRExitCompilerCommon.cpp:
-        (JSC::DFG::adjustAndJumpToTarget):
-        * dfg/DFGPredictionInjectionPhase.cpp:
-        (JSC::DFG::PredictionInjectionPhase::run):
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::run):
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        (JSC::DFG::PredictionPropagationPhase::propagateForward):
-        (JSC::DFG::PredictionPropagationPhase::propagateBackward):
-        (JSC::DFG::PredictionPropagationPhase::doRoundOfDoubleVoting):
-        * dfg/DFGScoreBoard.h:
-        (JSC::DFG::ScoreBoard::use):
-        * dfg/DFGSlowPathGenerator.h:
-        (JSC::DFG::SlowPathGenerator::generate):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::terminateSpeculativeExecution):
-        (JSC::DFG::SpeculativeJIT::runSlowPathGenerators):
-        (JSC::DFG::SpeculativeJIT::dump):
-        (JSC::DFG::SpeculativeJIT::compileCurrentBlock):
-        (JSC::DFG::SpeculativeJIT::checkGeneratedTypeForToInt32):
-        * dfg/DFGSpeculativeJIT.h:
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::fillSpeculateInt32Internal):
-        (JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
-        (JSC::DFG::SpeculativeJIT::fillSpeculateCell):
-        (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::fillSpeculateInt32Internal):
-        (JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
-        (JSC::DFG::SpeculativeJIT::fillSpeculateCell):
-        (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGVariableEventStream.cpp:
-        (JSC::DFG::VariableEventStream::reconstruct):
-        * dfg/DFGVariableEventStream.h:
-        (JSC::DFG::VariableEventStream::appendAndLog):
-        * dfg/DFGVirtualRegisterAllocationPhase.cpp:
-        (JSC::DFG::VirtualRegisterAllocationPhase::run):
-        * jit/JIT.cpp:
-        (JSC::JIT::privateCompile):
-
-2013-11-29  Filip Pizlo  
-
-        FTL IC should nop-fill to make up the difference between the actual IC size and the requested patchpoint size
-        https://bugs.webkit.org/show_bug.cgi?id=124960
-
-        Reviewed by Sam Weinig.
-
-        * assembler/LinkBuffer.h:
-        (JSC::LinkBuffer::size):
-        * assembler/X86Assembler.h:
-        (JSC::X86Assembler::fillNops):
-        * dfg/DFGDisassembler.cpp:
-        (JSC::DFG::Disassembler::dumpHeader):
-        * ftl/FTLCompile.cpp:
-        (JSC::FTL::generateICFastPath):
-        * jit/JITDisassembler.cpp:
-        (JSC::JITDisassembler::dumpHeader):
-
-2013-11-29  Julien Brianceau  
-
-        Use moveDoubleToInts in SpecializedThunkJIT::returnDouble for non-X86 JSVALUE32_64 ports.
-        https://bugs.webkit.org/show_bug.cgi?id=124936
-
-        Reviewed by Zoltan Herczeg.
-
-        The moveDoubleToInts implementations in ARM, MIPS and SH4 macro assemblers do not clobber
-        the src FPRegister and are likely to be more efficient than the current generic implementation
-        using the stack.
-
-        * jit/SpecializedThunkJIT.h:
-        (JSC::SpecializedThunkJIT::returnDouble):
-
-2013-11-29  Julien Brianceau  
-
-        Merge arm and sh4 paths in nativeForGenerator and privateCompileCTINativeCall functions.
-        https://bugs.webkit.org/show_bug.cgi?id=124892
-
-        Reviewed by Zoltan Herczeg.
-
-        * assembler/MacroAssemblerSH4.h:
-        (JSC::MacroAssemblerSH4::call): Pick a scratch register instead of getting it as a
-        parameter. The sh4 port was the only one to have this call(Address, RegisterID) prototype.
-        * jit/JITOpcodes32_64.cpp:
-        (JSC::JIT::privateCompileCTINativeCall): Use argumentGPRx and merge arm and sh4 paths.
-        * jit/ThunkGenerators.cpp:
-        (JSC::nativeForGenerator): Use argumentGPRx and merge arm and sh4 paths.
-
-2013-11-28  Nadav Rotem  
-
-        Revert the X86 assembler peephole changes
-        https://bugs.webkit.org/show_bug.cgi?id=124988
-
-        Reviewed by Csaba Osztrogonác.
-
-        * assembler/MacroAssemblerX86.h:
-        (JSC::MacroAssemblerX86::add32):
-        (JSC::MacroAssemblerX86::add64):
-        (JSC::MacroAssemblerX86::or32):
-        * assembler/MacroAssemblerX86Common.h:
-        (JSC::MacroAssemblerX86Common::add32):
-        (JSC::MacroAssemblerX86Common::or32):
-        (JSC::MacroAssemblerX86Common::branchAdd32):
-        * assembler/MacroAssemblerX86_64.h:
-        (JSC::MacroAssemblerX86_64::add32):
-        (JSC::MacroAssemblerX86_64::or32):
-        (JSC::MacroAssemblerX86_64::add64):
-        (JSC::MacroAssemblerX86_64::or64):
-        (JSC::MacroAssemblerX86_64::xor64):
-
-2013-11-28  Antti Koivisto  
-
-        Remove feature: CSS variables
-        https://bugs.webkit.org/show_bug.cgi?id=114119
-
-        Reviewed by Andreas Kling.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2013-11-28  Peter Gal  
-
-        Typo fix after r159834 to fix 32 bit builds.
-
-        Reviewed by Csaba Osztrogonác.
-
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-
-2013-11-27  Nadav Rotem  
-
-        Add a bunch of early exits and local optimizations to the x86 assembler.
-        https://bugs.webkit.org/show_bug.cgi?id=124904
-
-        Reviewed by Filip Pizlo.
-
-        * assembler/MacroAssemblerX86.h:
-        (JSC::MacroAssemblerX86::add32):
-        (JSC::MacroAssemblerX86::add64):
-        (JSC::MacroAssemblerX86::or32):
-        * assembler/MacroAssemblerX86Common.h:
-        (JSC::MacroAssemblerX86Common::add32):
-        (JSC::MacroAssemblerX86Common::or32):
-        * assembler/MacroAssemblerX86_64.h:
-        (JSC::MacroAssemblerX86_64::add32):
-        (JSC::MacroAssemblerX86_64::or32):
-        (JSC::MacroAssemblerX86_64::add64):
-        (JSC::MacroAssemblerX86_64::or64):
-        (JSC::MacroAssemblerX86_64::xor64):
-
-2013-11-27  Filip Pizlo  
-
-        Infer one-time scopes
-        https://bugs.webkit.org/show_bug.cgi?id=124812
-
-        Reviewed by Oliver Hunt.
-        
-        This detects JSActivations that are created only once. The JSActivation pointer is then
-        baked into the machine code.
-        
-        This takes advantage of the one-time scope inference to reduce the number of
-        indirections needed to get to a closure variable in the case where the scope is only
-        allocated once. This isn't really a speed-up since in the common case the total number
-        of instruction bytes needed to load the scope from the stack is about equal to the
-        number of instruction bytes needed to materialize the absolute address of a scoped
-        variable. But, this is a necessary prerequisite to
-        https://bugs.webkit.org/show_bug.cgi?id=124630, so it's probably a good idea anyway.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::dumpBytecode):
-        (JSC::CodeBlock::CodeBlock):
-        (JSC::CodeBlock::finalizeUnconditionally):
-        * bytecode/Instruction.h:
-        * bytecode/Opcode.h:
-        (JSC::padOpcodeName):
-        * bytecode/Watchpoint.h:
-        (JSC::WatchpointSet::notifyWrite):
-        (JSC::InlineWatchpointSet::notifyWrite):
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::emitResolveScope):
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::scopedVarLoadElimination):
-        (JSC::DFG::CSEPhase::scopedVarStoreElimination):
-        (JSC::DFG::CSEPhase::getLocalLoadElimination):
-        (JSC::DFG::CSEPhase::setLocalStoreElimination):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGGraph.cpp:
-        (JSC::DFG::Graph::tryGetRegisters):
-        * dfg/DFGGraph.h:
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::varNumber):
-        (JSC::DFG::Node::hasSymbolTable):
-        (JSC::DFG::Node::symbolTable):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGWatchpointCollectionPhase.cpp:
-        (JSC::DFG::WatchpointCollectionPhase::handle):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileGetClosureRegisters):
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * runtime/JSActivation.h:
-        (JSC::JSActivation::create):
-        * runtime/JSScope.cpp:
-        (JSC::abstractAccess):
-        (JSC::JSScope::abstractResolve):
-        * runtime/JSScope.h:
-        (JSC::ResolveOp::ResolveOp):
-        * runtime/JSVariableObject.h:
-        (JSC::JSVariableObject::registers):
-        * runtime/SymbolTable.cpp:
-        (JSC::SymbolTable::SymbolTable):
-        * runtime/SymbolTable.h:
-
-2013-11-26  Filip Pizlo  
-
-        Do bytecode validation as part of testing
-        https://bugs.webkit.org/show_bug.cgi?id=124913
-
-        Reviewed by Oliver Hunt.
-        
-        Also fix some small bugs in the bytecode liveness analysis that I found by doing
-        this validation thingy.
-
-        * bytecode/BytecodeLivenessAnalysis.cpp:
-        (JSC::isValidRegisterForLiveness):
-        (JSC::BytecodeLivenessAnalysis::runLivenessFixpoint):
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::validate):
-        (JSC::CodeBlock::beginValidationDidFail):
-        (JSC::CodeBlock::endValidationDidFail):
-        * bytecode/CodeBlock.h:
-        * runtime/Executable.cpp:
-        (JSC::ScriptExecutable::prepareForExecutionImpl):
-        * runtime/Options.h:
-
-2013-11-27  Andreas Kling  
-
-        Structure::m_staticFunctionReified should be a single bit.
-        
-
-        Shave 8 bytes off of JSC::Structure by jamming m_staticFunctionReified
-        into the bitfield just above.
-
-        Reviewed by Antti Koivisto.
-
-2013-11-27  Andreas Kling  
-
-        JSActivation constructor should use NotNull placement new.
-        
-
-        Knock a null check outta the storage initialization loop.
-
-        Reviewed by Antti Koivisto.
-
-2013-11-26  Filip Pizlo  
-
-        Restructure global variable constant inference so that it could work for any kind of symbol table variable
-        https://bugs.webkit.org/show_bug.cgi?id=124760
-
-        Reviewed by Oliver Hunt.
-        
-        This changes the way global variable constant inference works so that it can be reused
-        for closure variable constant inference. Some of the premises that originally motivated
-        this patch are somewhat wrong, but the work led to some simplifications anyway, and I suspect
-        that we'll be able to fix those premises in the future. The main point of this patch is
-        to make it easy to reuse global variable constant inference for closure variable
-        constant inference, and this will be possible provided we can also either (a) infer
-        one-shot closures (easy) or (b) infer closure variables that are always assigned prior
-        to first use.
-        
-        One of the things that this patch is meant to enable is constant inference for closure
-        variables that may be part of a multi-shot closure. Closure variables may be
-        instantiated multiple times, like:
-        
-            function foo() {
-                var WIDTH = 45;
-                function bar() {
-                    ... use WIDTH ...
-                }
-                ...
-            }
-        
-        Even if foo() is called many times and WIDTH is assigned to multiple times, that
-        doesn't change the fact that it's a constant. The goal of closure variable constant
-        inference is to catch any case where a closure variable has been assigned at least once
-        and its value has never changed. This patch doesn't implement that, but it does change
-        global variable constant inference to have most of the powers needed to do that. Note
-        that most likely we will use this functionality only to implement constant inference
-        for one-shot closures, but the resulting machinery is still simpler than what we had
-        before.
-        
-        This involves three changes:
-        
-            - The watchpoint object now contains the inferred value. This involves creating a
-              new kind of watchpoint set, the VariableWatchpointSet. We will reuse this object
-              for closure variables.
-        
-            - Writing to a variable that is watchpointed still involves these three states that
-              we proceed through monotonically (Uninitialized->Initialized->Invalidated), but
-              now the Initialized->Invalidated state transition only happens if we change the
-              variable's value, rather than on every store to the variable. Repeatedly storing the same
-              value won't change the variable's state.
-        
-            - On 64-bit systems (the only systems on which we do concurrent JIT), you no longer
-              need fancy fencing to get a consistent view of the watchpoint in the JIT. The
-              state of the VariableWatchpointSet for the purposes of constant folding is
-              entirely encapsulated in the VariableWatchpointSet::m_inferredValue. If that is
-              JSValue() then you cannot fold (either because the set is uninitialized or
-              because it's invalidated - doesn't matter which); on the other hand if the value
-              is anything other than JSValue() then you can fold, and that's the value you fold
-              to. Simple!
-        
-        This also changes the way that DFG IR deals with variable watchpoints. It's now
-        oblivious to global variables. You install a watchpoint using VariableWatchpoint and
-        you notify write using NotifyWrite. Easy!
-        
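-        In JIT terms, the folding rule above reduces to a sketch like this
-        (given a VariableWatchpointSet* set):
-
-            JSValue inferred = set->inferredValue();
-            if (!inferred) {
-                // uninitialized or invalidated - either way, don't fold
-            } else {
-                // fold loads of the variable to 'inferred', and register a
-                // watchpoint so invalidation jettisons this code
-            }
-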
-        Note that this will require some more tweaks because of the fact that op_enter will
-        store Undefined into every captured variable. Hence it won't even work for one-shot
-        closures. One-shot closures are easily fixed by introducing another state (so we'll
-        have Uninitialized->Undefined->Initialized->Invalidated). Multi-shot closures will
-        require static analysis. One-shot closures are clearly a higher priority.
-
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bytecode/Instruction.h:
-        * bytecode/VariableWatchpointSet.h: Added.
-        (JSC::VariableWatchpointSet::VariableWatchpointSet):
-        (JSC::VariableWatchpointSet::~VariableWatchpointSet):
-        (JSC::VariableWatchpointSet::inferredValue):
-        (JSC::VariableWatchpointSet::notifyWrite):
-        (JSC::VariableWatchpointSet::invalidate):
-        (JSC::VariableWatchpointSet::finalizeUnconditionally):
-        (JSC::VariableWatchpointSet::addressOfInferredValue):
-        * bytecode/Watchpoint.h:
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGCSEPhase.cpp:
-        (JSC::DFG::CSEPhase::performNodeCSE):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::hasRegisterPointer):
-        (JSC::DFG::Node::hasVariableWatchpointSet):
-        (JSC::DFG::Node::variableWatchpointSet):
-        * dfg/DFGNodeType.h:
-        * dfg/DFGOperations.cpp:
-        * dfg/DFGOperations.h:
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileArithMod):
-        * dfg/DFGSpeculativeJIT.h:
-        (JSC::DFG::SpeculativeJIT::callOperation):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGWatchpointCollectionPhase.cpp:
-        (JSC::DFG::WatchpointCollectionPhase::handle):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileNotifyWrite):
-        * jit/JIT.h:
-        * jit/JITOperations.h:
-        * jit/JITPropertyAccess.cpp:
-        (JSC::JIT::emitNotifyWrite):
-        (JSC::JIT::emitPutGlobalVar):
-        * jit/JITPropertyAccess32_64.cpp:
-        (JSC::JIT::emitNotifyWrite):
-        (JSC::JIT::emitPutGlobalVar):
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::addGlobalVar):
-        (JSC::JSGlobalObject::addFunction):
-        * runtime/JSGlobalObject.h:
-        * runtime/JSScope.h:
-        (JSC::ResolveOp::ResolveOp):
-        * runtime/JSSymbolTableObject.h:
-        (JSC::symbolTablePut):
-        (JSC::symbolTablePutWithAttributes):
-        * runtime/SymbolTable.cpp:
-        (JSC::SymbolTableEntry::inferredValue):
-        (JSC::SymbolTableEntry::prepareToWatch):
-        (JSC::SymbolTableEntry::addWatchpoint):
-        (JSC::SymbolTableEntry::notifyWriteSlow):
-        (JSC::SymbolTable::visitChildren):
-        (JSC::SymbolTable::WatchpointCleanup::WatchpointCleanup):
-        (JSC::SymbolTable::WatchpointCleanup::~WatchpointCleanup):
-        (JSC::SymbolTable::WatchpointCleanup::finalizeUnconditionally):
-        * runtime/SymbolTable.h:
-        (JSC::SymbolTableEntry::watchpointSet):
-        (JSC::SymbolTableEntry::notifyWrite):
-
-2013-11-24  Filip Pizlo  
-
-        Create a new SymbolTable every time code is loaded so that the watchpoints don't get reused
-        https://bugs.webkit.org/show_bug.cgi?id=124824
-
-        Reviewed by Oliver Hunt.
-        
-        This helps with one-shot closure inference as well as closure variable constant
-        inference, since without this, if code was reloaded from the cache then we would
-        think that the first run was actually an Nth run. This would cause us to think that
-        the watchpoint(s) should all be invalidated.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-        (JSC::CodeBlock::stronglyVisitStrongReferences):
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::symbolTable):
-        * runtime/Executable.cpp:
-        (JSC::FunctionExecutable::symbolTable):
-        * runtime/Executable.h:
-        * runtime/SymbolTable.cpp:
-        (JSC::SymbolTable::clone):
-        * runtime/SymbolTable.h:
-
-2013-11-26  Oliver Hunt  
-
-        Crash in JSC::ASTBuilder::Expression JSC::Parser<JSC::Lexer<unsigned char> >::parseUnaryExpression(JSC::ASTBuilder&)
-        https://bugs.webkit.org/show_bug.cgi?id=124886
-
-        Reviewed by Sam Weinig.
-
-        Make sure the error macros propagate an existing error before
-        trying to create a new error message.  We need to do this as
-        the parser state may not be safe for any specific error message
-        if we are already unwinding due to an error.
-
-        * parser/Parser.cpp:
-
-2013-11-26  Nadav Rotem  
-
-        Optimize away OR with zero - a common ASM.js pattern.
-        https://bugs.webkit.org/show_bug.cgi?id=124869
-
-        Reviewed by Filip Pizlo.
-
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-
-2013-11-25  Julien Brianceau  
-
-        [arm][mips] Fix crash in dfg-arrayify-elimination layout jsc test.
-        https://bugs.webkit.org/show_bug.cgi?id=124839
-
-        Reviewed by Michael Saboff.
-
-        In ARM EABI and MIPS, 64-bit values have to be aligned on the stack too.
-
-        * jit/CCallHelpers.h:
-        (JSC::CCallHelpers::setupArgumentsWithExecState):
-        * jit/JITInlines.h:
-        (JSC::JIT::callOperation): Add missing EABI_32BIT_DUMMY_ARG.
-
-2013-11-23  Filip Pizlo  
-
-        Fix more fallout from failed attempts at div/mod DFG strength reductions
-        https://bugs.webkit.org/show_bug.cgi?id=124813
-
-        Reviewed by Geoffrey Garen.
-
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileArithMod):
-
-2013-11-22  Mark Hahnenberg  
-
-        JSC Obj-C API should have real documentation
-        https://bugs.webkit.org/show_bug.cgi?id=124805
-
-        Reviewed by Geoffrey Garen.
-
-        Massaging the header comments into proper headerdocs.
-
-        * API/JSContext.h:
-        * API/JSExport.h:
-        * API/JSManagedValue.h:
-        * API/JSValue.h:
-        * API/JSVirtualMachine.h:
-
-2013-11-22  Filip Pizlo  
-
-        CodeBlock::m_numCalleeRegisters shouldn't also mean frame size, frame size needed for exit, or any other unrelated things
-        https://bugs.webkit.org/show_bug.cgi?id=124793
-
-        Reviewed by Mark Hahnenberg.
-        
-        Now m_numCalleeRegisters always refers to the number of locals that the attached
-        bytecode uses. It never means anything else.
-        
-        For frame size, we now have it lazily computed from m_numCalleeRegisters for the
-        baseline engines and we have it stored in DFG::CommonData for the optimizing JITs.
-        
-        For frame-size-needed-at-exit, we store that in DFG::CommonData, too.
-        
-        The code no longer implies that there is any arithmetic relationship between
-        m_numCalleeRegisters and frameSize. Previously it implied that the latter is greater
-        than the former.
-        
-        The code no longer implies that there is any arithmetic relationship between the
-        frame size and the frame-size-needed-at-exit. Previously it implied that the latter
-        is greater than the former.
-
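-        Where both quantities are needed, the relationship is now computed
-        explicitly; the new CommonData helper is essentially this sketch:
-
-            unsigned requiredRegisterCountForExecutionAndExit() const
-            {
-                return std::max(frameRegisterCount, requiredRegisterCountForExit);
-            }
-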
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::frameRegisterCount):
-        * bytecode/CodeBlock.h:
-        * dfg/DFGCommonData.h:
-        (JSC::DFG::CommonData::CommonData):
-        (JSC::DFG::CommonData::requiredRegisterCountForExecutionAndExit):
-        * dfg/DFGGraph.cpp:
-        (JSC::DFG::Graph::frameRegisterCount):
-        (JSC::DFG::Graph::requiredRegisterCountForExit):
-        (JSC::DFG::Graph::requiredRegisterCountForExecutionAndExit):
-        * dfg/DFGGraph.h:
-        * dfg/DFGJITCompiler.cpp:
-        (JSC::DFG::JITCompiler::link):
-        (JSC::DFG::JITCompiler::compileFunction):
-        * dfg/DFGOSREntry.cpp:
-        (JSC::DFG::prepareOSREntry):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::SpeculativeJIT):
-        * dfg/DFGVirtualRegisterAllocationPhase.cpp:
-        (JSC::DFG::VirtualRegisterAllocationPhase::run):
-        * ftl/FTLLink.cpp:
-        (JSC::FTL::link):
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileCallOrConstruct):
-        * ftl/FTLOSREntry.cpp:
-        (JSC::FTL::prepareOSREntry):
-        * interpreter/CallFrame.cpp:
-        (JSC::CallFrame::frameExtentInternal):
-        * interpreter/JSStackInlines.h:
-        (JSC::JSStack::pushFrame):
-        * jit/JIT.h:
-        (JSC::JIT::frameRegisterCountFor):
-        * jit/JITOperations.cpp:
-        * llint/LLIntEntrypoint.cpp:
-        (JSC::LLInt::frameRegisterCountFor):
-        * llint/LLIntEntrypoint.h:
-
-2013-11-21  Filip Pizlo  
-
-        Combine SymbolTable and SharedSymbolTable
-        https://bugs.webkit.org/show_bug.cgi?id=124761
-
-        Reviewed by Geoffrey Garen.
-        
-        SymbolTable was never used directly; we now always used SharedSymbolTable. So, this
-        gets rid of SymbolTable and renames SharedSymbolTable to SymbolTable.
-
-        * bytecode/CodeBlock.h:
-        (JSC::CodeBlock::symbolTable):
-        * bytecode/UnlinkedCodeBlock.h:
-        (JSC::UnlinkedFunctionExecutable::symbolTable):
-        (JSC::UnlinkedCodeBlock::symbolTable):
-        (JSC::UnlinkedCodeBlock::finishCreation):
-        * bytecompiler/BytecodeGenerator.h:
-        (JSC::BytecodeGenerator::symbolTable):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGStackLayoutPhase.cpp:
-        (JSC::DFG::StackLayoutPhase::run):
-        * jit/AssemblyHelpers.h:
-        (JSC::AssemblyHelpers::symbolTableFor):
-        * runtime/Arguments.h:
-        (JSC::Arguments::finishCreation):
-        * runtime/Executable.h:
-        (JSC::FunctionExecutable::symbolTable):
-        * runtime/JSActivation.h:
-        (JSC::JSActivation::create):
-        (JSC::JSActivation::JSActivation):
-        (JSC::JSActivation::registersOffset):
-        (JSC::JSActivation::allocationSize):
-        * runtime/JSSymbolTableObject.h:
-        (JSC::JSSymbolTableObject::symbolTable):
-        (JSC::JSSymbolTableObject::JSSymbolTableObject):
-        (JSC::JSSymbolTableObject::finishCreation):
-        * runtime/JSVariableObject.h:
-        (JSC::JSVariableObject::JSVariableObject):
-        * runtime/SymbolTable.cpp:
-        (JSC::SymbolTable::destroy):
-        (JSC::SymbolTable::SymbolTable):
-        * runtime/SymbolTable.h:
-        (JSC::SymbolTable::create):
-        (JSC::SymbolTable::createStructure):
-        * runtime/VM.cpp:
-        (JSC::VM::VM):
-        * runtime/VM.h:
-
-2013-11-22  Mark Lam  
-
-        Remove residual references to "dynamicGlobalObject".
-        https://bugs.webkit.org/show_bug.cgi?id=124787.
-
-        Reviewed by Filip Pizlo.
-
-        * JavaScriptCore.order:
-        * interpreter/CallFrame.h:
-
-2013-11-22  Mark Lam  
-
-        Ensure that arity fixups honor stack alignment requirements.
-        https://bugs.webkit.org/show_bug.cgi?id=124756.
-
-        Reviewed by Geoffrey Garen.
-
-        The LLINT and all the JITs rely on CommonSlowPaths::arityCheckFor() to
-        compute the arg count adjustment for the arity fixup. We take advantage
-        of this choke point and introduce the stack alignment padding there in
-        the guise of additional args.
-
-        The only cost of this approach is that the padding will also be
-        initialized to undefined values as if they were args. Since arity fixups
-        are considered a slow path that is rarely taken, this cost is not a
-        concern.
-
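-        The padding itself is plain alignment arithmetic, roughly (the names
-        here are illustrative):
-
-            unsigned missing = desiredArgumentCount - actualArgumentCountIncludingThis;
-            unsigned alignment = stackAlignmentRegisters(); // a power of two
-            unsigned padded = (missing + alignment - 1) & ~(alignment - 1);
-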
-        * runtime/CommonSlowPaths.h:
-        (JSC::CommonSlowPaths::arityCheckFor):
-        * runtime/VM.h:
-        (JSC::VM::isSafeToRecurse):
-
-2013-11-21  Filip Pizlo  
-
-        BytecodeGenerator should align the stack according to native conventions
-        https://bugs.webkit.org/show_bug.cgi?id=124735
-
-        Reviewed by Mark Lam.
-        
-        Rolling this back in because it actually fixed fast/dom/gc-attribute-node.html, but
-        our infrastructure misleads people into thinking that fixing a test constitutes
-        breaking it.
-
-        * bytecompiler/BytecodeGenerator.h:
-        (JSC::CallArguments::registerOffset):
-        (JSC::CallArguments::argumentCountIncludingThis):
-        * bytecompiler/NodesCodegen.cpp:
-        (JSC::CallArguments::CallArguments):
-
-2013-11-21  Filip Pizlo  
-
-        Get rid of CodeBlock::dumpStatistics()
-        https://bugs.webkit.org/show_bug.cgi?id=124762
-
-        Reviewed by Mark Hahnenberg.
-
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-        (JSC::CodeBlock::~CodeBlock):
-        * bytecode/CodeBlock.h:
-
-2013-11-22  Commit Queue  
-
-        Unreviewed, rolling out r159652.
-        http://trac.webkit.org/changeset/159652
-        https://bugs.webkit.org/show_bug.cgi?id=124778
-
-        broke fast/dom/gc-attribute-node.html (Requested by ap on
-        #webkit).
-
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::emitCall):
-        (JSC::BytecodeGenerator::emitConstruct):
-        * bytecompiler/BytecodeGenerator.h:
-        (JSC::CallArguments::registerOffset):
-        (JSC::CallArguments::argumentCountIncludingThis):
-        * bytecompiler/NodesCodegen.cpp:
-        (JSC::CallArguments::CallArguments):
-        (JSC::CallArguments::newArgument):
-
-2013-11-21  Filip Pizlo  
-
-        Fix a typo (requriements->requirements).
-
-        * runtime/StackAlignment.h:
-
-2013-11-21  Mark Lam  
-
-        CodeBlock::m_numCalleeRegisters need to honor native stack alignment.
-        https://bugs.webkit.org/show_bug.cgi?id=124754.
-
-        Reviewed by Filip Pizlo.
-
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::newRegister):
-        * dfg/DFGVirtualRegisterAllocationPhase.cpp:
-        (JSC::DFG::VirtualRegisterAllocationPhase::run):
-
-2013-11-21  Mark Rowe  
-
-         Stop overriding VALID_ARCHS.
-
-        All modern versions of Xcode set it appropriately for our needs.
-
-        Reviewed by Alexey Proskuryakov.
-
-        * Configurations/Base.xcconfig:
-
-2013-11-21  Mark Rowe  
-
-         Fix an error in a few Xcode configuration setting files.
-
-        Reviewed by Alexey Proskuryakov.
-
-        * Configurations/Base.xcconfig:
-
-2013-11-21  Michael Saboff  
-
-        ARM64: Implement push/pop equivalents in LLInt
-        https://bugs.webkit.org/show_bug.cgi?id=124721
-
-        Reviewed by Filip Pizlo.
-
-        Added pushLRAndFP and popLRAndFP that push and pop the link register and frame pointer register.
-        These ops emit code just like what the compiler emits in the prologue and epilogue.  Also changed
-        pushCalleeSaves and popCalleeSaves to use the same store pair and load pair instructions to do
-        the actual pushing and popping.  Finally changed the implementation of push and pop to raise
-        an exception since we don't have (or need) a single register push or pop.
-
-        * llint/LowLevelInterpreter64.asm:
-        * offlineasm/arm64.rb:
-        * offlineasm/instructions.rb:
-
-2013-11-21  Michael Saboff  
-
-        JSC: Removed unused opcodes from offline assembler
-        https://bugs.webkit.org/show_bug.cgi?id=124749
-
-        Reviewed by Mark Hahnenberg.
-
-        Removed the unused, X86-only peekq and pokeq.
-
-        * offlineasm/instructions.rb:
-        * offlineasm/x86.rb:
-
-2013-11-21  Michael Saboff  
-
-        REGRESSION(159395) Fix branch8(…, AbsoluteAddress, …) in ARM64 MacroAssembler
-        https://bugs.webkit.org/show_bug.cgi?id=124688
-
-        Reviewed by Geoffrey Garen.
-
-        Changed handling of the address for the load8() in the branch8(AbsoluteAddress) to be like
-        the rest of the branchXX(AbsoluteAddress) functions.
-
-        * assembler/MacroAssemblerARM64.h:
-        (JSC::MacroAssemblerARM64::branch8):
-
-2013-11-21  Filip Pizlo  
-
-        BytecodeGenerator should align the stack according to native conventions
-        https://bugs.webkit.org/show_bug.cgi?id=124735
-
-        Reviewed by Mark Lam.
-
-        * bytecompiler/BytecodeGenerator.h:
-        (JSC::CallArguments::registerOffset):
-        (JSC::CallArguments::argumentCountIncludingThis):
-        * bytecompiler/NodesCodegen.cpp:
-        (JSC::CallArguments::CallArguments):
-
-2013-11-21  Filip Pizlo  
-
-        Unreviewed, preemptive build fix.
-
-        * runtime/StackAlignment.h:
-        (JSC::stackAlignmentBytes):
-        (JSC::stackAlignmentRegisters):
-
-2013-11-21  Filip Pizlo  
-
-        JSC should know what the stack alignment conventions are
-        https://bugs.webkit.org/show_bug.cgi?id=124736
-
-        Reviewed by Mark Lam.
-
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * runtime/StackAlignment.h: Added.
-        (JSC::stackAlignmentBytes):
-        (JSC::stackAlignmentRegisters):
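
        The relationship between the two new helpers, rendered as a Python sketch;
        the 16-byte figure is an assumption about the native ABI, not taken from
        the patch itself.

            STACK_ALIGNMENT_BYTES = 16   # assumed native ABI requirement
            BYTES_PER_REGISTER = 8

            def stack_alignment_registers():
                # 16-byte alignment with 8-byte registers gives a granule of 2.
                return STACK_ALIGNMENT_BYTES // BYTES_PER_REGISTER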
-
-2013-11-21  Balazs Kilvady  
-
-        [MIPS] Build fails since r159545.
-        https://bugs.webkit.org/show_bug.cgi?id=124716
-
-        Reviewed by Michael Saboff.
-
-        Add missing implementations in MacroAssembler and LLInt for MIPS.
-
-        * assembler/MIPSAssembler.h:
-        (JSC::MIPSAssembler::sync):
-        * assembler/MacroAssemblerMIPS.h:
-        (JSC::MacroAssemblerMIPS::store8):
-        (JSC::MacroAssemblerMIPS::memoryFence):
-        * offlineasm/mips.rb:
-
-2013-11-21  Julien Brianceau  
-
-        Fix sh4 build after r159545.
-        https://bugs.webkit.org/show_bug.cgi?id=124713
-
-        Reviewed by Michael Saboff.
-
-        Add missing implementations in macro assembler and LLINT for sh4.
-
-        * assembler/MacroAssemblerSH4.h:
-        (JSC::MacroAssemblerSH4::load8):
-        (JSC::MacroAssemblerSH4::store8):
-        (JSC::MacroAssemblerSH4::memoryFence):
-        * assembler/SH4Assembler.h:
-        (JSC::SH4Assembler::synco):
-        * offlineasm/sh4.rb: Handle "memfence" opcode.
-
-2013-11-20  Mark Lam  
-
-        Introducing VMEntryScope to update the VM stack limit.
-        https://bugs.webkit.org/show_bug.cgi?id=124634.
-
-        Reviewed by Geoffrey Garen.
-
-        1. Introduced USE(SEPARATE_C_AND_JS_STACK) (defined in Platform.h).
-           Currently, it is hardcoded to use separate C and JS stacks. Once we
-           switch to using the C stack for JS frames, we'll need to fix this to
-           only be enabled when ENABLE(LLINT_C_LOOP).
-
-        2. Stack limits are now tracked in the VM.
-
-           Logically, there are 2 stack limits:
-           a. m_stackLimit for the native C stack, and
-           b. m_jsStackLimit for the JS stack.
-
-           If USE(SEPARATE_C_AND_JS_STACK), then the 2 limits are the same
-           value, and are implemented as 2 fields in a union.
-
-        3. The VM native stackLimit is set as follows:
-           a. Initially, the VM sets it to the limit of the stack of the thread that
-              instantiated the VM. This allows the parser and bytecode generator to
-              run before we enter the VM to execute JS code.
-
-           b. Upon entry into the VM to execute JS code (via one of the
-              Interpreter::execute...() functions), we instantiate a VMEntryScope
-              that sets the VM's stackLimit to the limit of the current thread's
-              stack. The VMEntryScope will automatically restore the previous
-              entryScope and stack limit upon destruction.
-
-           If USE(SEPARATE_C_AND_JS_STACK), the JSStack's methods will set the VM's
-           jsStackLimit whenever it grows or shrinks.
-
-        4. The VM now provides a isSafeToRecurse() function that compares the
-           current stack pointer against its native stackLimit. This subsumes and
-           obsoletes the VMStackBounds class.
-
-        5. The VMEntryScope class also subsumes DynamicGlobalObjectScope for
-           tracking the JSGlobalObject that we last entered the VM with.
-
-        6. Renamed dynamicGlobalObject() to vmEntryGlobalObject() since that is
-           the value that the function retrieves.
-
-        7. Changed JIT and LLINT code to do stack checks against the jsStackLimit
-           in the VM class instead of the JSStack.
-
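        A hedged Python model of the scope described in points 3 through 5 above;
        the class shape and the stack-bound helper are illustrative only, not the
        actual JSC API.

            def current_thread_stack_limit():
                # Stand-in for querying the current thread's stack bound.
                return 0

            class VMEntryScope:
                def __init__(self, vm, global_object):
                    self._vm = vm
                    self._global_object = global_object

                def __enter__(self):
                    # Save the previous scope and limit, then track this entry.
                    self._saved_scope = self._vm.entry_scope
                    self._saved_limit = self._vm.stack_limit
                    self._vm.entry_scope = self
                    self._vm.stack_limit = current_thread_stack_limit()
                    return self

                def __exit__(self, *exc_info):
                    # Restore on the way out, mirroring C++ destruction order.
                    self._vm.stack_limit = self._saved_limit
                    self._vm.entry_scope = self._saved_scope
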
-        * API/JSBase.cpp:
-        (JSEvaluateScript):
-        (JSCheckScriptSyntax):
-        * API/JSContextRef.cpp:
-        (JSGlobalContextRetain):
-        (JSGlobalContextRelease):
-        * CMakeLists.txt:
-        * GNUmakefile.list.am:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
-        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
-        * JavaScriptCore.xcodeproj/project.pbxproj:
-        * bytecompiler/BytecodeGenerator.cpp:
-        (JSC::BytecodeGenerator::BytecodeGenerator):
-        * bytecompiler/BytecodeGenerator.h:
-        (JSC::BytecodeGenerator::emitNode):
-        (JSC::BytecodeGenerator::emitNodeInConditionContext):
-        * debugger/Debugger.cpp:
-        (JSC::Debugger::detach):
-        (JSC::Debugger::recompileAllJSFunctions):
-        (JSC::Debugger::pauseIfNeeded):
-        * debugger/DebuggerCallFrame.cpp:
-        (JSC::DebuggerCallFrame::vmEntryGlobalObject):
-        * debugger/DebuggerCallFrame.h:
-        * dfg/DFGJITCompiler.cpp:
-        (JSC::DFG::JITCompiler::compileFunction):
-        * dfg/DFGOSREntry.cpp:
-        * ftl/FTLLink.cpp:
-        (JSC::FTL::link):
-        * ftl/FTLOSREntry.cpp:
-        * heap/Heap.cpp:
-        (JSC::Heap::lastChanceToFinalize):
-        (JSC::Heap::deleteAllCompiledCode):
-        * interpreter/CachedCall.h:
-        (JSC::CachedCall::CachedCall):
-        * interpreter/CallFrame.cpp:
-        (JSC::CallFrame::vmEntryGlobalObject):
-        * interpreter/CallFrame.h:
-        * interpreter/Interpreter.cpp:
-        (JSC::unwindCallFrame):
-        (JSC::Interpreter::unwind):
-        (JSC::Interpreter::execute):
-        (JSC::Interpreter::executeCall):
-        (JSC::Interpreter::executeConstruct):
-        (JSC::Interpreter::prepareForRepeatCall):
-        (JSC::Interpreter::debug):
-        * interpreter/JSStack.cpp:
-        (JSC::JSStack::JSStack):
-        (JSC::JSStack::growSlowCase):
-        * interpreter/JSStack.h:
-        * interpreter/JSStackInlines.h:
-        (JSC::JSStack::shrink):
-        (JSC::JSStack::grow):
-        - Moved these inlined functions here from JSStack.h. It reduces some
-          #include dependencies of JSStack.h which had previously resulted
-          in some EWS bots' unhappiness with this patch.
-        (JSC::JSStack::updateStackLimit):
-        * jit/JIT.cpp:
-        (JSC::JIT::privateCompile):
-        * jit/JITCall.cpp:
-        (JSC::JIT::compileLoadVarargs):
-        * jit/JITCall32_64.cpp:
-        (JSC::JIT::compileLoadVarargs):
-        * jit/JITOperations.cpp:
-        * llint/LLIntSlowPaths.cpp:
-        * llint/LowLevelInterpreter.asm:
-        * parser/Parser.cpp:
-        (JSC::::Parser):
-        * parser/Parser.h:
-        (JSC::Parser::canRecurse):
-        * runtime/CommonSlowPaths.h:
-        * runtime/Completion.cpp:
-        (JSC::evaluate):
-        * runtime/FunctionConstructor.cpp:
-        (JSC::constructFunctionSkippingEvalEnabledCheck):
-        * runtime/JSGlobalObject.cpp:
-        * runtime/JSGlobalObject.h:
-        * runtime/StringRecursionChecker.h:
-        (JSC::StringRecursionChecker::performCheck):
-        * runtime/VM.cpp:
-        (JSC::VM::VM):
-        (JSC::VM::releaseExecutableMemory):
-        (JSC::VM::throwException):
-        * runtime/VM.h:
-        (JSC::VM::addressOfJSStackLimit):
-        (JSC::VM::jsStackLimit):
-        (JSC::VM::setJSStackLimit):
-        (JSC::VM::stackLimit):
-        (JSC::VM::setStackLimit):
-        (JSC::VM::isSafeToRecurse):
-        * runtime/VMEntryScope.cpp: Added.
-        (JSC::VMEntryScope::VMEntryScope):
-        (JSC::VMEntryScope::~VMEntryScope):
-        (JSC::VMEntryScope::requiredCapacity):
-        * runtime/VMEntryScope.h: Added.
-        (JSC::VMEntryScope::globalObject):
-        * runtime/VMStackBounds.h: Removed.
-
-2013-11-20  Michael Saboff  
-
-        [Win] JavaScript JIT crash (with DFG enabled).
-        https://bugs.webkit.org/show_bug.cgi?id=124675
-
-        Reviewed by Geoffrey Garen.
-
-        Similar to the change in r159427, changed linkClosureCall to use regT0/regT1 (payload/tag) for the callee.
-        linkForThunkGenerator already expected the callee in regT0/regT1; its comment was updated to reflect that.
-
-        * jit/Repatch.cpp:
-        (JSC::linkClosureCall):
-        * jit/ThunkGenerators.cpp:
-        (JSC::linkForThunkGenerator):
-
-2013-11-20  Michael Saboff  
-
-        ARMv7: Crash due to use after free of AssemblerBuffer
-        https://bugs.webkit.org/show_bug.cgi?id=124611
-
-        Reviewed by Geoffrey Garen.
-
-        Changed JITFinalizer constructor to take a MacroAssemblerCodePtr instead of a Label.
-        In finalizeFunction(), we use that value instead of calculating it from the label.
-
-        * assembler/MacroAssembler.cpp:
-        * dfg/DFGJITFinalizer.cpp:
-        (JSC::DFG::JITFinalizer::JITFinalizer):
-        (JSC::DFG::JITFinalizer::finalizeFunction):
-        * dfg/DFGJITFinalizer.h:
-
-2013-11-20  Julien Brianceau  
-
-        Fix CPU(ARM_TRADITIONAL) build after r159545.
-        https://bugs.webkit.org/show_bug.cgi?id=124649
-
-        Reviewed by Michael Saboff.
-
-        Add missing memoryFence, load8 and store8 implementations in macro assembler.
-
-        * assembler/ARMAssembler.h:
-        (JSC::ARMAssembler::dmbSY):
-        * assembler/MacroAssemblerARM.h:
-        (JSC::MacroAssemblerARM::load8):
-        (JSC::MacroAssemblerARM::store8):
-        (JSC::MacroAssemblerARM::memoryFence):
-
-2013-11-20  Julien Brianceau  
-
-        [armv7][arm64] Speculative build fix after r159545.
-        https://bugs.webkit.org/show_bug.cgi?id=124646
-
-        Reviewed by Filip Pizlo.
-
-        * assembler/ARMv7Assembler.h:
-        * assembler/MacroAssemblerARM64.h:
-        (JSC::MacroAssemblerARM64::memoryFence):
-        * assembler/MacroAssemblerARMv7.h:
-        (JSC::MacroAssemblerARMv7::memoryFence):
-
-2013-11-19  Ryosuke Niwa  
-
-        Enable HTMLTemplateElement on Mac port
-        https://bugs.webkit.org/show_bug.cgi?id=124637
-
-        Reviewed by Tim Horton.
-
-        * Configurations/FeatureDefines.xcconfig:
-
-2013-11-19  Filip Pizlo  
-
-        Unreviewed, remove completely bogus assertion.
-
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::addFunction):
-
-2013-11-19  Filip Pizlo  
-
-        Unreviewed, debug build fix.
-
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::addFunction):
-
-2013-11-19  Filip Pizlo  
-
-        Infer constant global variables
-        https://bugs.webkit.org/show_bug.cgi?id=124464
-
-        Reviewed by Sam Weinig.
-        
-        All global variables that are candidates for watchpoint-based constant inference (i.e.
-        not 'const' variables) will now have WatchpointSet's associated with them and those
-        are used to drive the inference by tracking three states of each variable:
-        
-        Uninitialized: the variable's value is Undefined and the WatchpointSet state is
-            ClearWatchpoint.
-        
-        Initialized: the variable's value was set to something (could even be explicitly set
-            to Undefined) and the WatchpointSet state is IsWatching.
-        
-        Invalidated: the variable's value was set to something else (could even be the same
-            thing as before but the point is that a put operation did execute again) and the
-            WatchpointSet is IsInvalidated.
-        
-        If the compiler tries to compile a GetGlobalVar and the WatchpointSet state is
-        IsWatching, then the current value of the variable can be folded in place of the get,
-        and a watchpoint on the variable can be registered.
-        
-        We handle race conditions between the mutator and compiler by mandating that:
-        
-        - The mutator changes the WatchpointSet state after executing the put.
-        
-        - There is no opportunity to install code or call functions between when the mutator
-          executes a put and changes the WatchpointSet state.
-        
-        - The compiler checks the WatchpointSet state prior to reading the value.
-        
-        The concrete algorithm used by the mutator is:
-        
-            1. Store the new value into the variable.
-            --- Execute a store-store fence.
-            2. Bump the state (ClearWatchpoint becomes IsWatching, IsWatching becomes
-               IsInvalidated); the IsWatching->IsInvalidated transition may end up firing
-               watchpoints.
-        
-        The concrete algorithm that the compiler uses is:
-        
-            1. Load the state. If it's *not* IsWatching, then give up on constant inference.
-            --- Execute a load-load fence.
-            2. Load the value of the variable and use that for folding, while also registering
-               a DesiredWatchpoint. The various parts of this step can be done in any order.
-        
-        The desired watchpoint registration will fail if the watchpoint set is already
-        invalidated. Now consider the following interesting interleavings:
-        
-        Uninitialized->M1->M2->C1->C2: Compiler sees IsWatching because of the mutator's store
-            operation, and the variable is folded. The fencing ensures that C2 sees the value
-            stored in M1 - i.e. we fold on the value that will actually be watchpointed. If
-            before the compilation is installed the mutator executes another store then we
-            will be sure that it will be a complete sequence of M1+M2 since compilations get
-            installed at safepoints and never "in the middle" of a put_to_scope. Hence that
-            compilation installation will be invalidated. If the M1+M2 sequence happens after
-            the code is installed, then the code will be invalidated by triggering a jettison.
-        
-        Uninitialized->M1->C1->C2->M2: Compiler sees Uninitialized and will not fold. This is
-            a sensible outcome since if the compiler read the variable's value, it would have
-            seen Undefined.
-        
-        Uninitialized->C1->C2->M1->M2: Compiler sees Uninitialized and will not fold.
-        Uninitialized->C1->M1->C2->M2: Compiler sees Uninitialized and will not fold.
-        Uninitialized->C1->M1->M2->C2: Compiler sees Uninitialized and will not fold.
-        Uninitialized->M1->C1->M2->C2: Compiler sees Uninitialized and will not fold.
-        
-        IsWatched->M1->M2->C1->C2: Compiler sees IsInvalidated and will not fold.
-        
-        IsWatched->M1->C1->C2->M2: Compiler will fold, but will also register a desired
-            watchpoint, and that watchpoint will get invalidated before the code is installed.
-        
-        IsWatched->M1->C1->M2->C2: As above, will fold but the code will get invalidated.
-        IsWatched->C1->C2->M1->M2: As above, will fold but the code will get invalidated.
-        IsWatched->C1->M1->C2->M2: As above, will fold but the code will get invalidated.
-        IsWatched->C1->M1->M2->C2: As above, will fold but the code will get invalidated.
-        
-        Note that this kind of reasoning shows why having the mutator first bump the state and
-        then store the new value would be wrong. If we had done that (M1 = bump state, M2 =
-        execute put) then we could have the following deadly interleavings:
-        
-        Uninitialized->M1->C1->C2->M2:
-        Uninitialized->M1->C1->M2->C2: Mutator bumps the state to IsWatched and then the
-            compiler folds Undefined, since M2 hasn't executed yet. Although C2 will set the
-            watchpoint, M1 didn't notify it - it merely initiated watching. M2 then stores a
-            value other than Undefined, and you're toast.
-        
-        You could fix this sort of thing by making the Desired Watchpoints machinery more
-        sophisticated, for example having it track the value that was folded; if the global
-        variable's value was later found to be different then we could invalidate the
-        compilation. You could also fix it by having the compiler also check that the value of
-        the variable is not Undefined before folding. While those all sound great, I decided
-        to instead just use the right interleaving since that results in less code and feels
-        more intuitive.
-        
-        This is a 0.5% speed-up on SunSpider, mostly due to a 20% speed-up on math-cordic.
-        It's a 0.6% slow-down on LongSpider, mostly due to a 25% slow-down on 3d-cube. This is
-        because 3d-cube takes global variable assignment slow paths very often. Note that this
-        3d-cube slow-down doesn't manifest as much in SunSpider (only 6% there). This patch is
-        also a 1.5% speed-up on V8v7 and a 2.8% speed-up on Octane v1, mostly due to deltablue
-        (3.7%), richards (4%), and mandreel (26%). This is a 2% speed-up on Kraken, mostly due
-        to a 17.5% speed-up on imaging-gaussian-blur. Something that really illustrates the
-        slam-dunk-itude of this patch is the wide range of speed-ups on JSRegress. Casual JS
-        programming often leads to global-var-based idioms and those variables tend to be
-        assigned once, leading to excellent constant folding opportunities in an optimizing
-        JIT. This is very evident in the speed-ups on JSRegress.
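
        A toy Python model of the M1/M2 versus C1/C2 ordering argued above (not
        JSC code; the fences are marked as comments because plain Python cannot
        express them):

            CLEAR_WATCHPOINT, IS_WATCHING, IS_INVALIDATED = range(3)

            def mutator_put(var, new_value):
                var['value'] = new_value                 # M1: execute the put
                # --- store-store fence belongs here ---
                if var['state'] == CLEAR_WATCHPOINT:     # M2: bump the state
                    var['state'] = IS_WATCHING
                else:
                    var['state'] = IS_INVALIDATED        # would fire watchpoints

            def compiler_try_fold(var):
                if var['state'] != IS_WATCHING:          # C1: check the state first
                    return None                          # give up on constant inference
                # --- load-load fence belongs here ---
                return var['value']                      # C2: fold this value and register a watchpoint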
-
-        * assembler/ARM64Assembler.h:
-        (JSC::ARM64Assembler::dmbSY):
-        * assembler/ARMv7Assembler.h:
-        (JSC::ARMv7Assembler::dmbSY):
-        * assembler/MacroAssemblerARM64.h:
-        (JSC::MacroAssemblerARM64::memfence):
-        * assembler/MacroAssemblerARMv7.h:
-        (JSC::MacroAssemblerARMv7::load8):
-        (JSC::MacroAssemblerARMv7::memfence):
-        * assembler/MacroAssemblerX86.h:
-        (JSC::MacroAssemblerX86::load8):
-        (JSC::MacroAssemblerX86::store8):
-        * assembler/MacroAssemblerX86Common.h:
-        (JSC::MacroAssemblerX86Common::getUnusedRegister):
-        (JSC::MacroAssemblerX86Common::store8):
-        (JSC::MacroAssemblerX86Common::memoryFence):
-        * assembler/MacroAssemblerX86_64.h:
-        (JSC::MacroAssemblerX86_64::load8):
-        (JSC::MacroAssemblerX86_64::store8):
-        * assembler/X86Assembler.h:
-        (JSC::X86Assembler::movb_rm):
-        (JSC::X86Assembler::movzbl_mr):
-        (JSC::X86Assembler::mfence):
-        (JSC::X86Assembler::X86InstructionFormatter::threeByteOp):
-        (JSC::X86Assembler::X86InstructionFormatter::oneByteOp8):
-        * bytecode/CodeBlock.cpp:
-        (JSC::CodeBlock::CodeBlock):
-        * bytecode/Watchpoint.cpp:
-        (JSC::WatchpointSet::WatchpointSet):
-        (JSC::WatchpointSet::add):
-        (JSC::WatchpointSet::notifyWriteSlow):
-        * bytecode/Watchpoint.h:
-        (JSC::WatchpointSet::state):
-        (JSC::WatchpointSet::isStillValid):
-        (JSC::WatchpointSet::addressOfSetIsNotEmpty):
-        * dfg/DFGAbstractInterpreterInlines.h:
-        (JSC::DFG::::executeEffects):
-        * dfg/DFGByteCodeParser.cpp:
-        (JSC::DFG::ByteCodeParser::getJSConstantForValue):
-        (JSC::DFG::ByteCodeParser::getJSConstant):
-        (JSC::DFG::ByteCodeParser::parseBlock):
-        * dfg/DFGClobberize.h:
-        (JSC::DFG::clobberize):
-        * dfg/DFGFixupPhase.cpp:
-        (JSC::DFG::FixupPhase::fixupNode):
-        * dfg/DFGNode.h:
-        (JSC::DFG::Node::isStronglyProvedConstantIn):
-        (JSC::DFG::Node::hasIdentifierNumberForCheck):
-        (JSC::DFG::Node::hasRegisterPointer):
-        * dfg/DFGNodeFlags.h:
-        * dfg/DFGNodeType.h:
-        * dfg/DFGOperations.cpp:
-        * dfg/DFGOperations.h:
-        * dfg/DFGPredictionPropagationPhase.cpp:
-        (JSC::DFG::PredictionPropagationPhase::propagate):
-        * dfg/DFGSafeToExecute.h:
-        (JSC::DFG::safeToExecute):
-        * dfg/DFGSpeculativeJIT.cpp:
-        (JSC::DFG::SpeculativeJIT::compileNotifyPutGlobalVar):
-        * dfg/DFGSpeculativeJIT.h:
-        (JSC::DFG::SpeculativeJIT::callOperation):
-        * dfg/DFGSpeculativeJIT32_64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * dfg/DFGSpeculativeJIT64.cpp:
-        (JSC::DFG::SpeculativeJIT::compile):
-        * ftl/FTLAbbreviatedTypes.h:
-        * ftl/FTLAbbreviations.h:
-        (JSC::FTL::buildFence):
-        * ftl/FTLCapabilities.cpp:
-        (JSC::FTL::canCompile):
-        * ftl/FTLIntrinsicRepository.h:
-        * ftl/FTLLowerDFGToLLVM.cpp:
-        (JSC::FTL::LowerDFGToLLVM::compileNode):
-        (JSC::FTL::LowerDFGToLLVM::compileNotifyPutGlobalVar):
-        * ftl/FTLOutput.h:
-        (JSC::FTL::Output::fence):
-        * jit/JIT.h:
-        * jit/JITOperations.h:
-        * jit/JITPropertyAccess.cpp:
-        (JSC::JIT::emitPutGlobalVar):
-        (JSC::JIT::emit_op_put_to_scope):
-        (JSC::JIT::emitSlow_op_put_to_scope):
-        * jit/JITPropertyAccess32_64.cpp:
-        (JSC::JIT::emitPutGlobalVar):
-        (JSC::JIT::emit_op_put_to_scope):
-        (JSC::JIT::emitSlow_op_put_to_scope):
-        * llint/LowLevelInterpreter32_64.asm:
-        * llint/LowLevelInterpreter64.asm:
-        * llvm/LLVMAPIFunctions.h:
-        * offlineasm/arm.rb:
-        * offlineasm/arm64.rb:
-        * offlineasm/cloop.rb:
-        * offlineasm/instructions.rb:
-        * offlineasm/x86.rb:
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::addGlobalVar):
-        (JSC::JSGlobalObject::addFunction):
-        * runtime/JSGlobalObject.h:
-        (JSC::JSGlobalObject::addVar):
-        (JSC::JSGlobalObject::addConst):
-        * runtime/JSScope.cpp:
-        (JSC::abstractAccess):
-        * runtime/JSSymbolTableObject.h:
-        (JSC::symbolTablePut):
-        (JSC::symbolTablePutWithAttributes):
-        * runtime/SymbolTable.cpp:
-        (JSC::SymbolTableEntry::couldBeWatched):
-        (JSC::SymbolTableEntry::prepareToWatch):
-        (JSC::SymbolTableEntry::notifyWriteSlow):
-        * runtime/SymbolTable.h:
-
-2013-11-19  Michael Saboff  
-
-        REGRESSION(158384) ARMv7 pointer checks too restrictive for native calls to traditional ARM code
-        https://bugs.webkit.org/show_bug.cgi?id=124612
-
-        Reviewed by Geoffrey Garen.
-
-        Removed ASSERT checks (i.e. lower bit set) for ARM Thumb2 destination addresses related to
-        calls since we are calling native ARM traditional functions like sin() and cos().
-
-        * assembler/ARMv7Assembler.h:
-        (JSC::ARMv7Assembler::linkCall):
-        (JSC::ARMv7Assembler::relinkCall):
-        * assembler/MacroAssemblerCodeRef.h:
-
-2013-11-19  Commit Queue  
-
-        Unreviewed, rolling out r159459.
-        http://trac.webkit.org/changeset/159459
-        https://bugs.webkit.org/show_bug.cgi?id=124616
-
-        tons of assertions on launch (Requested by thorton on
-        #webkit).
-
-        * API/JSContext.mm:
-        (-[JSContext setException:]):
-        (-[JSContext wrapperForObjCObject:]):
-        (-[JSContext wrapperForJSObject:]):
-        * API/JSContextRef.cpp:
-        (JSContextGroupRelease):
-        (JSGlobalContextRelease):
-        * API/JSManagedValue.mm:
-        (-[JSManagedValue initWithValue:]):
-        (-[JSManagedValue value]):
-        * API/JSObjectRef.cpp:
-        (JSObjectIsFunction):
-        (JSObjectCopyPropertyNames):
-        * API/JSValue.mm:
-        (containerValueToObject):
-        * API/JSWrapperMap.mm:
-        (tryUnwrapObjcObject):
-
-2013-11-19  Filip Pizlo  
-
-        Rename WatchpointSet::notifyWrite() to WatchpointSet::fireAll()
-        https://bugs.webkit.org/show_bug.cgi?id=124609
-
-        Rubber stamped by Mark Lam.
-        
-        notifyWrite() is a thing that SymbolTable does. WatchpointSet uses that terminology
-        because it was originally designed to exactly match SymbolTable's semantics. But now
-        it's a confusing term.
-
-        * bytecode/Watchpoint.cpp:
-        (JSC::WatchpointSet::fireAllSlow):
-        * bytecode/Watchpoint.h:
-        (JSC::WatchpointSet::fireAll):
-        (JSC::InlineWatchpointSet::fireAll):
-        * interpreter/Interpreter.cpp:
-        (JSC::Interpreter::execute):
-        * runtime/JSFunction.cpp:
-        (JSC::JSFunction::put):
-        (JSC::JSFunction::defineOwnProperty):
-        * runtime/JSGlobalObject.cpp:
-        (JSC::JSGlobalObject::haveABadTime):
-        * runtime/Structure.h:
-        (JSC::Structure::notifyTransitionFromThisStructure):
-        * runtime/SymbolTable.cpp:
-        (JSC::SymbolTableEntry::notifyWriteSlow):
-
-2013-11-18  Michael Saboff  
-
-        REGRESSION (r159395): Error compiling for ARMv7
-        https://bugs.webkit.org/show_bug.cgi?id=124552
-
-        Reviewed by Geoffrey Garen.
-
-        Fixed the implementation of branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
-        to materialize and use the address the same way as the other ARMv7 branchXX() functions.
-
-        * assembler/MacroAssemblerARMv7.h:
-        (JSC::MacroAssemblerARMv7::branch8):
-
-2013-11-19  Mark Lam  
-
-        Add tracking of endColumn for Executables.
-        https://bugs.webkit.org/show_bug.cgi?id=124245.
-
-        Reviewed by Geoffrey Garen.
-
-        1. Fixed computation of columns to take into account the startColumn from
-           
+
+import cssmin
+import jsmin
+import os.path
+import re
+import sys
+
+
+def main(argv):
+
+    if len(argv) < 3:
+        print('usage: %s inputFile outputFile' % argv[0])
+        return 1
+
+    inputFileName = argv[1]
+    outputFileName = argv[2]
+    importsDir = os.path.dirname(inputFileName)
+
+    inputFile = open(inputFileName, 'r')
+    inputContent = inputFile.read()
+    inputFile.close()
+
+    def inline(match, minifier, prefix, postfix):
+        importFileName = match.group(1)
+        fullPath = os.path.join(importsDir, importFileName)
+        if not os.access(fullPath, os.F_OK):
+            raise Exception('File %s referenced in %s not found' % (importFileName, inputFileName))
+        importFile = open(fullPath, 'r')
+        importContent = minifier(importFile.read())
+        importFile.close()
+        return '%s%s%s' % (prefix, importContent, postfix)
+
+    def inlineStylesheet(match):
+        return inline(match, cssmin.cssminify, "<style>", "</style>")
+
+    def inlineScript(match):
+        return inline(match, jsmin.jsmin, "<script>", "</script>")
+
+    outputContent = re.sub(r'<link rel="stylesheet" href="([^"]+)">', inlineStylesheet, inputContent)
+    outputContent = re.sub(r'<script src="([^"]+)"></script>', inlineScript, outputContent)
+
+    outputFile = open(outputFileName, 'w')
+    outputFile.write(outputContent)
+    outputFile.close()
+
+    # Touch output file directory to make sure that Xcode will copy
+    # modified resource files.
+    if sys.platform == 'darwin':
+        outputDirName = os.path.dirname(outputFileName)
+        os.utime(outputDirName, None)
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
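
Assuming the patterns reconstructed above, a quick illustration of the
stylesheet substitution (the file name is made up):

    import re

    markup = '<link rel="stylesheet" href="main.css">'
    inlined = re.sub(r'<link rel="stylesheet" href="([^"]+)">',
                     lambda m: '<style>/* minified %s */</style>' % m.group(1),
                     markup)
    print(inlined)  # <style>/* minified main.css */</style>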
diff --git a/Source/JavaScriptCore/Scripts/jsmin.py b/Source/JavaScriptCore/Scripts/jsmin.py
new file mode 100644
index 000000000..372418b4d
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/jsmin.py
@@ -0,0 +1,238 @@
+# This code is original from jsmin by Douglas Crockford, it was translated to
+# Python by Baruch Even. It was rewritten by Dave St.Germain for speed.
+#
+# The MIT License (MIT)
+#
+# Copyright (c) 2013 Dave St.Germain
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+
+import sys
+is_3 = sys.version_info >= (3, 0)
+if is_3:
+    import io
+else:
+    import StringIO
+    try:
+        import cStringIO
+    except ImportError:
+        cStringIO = None
+
+
+__all__ = ['jsmin', 'JavascriptMinify']
+__version__ = '2.0.9'
+
+
+def jsmin(js):
+    """
+    returns a minified version of the javascript string
+    """
+    if not is_3:
+        if cStringIO and not isinstance(js, unicode):
+            # strings can use cStringIO for a 3x performance
+            # improvement, but unicode (in python2) cannot
+            klass = cStringIO.StringIO
+        else:
+            klass = StringIO.StringIO
+    else:
+        klass = io.StringIO
+    ins = klass(js)
+    outs = klass()
+    JavascriptMinify(ins, outs).minify()
+    return outs.getvalue()
+
+
+class JavascriptMinify(object):
+    """
+    Minify an input stream of javascript, writing
+    to an output stream
+    """
+
+    def __init__(self, instream=None, outstream=None):
+        self.ins = instream
+        self.outs = outstream
+
+    def minify(self, instream=None, outstream=None):
+        if instream and outstream:
+            self.ins, self.outs = instream, outstream
+
+        self.is_return = False
+        self.return_buf = ''
+
+        def write(char):
+            # all of this is to support literal regular expressions.
+            # sigh
+            if char in 'return':
+                self.return_buf += char
+                self.is_return = self.return_buf == 'return'
+            self.outs.write(char)
+            if self.is_return:
+                self.return_buf = ''
+
+        read = self.ins.read
+
+        space_strings = "abcdefghijklmnopqrstuvwxyz"\
+        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\"
+        starters, enders = '{[(+-', '}])+-"\''
+        newlinestart_strings = starters + space_strings
+        newlineend_strings = enders + space_strings
+        do_newline = False
+        do_space = False
+        escape_slash_count = 0
+        doing_single_comment = False
+        previous_before_comment = ''
+        doing_multi_comment = False
+        in_re = False
+        in_quote = ''
+        quote_buf = []
+
+        previous = read(1)
+        if previous == '\\':
+            escape_slash_count += 1
+        next1 = read(1)
+        if previous == '/':
+            if next1 == '/':
+                doing_single_comment = True
+            elif next1 == '*':
+                doing_multi_comment = True
+                previous = next1
+                next1 = read(1)
+            else:
+                write(previous)
+        elif not previous:
+            return
+        elif previous >= '!':
+            if previous in "'\"":
+                in_quote = previous
+            write(previous)
+            previous_non_space = previous
+        else:
+            previous_non_space = ' '
+        if not next1:
+            return
+
+        while 1:
+            next2 = read(1)
+            if not next2:
+                last = next1.strip()
+                if not (doing_single_comment or doing_multi_comment)\
+                    and last not in ('', '/'):
+                    if in_quote:
+                        write(''.join(quote_buf))
+                    write(last)
+                break
+            if doing_multi_comment:
+                if next1 == '*' and next2 == '/':
+                    doing_multi_comment = False
+                    next2 = read(1)
+            elif doing_single_comment:
+                if next1 in '\r\n':
+                    doing_single_comment = False
+                    while next2 in '\r\n':
+                        next2 = read(1)
+                        if not next2:
+                            break
+                    if previous_before_comment in ')}]':
+                        do_newline = True
+                    elif previous_before_comment in space_strings:
+                        write('\n')
+            elif in_quote:
+                quote_buf.append(next1)
+
+                if next1 == in_quote:
+                    numslashes = 0
+                    for c in reversed(quote_buf[:-1]):
+                        if c != '\\':
+                            break
+                        else:
+                            numslashes += 1
+                    if numslashes % 2 == 0:
+                        in_quote = ''
+                        write(''.join(quote_buf))
+            elif next1 in '\r\n':
+                if previous_non_space in newlineend_strings \
+                    or previous_non_space > '~':
+                    while 1:
+                        if next2 < '!':
+                            next2 = read(1)
+                            if not next2:
+                                break
+                        else:
+                            if next2 in newlinestart_strings \
+                                or next2 > '~' or next2 == '/':
+                                do_newline = True
+                            break
+            elif next1 < '!' and not in_re:
+                if (previous_non_space in space_strings \
+                    or previous_non_space > '~') \
+                    and (next2 in space_strings or next2 > '~'):
+                    do_space = True
+                elif previous_non_space in '-+' and next2 == previous_non_space:
+                    # protect against + ++ or - -- sequences
+                    do_space = True
+                elif self.is_return and next2 == '/':
+                    # returning a regex...
+                    write(' ')
+            elif next1 == '/':
+                if do_space:
+                    write(' ')
+                if in_re:
+                    if previous != '\\' or (not escape_slash_count % 2) or next2 in 'gimy':
+                        in_re = False
+                    write('/')
+                elif next2 == '/':
+                    doing_single_comment = True
+                    previous_before_comment = previous_non_space
+                elif next2 == '*':
+                    doing_multi_comment = True
+                    previous = next1
+                    next1 = next2
+                    next2 = read(1)
+                else:
+                    in_re = previous_non_space in '(,=:[?!&|' or self.is_return  # literal regular expression
+                    write('/')
+            else:
+                if do_space:
+                    do_space = False
+                    write(' ')
+                if do_newline:
+                    write('\n')
+                    do_newline = False
+
+                write(next1)
+                if not in_re and next1 in "'\"`":
+                    in_quote = next1
+                    quote_buf = []
+
+            previous = next1
+            next1 = next2
+
+            if previous >= '!':
+                previous_non_space = previous
+
+            if previous == '\\':
+                escape_slash_count += 1
+            else:
+                escape_slash_count = 0
+
+if __name__ == '__main__':
+    minifier = JavascriptMinify(sys.stdin, sys.stdout)
+    minifier.minify()
+    sys.stdout.write('\n')
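
A small usage sketch for the module above; the exact output whitespace is an
implementation detail:

    from jsmin import jsmin  # assumes this file is importable as jsmin.py

    source = 'function add( a, b ) {\n    // sum the operands\n    return a + b;\n}'
    print(jsmin(source))     # comments stripped, runs of whitespace collapsed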
diff --git a/Source/JavaScriptCore/Scripts/lazywriter.py b/Source/JavaScriptCore/Scripts/lazywriter.py
new file mode 100644
index 000000000..f93a2c697
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/lazywriter.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# A writer that only updates file if it actually changed.
+
+
+class LazyFileWriter:
+    def __init__(self, filepath, force_output):
+        self._filepath = filepath
+        self._output = ""
+        self.force_output = force_output
+
+    def write(self, text):
+        self._output += text
+
+    def close(self):
+        text_changed = True
+        self._output = self._output.rstrip() + "\n"
+
+        try:
+            if self.force_output:
+                # Bare raise (with no active exception) throws, landing in the
+                # except clause below and leaving text_changed as True.
+                raise
+
+            read_file = open(self._filepath, "r")
+            old_text = read_file.read()
+            read_file.close()
+            text_changed = old_text != self._output
+        except:
+            # Ignore, just overwrite by default
+            pass
+
+        if text_changed or self.force_output:
+            out_file = open(self._filepath, "w")
+            out_file.write(self._output)
+            out_file.close()
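
Typical use of the writer above from a generator script (the file name is
illustrative):

    writer = LazyFileWriter('GeneratedHeader.h', force_output=False)
    writer.write('#pragma once\n')
    writer.close()  # touches GeneratedHeader.h only if its contents changed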
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Combined.js b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Combined.js
new file mode 100644
index 000000000..b45d81ceb
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Combined.js
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function rejectPromise(promise, reason)
+{
+    "use strict";
+
+    var reactions = promise.@promiseRejectReactions;
+    promise.@promiseResult = reason;
+    promise.@promiseFulfillReactions = undefined;
+    promise.@promiseRejectReactions = undefined;
+    promise.@promiseState = @promiseRejected;
+
+    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);
+
+    @triggerPromiseReactions(reactions, reason);
+}
+
+function fulfillPromise(promise, value)
+{
+    "use strict";
+
+    var reactions = promise.@promiseFulfillReactions;
+    promise.@promiseResult = value;
+    promise.@promiseFulfillReactions = undefined;
+    promise.@promiseRejectReactions = undefined;
+    promise.@promiseState = @promiseFulfilled;
+
+    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);
+
+    @triggerPromiseReactions(reactions, value);
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Separate.js
new file mode 100644
index 000000000..b45d81ceb
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Separate.js
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function rejectPromise(promise, reason)
+{
+    "use strict";
+
+    var reactions = promise.@promiseRejectReactions;
+    promise.@promiseResult = reason;
+    promise.@promiseFulfillReactions = undefined;
+    promise.@promiseRejectReactions = undefined;
+    promise.@promiseState = @promiseRejected;
+
+    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);
+
+    @triggerPromiseReactions(reactions, reason);
+}
+
+function fulfillPromise(promise, value)
+{
+    "use strict";
+
+    var reactions = promise.@promiseFulfillReactions;
+    promise.@promiseResult = value;
+    promise.@promiseFulfillReactions = undefined;
+    promise.@promiseRejectReactions = undefined;
+    promise.@promiseState = @promiseFulfilled;
+
+    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);
+
+    @triggerPromiseReactions(reactions, value);
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Combined.js b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Combined.js
new file mode 100644
index 000000000..5448b9832
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Combined.js
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+function every(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null)
+        throw new @TypeError("Array.prototype.every requires that |this| not be null");
+    
+    if (this === undefined)
+        throw new @TypeError("Array.prototype.every requires that |this| not be undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        throw new @TypeError("Array.prototype.every callback must be a function");
+    
+    var thisArg = arguments.length > 1 ? arguments[1] : undefined;
+    
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        if (!callback.@call(thisArg, array[i], i, array))
+            return false;
+    }
+    
+    return true;
+}
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null)
+        throw new @TypeError("Array.prototype.forEach requires that |this| not be null");
+    
+    if (this === undefined)
+        throw new @TypeError("Array.prototype.forEach requires that |this| not be undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        throw new @TypeError("Array.prototype.forEach callback must be a function");
+    
+    var thisArg = arguments.length > 1 ? arguments[1] : undefined;
+    
+    for (var i = 0; i < length; i++) {
+        if (i in array)
+            callback.@call(thisArg, array[i], i, array);
+    }
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Separate.js
new file mode 100644
index 000000000..5448b9832
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Separate.js
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+function every(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null)
+        throw new @TypeError("Array.prototype.every requires that |this| not be null");
+    
+    if (this === undefined)
+        throw new @TypeError("Array.prototype.every requires that |this| not be undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        throw new @TypeError("Array.prototype.every callback must be a function");
+    
+    var thisArg = arguments.length > 1 ? arguments[1] : undefined;
+    
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        if (!callback.@call(thisArg, array[i], i, array))
+            return false;
+    }
+    
+    return true;
+}
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null)
+        throw new @TypeError("Array.prototype.forEach requires that |this| not be null");
+    
+    if (this === undefined)
+        throw new @TypeError("Array.prototype.forEach requires that |this| not be undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        throw new @TypeError("Array.prototype.forEach callback must be a function");
+    
+    var thisArg = arguments.length > 1 ? arguments[1] : undefined;
+    
+    for (var i = 0; i < length; i++) {
+        if (i in array)
+            callback.@call(thisArg, array[i], i, array);
+    }
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Combined.js b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Combined.js
new file mode 100644
index 000000000..9e8c1b449
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Combined.js
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function of(/* items... */)
+{
+    "use strict";
+
+    var length = arguments.length;
+    // TODO: Need isConstructor(this) instead of typeof "function" check.
+    var array = typeof this === 'function' ? new this(length) : new @Array(length);
+    for (var k = 0; k < length; ++k)
+        @putByValDirect(array, k, arguments[k]);
+    array.length = length;
+    return array;
+}
+
+function from(items /*, mapFn, thisArg */)
+{
+    "use strict";
+
+    var thisObj = this;
+
+    var mapFn = arguments.length > 1 ? arguments[1] : undefined;
+
+    var thisArg;
+
+    if (mapFn !== undefined) {
+        if (typeof mapFn !== "function")
+            throw new @TypeError("Array.from requires that the second argument, when provided, be a function");
+
+        if (arguments.length > 2)
+            thisArg = arguments[2];
+    }
+
+    if (items == null)
+        throw new @TypeError("Array.from requires an array-like object - not null or undefined");
+
+    var iteratorMethod = items[@symbolIterator];
+    if (iteratorMethod != null) {
+        if (typeof iteratorMethod !== "function")
+            throw new @TypeError("Array.from requires that the property of the first argument, items[Symbol.iterator], when it exists, be a function");
+
+        // TODO: Need isConstructor(thisObj) instead of typeof "function" check.
+        var result = (typeof thisObj === "function") ? @Object(new thisObj()) : [];
+
+        var k = 0;
+        var iterator = iteratorMethod.@call(items);
+
+        // Since a for-of loop performs its own lookup of the iterable's @@iterator property,
+        // a user-defined getter for @@iterator could observe that second lookup.
+        // To avoid this, we wrap the iterator in an object whose @@iterator method simply returns it.
+        var wrapper = {
+            [@symbolIterator]() {
+                return iterator;
+            }
+        };
+
+        for (var value of wrapper) {
+            if (mapFn)
+                @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+            else
+                @putByValDirect(result, k, value);
+            k += 1;
+        }
+
+        result.length = k;
+        return result;
+    }
+
+    var arrayLike = @Object(items);
+    var arrayLikeLength = @toLength(arrayLike.length);
+
+    // TODO: Need isConstructor(thisObj) instead of typeof "function" check.
+    var result = (typeof thisObj === "function") ? @Object(new thisObj(arrayLikeLength)) : new @Array(arrayLikeLength);
+
+    var k = 0;
+    while (k < arrayLikeLength) {
+        var value = arrayLike[k];
+        if (mapFn)
+            @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+        else
+            @putByValDirect(result, k, value);
+        k += 1;
+    }
+
+    result.length = arrayLikeLength;
+    return result;
+}
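
The comment inside `from` points out that a for-of loop performs its own lookup of `[Symbol.iterator]`, which a user-defined getter could observe; the wrapper object guarantees that the method fetched once at the top is the one actually consumed. A standalone sketch of the behavior being avoided (plain JavaScript; the counting getter is purely illustrative):

    var lookups = 0;
    var iterable = {};
    Object.defineProperty(iterable, Symbol.iterator, {
        get: function() {
            lookups++;
            return function() { return [10, 20][Symbol.iterator](); };
        }
    });
    Array.from(iterable);  // [10, 20]
    console.log(lookups);  // 1: the builtin reads the getter once and hands
                           // the cached iterator to for-of via the wrapper.
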
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Separate.js
new file mode 100644
index 000000000..9e8c1b449
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Separate.js
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function of(/* items... */)
+{
+    "use strict";
+
+    var length = arguments.length;
+    // TODO: Need isConstructor(this) instead of typeof "function" check.
+    var array = typeof this === 'function' ? new this(length) : new @Array(length);
+    for (var k = 0; k < length; ++k)
+        @putByValDirect(array, k, arguments[k]);
+    array.length = length;
+    return array;
+}
+
+function from(items /*, mapFn, thisArg */)
+{
+    "use strict";
+
+    var thisObj = this;
+
+    var mapFn = arguments.length > 1 ? arguments[1] : undefined;
+
+    var thisArg;
+
+    if (mapFn !== undefined) {
+        if (typeof mapFn !== "function")
+            throw new @TypeError("Array.from requires that the second argument, when provided, be a function");
+
+        if (arguments.length > 2)
+            thisArg = arguments[2];
+    }
+
+    if (items == null)
+        throw new @TypeError("Array.from requires an array-like object - not null or undefined");
+
+    var iteratorMethod = items[@symbolIterator];
+    if (iteratorMethod != null) {
+        if (typeof iteratorMethod !== "function")
+            throw new @TypeError("Array.from requires that the property of the first argument, items[Symbol.iterator], when it exists, be a function");
+
+        // TODO: Need isConstructor(thisObj) instead of typeof "function" check.
+        var result = (typeof thisObj === "function") ? @Object(new thisObj()) : [];
+
+        var k = 0;
+        var iterator = iteratorMethod.@call(items);
+
+        // Since a for-of loop performs its own lookup of the iterable's @@iterator property,
+        // a user-defined getter for @@iterator could observe that second lookup.
+        // To avoid this, we wrap the iterator in an object whose @@iterator method simply returns it.
+        var wrapper = {
+            [@symbolIterator]() {
+                return iterator;
+            }
+        };
+
+        for (var value of wrapper) {
+            if (mapFn)
+                @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+            else
+                @putByValDirect(result, k, value);
+            k += 1;
+        }
+
+        result.length = k;
+        return result;
+    }
+
+    var arrayLike = @Object(items);
+    var arrayLikeLength = @toLength(arrayLike.length);
+
+    // TODO: Need isConstructor(thisObj) instead of typeof "function" check.
+    var result = (typeof thisObj === "function") ? @Object(new thisObj(arrayLikeLength)) : new @Array(arrayLikeLength);
+
+    var k = 0;
+    while (k < arrayLikeLength) {
+        var value = arrayLike[k];
+        if (mapFn)
+            @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+        else
+            @putByValDirect(result, k, value);
+        k += 1;
+    }
+
+    result.length = arrayLikeLength;
+    return result;
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-InternalClashingNames-Combined.js b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-InternalClashingNames-Combined.js
new file mode 100644
index 000000000..0a436cf10
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/JavaScriptCore-InternalClashingNames-Combined.js
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CANON INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL CANON INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @internal
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
+
+// Testing clashing names (emulating functions with the same name defined in different files)
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-AnotherGuardedInternalBuiltin-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-AnotherGuardedInternalBuiltin-Separate.js
new file mode 100644
index 000000000..c5fae3fe2
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-AnotherGuardedInternalBuiltin-Separate.js
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(FETCH_API)
+// @internal
+
+function letsFetch()
+{
+   "use strict";
+
+    return @fetchRequest(new @Request("yes"));
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-ArbitraryConditionalGuard-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-ArbitraryConditionalGuard-Separate.js
new file mode 100644
index 000000000..c808b3c7f
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-ArbitraryConditionalGuard-Separate.js
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(STREAMS_API) || USE(CF)
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-DuplicateFlagAnnotation-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-DuplicateFlagAnnotation-Separate.js
new file mode 100644
index 000000000..73e7c71b9
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-DuplicateFlagAnnotation-Separate.js
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @internal
+// @internal
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-DuplicateKeyValueAnnotation-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-DuplicateKeyValueAnnotation-Separate.js
new file mode 100644
index 000000000..6d6fe604c
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-DuplicateKeyValueAnnotation-Separate.js
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(STREAMS_API)
+// @conditional=USE(CF)
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-GuardedBuiltin-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-GuardedBuiltin-Separate.js
new file mode 100644
index 000000000..2acec589d
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-GuardedBuiltin-Separate.js
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(STREAMS_API)
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-GuardedInternalBuiltin-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-GuardedInternalBuiltin-Separate.js
new file mode 100644
index 000000000..e95e0c2d5
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-GuardedInternalBuiltin-Separate.js
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+// @internal
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-UnguardedBuiltin-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-UnguardedBuiltin-Separate.js
new file mode 100644
index 000000000..9647f2bdd
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-UnguardedBuiltin-Separate.js
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-xmlCasingTest-Separate.js b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-xmlCasingTest-Separate.js
new file mode 100644
index 000000000..550c89e02
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/WebCore-xmlCasingTest-Separate.js
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(STREAMS_API)
+// @internal
+
+function xmlCasingTest(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
+
+
+function cssCasingTest(stream, reason)
+{
+    "use strict";
+
+    if (stream.@state === @readableStreamClosed)
+        return Promise.resolve();
+    if (stream.@state === @readableStreamErrored)
+        return Promise.reject(stream.@storedError);
+    stream.@queue = [];
+    @finishClosingReadableStream(stream);
+    return @promiseInvokeOrNoop(stream.@underlyingSource, "cancel", [reason]).then(function() { });
+}
+
+
+function urlCasingTest(object, key, args)
+{
+    "use strict";
+
+    try {
+        var method = object[key];
+        if (typeof method === "undefined")
+            return Promise.resolve();
+        var result = method.@apply(object, args);
+        return Promise.resolve(result);
+    }
+    catch (error) {
+        return Promise.reject(error);
+    }
+}
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result
new file mode 100644
index 000000000..9bb21d603
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result
@@ -0,0 +1,161 @@
+### Begin File: JSCBuiltins.h
+/*
+ * Copyright (c) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+namespace JSC {
+class FunctionExecutable;
+class VM;
+
+enum class ConstructAbility : unsigned;
+}
+
+namespace JSC {
+
+/* Builtin.Promise */
+extern const char* s_builtinPromiseRejectPromiseCode;
+extern const int s_builtinPromiseRejectPromiseCodeLength;
+extern const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility;
+extern const char* s_builtinPromiseFulfillPromiseCode;
+extern const int s_builtinPromiseFulfillPromiseCodeLength;
+extern const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTINPROMISE_BUILTIN_DATA(macro) \
+    macro(rejectPromise, builtinPromiseRejectPromise, 2) \
+    macro(fulfillPromise, builtinPromiseFulfillPromise, 2) \
+
+#define JSC_FOREACH_BUILTIN_CODE(macro) \
+    macro(builtinPromiseRejectPromiseCode, rejectPromise, s_builtinPromiseRejectPromiseCodeLength) \
+    macro(builtinPromiseFulfillPromiseCode, fulfillPromise, s_builtinPromiseFulfillPromiseCodeLength) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_NAME(macro) \
+    macro(fulfillPromise) \
+    macro(rejectPromise) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_PRIVATE_GLOBAL_NAME(macro) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: JSCBuiltins.h
+
+### Begin File: JSCBuiltins.cpp
+/*
+ * Copyright (c) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "JSCBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPromiseRejectPromiseCodeLength = 410;
+static const JSC::Intrinsic s_builtinPromiseRejectPromiseCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPromiseRejectPromiseCode =
+    "(function (promise, reason)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var reactions = promise.@promiseRejectReactions;\n" \
+    "    promise.@promiseResult = reason;\n" \
+    "    promise.@promiseFulfillReactions = undefined;\n" \
+    "    promise.@promiseRejectReactions = undefined;\n" \
+    "    promise.@promiseState = @promiseRejected;\n" \
+    "    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);\n" \
+    "    @triggerPromiseReactions(reactions, reason);\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPromiseFulfillPromiseCodeLength = 409;
+static const JSC::Intrinsic s_builtinPromiseFulfillPromiseCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPromiseFulfillPromiseCode =
+    "(function (promise, value)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var reactions = promise.@promiseFulfillReactions;\n" \
+    "    promise.@promiseResult = value;\n" \
+    "    promise.@promiseFulfillReactions = undefined;\n" \
+    "    promise.@promiseRejectReactions = undefined;\n" \
+    "    promise.@promiseState = @promiseFulfilled;\n" \
+    "    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);\n" \
+    "    @triggerPromiseReactions(reactions, value);\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: JSCBuiltins.cpp
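
The expected output embeds each builtin as a C string literal, one escaped source line per continuation line, alongside a precomputed length constant. A rough sketch of that flattening step (hypothetical JavaScript helper; the real generator is the Python script named in the generated header, and the exact length accounting is an assumption here, not its verified behavior):

    // Turn a builtin's source text into the C declarations seen above.
    function emitBuiltinCode(constantName, source) {
        var literalLines = source.split("\n").map(function(line) {
            // Escape backslashes and quotes, append an embedded newline,
            // and end each line with a C line-continuation backslash.
            var escaped = line.replace(/\\/g, "\\\\").replace(/"/g, '\\"');
            return '    "' + escaped + '\\n" \\';
        });
        return {
            length: "const int s_" + constantName + "Length = " + source.length + ";",
            code: "const char* s_" + constantName + " =\n" + literalLines.join("\n") + "\n;"
        };
    }
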
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result
new file mode 100644
index 000000000..87fdaee38
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result
@@ -0,0 +1,160 @@
+### Begin File: BuiltinPromiseBuiltins.h
+/*
+ * Copyright (c) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace JSC {
+
+/* Builtin.Promise */
+extern const char* s_builtinPromiseRejectPromiseCode;
+extern const int s_builtinPromiseRejectPromiseCodeLength;
+extern const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility;
+extern const char* s_builtinPromiseFulfillPromiseCode;
+extern const int s_builtinPromiseFulfillPromiseCodeLength;
+extern const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTIN_PROMISE_BUILTIN_DATA(macro) \
+    macro(rejectPromise, builtinPromiseRejectPromise, 2) \
+    macro(fulfillPromise, builtinPromiseFulfillPromise, 2) \
+
+#define JSC_BUILTIN_BUILTIN_PROMISE_REJECTPROMISE 1
+#define JSC_BUILTIN_BUILTIN_PROMISE_FULFILLPROMISE 1
+
+#define JSC_FOREACH_BUILTIN.PROMISE_BUILTIN_CODE(macro) \
+    macro(builtinPromiseRejectPromiseCode, rejectPromise, s_builtinPromiseRejectPromiseCodeLength) \
+    macro(builtinPromiseFulfillPromiseCode, fulfillPromise, s_builtinPromiseFulfillPromiseCodeLength) \
+
+#define JSC_FOREACH_BUILTIN.PROMISE_BUILTIN_FUNCTION_NAME(macro) \
+    macro(fulfillPromise) \
+    macro(rejectPromise) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN.PROMISE_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: BuiltinPromiseBuiltins.h
+
+### Begin File: BuiltinPromiseBuiltins.cpp
+/*
+ * Copyright (c) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "BuiltinPromiseBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPromiseRejectPromiseCodeLength = 410;
+static const JSC::Intrinsic s_builtinPromiseRejectPromiseCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPromiseRejectPromiseCode =
+    "(function (promise, reason)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var reactions = promise.@promiseRejectReactions;\n" \
+    "    promise.@promiseResult = reason;\n" \
+    "    promise.@promiseFulfillReactions = undefined;\n" \
+    "    promise.@promiseRejectReactions = undefined;\n" \
+    "    promise.@promiseState = @promiseRejected;\n" \
+    "    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);\n" \
+    "    @triggerPromiseReactions(reactions, reason);\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPromiseFulfillPromiseCodeLength = 409;
+static const JSC::Intrinsic s_builtinPromiseFulfillPromiseCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPromiseFulfillPromiseCode =
+    "(function (promise, value)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var reactions = promise.@promiseFulfillReactions;\n" \
+    "    promise.@promiseResult = value;\n" \
+    "    promise.@promiseFulfillReactions = undefined;\n" \
+    "    promise.@promiseRejectReactions = undefined;\n" \
+    "    promise.@promiseState = @promiseFulfilled;\n" \
+    "    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);\n" \
+    "    @triggerPromiseReactions(reactions, value);\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN.PROMISE_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: BuiltinPromiseBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result
new file mode 100644
index 000000000..6bf696fdd
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result
@@ -0,0 +1,185 @@
+### Begin File: JSCBuiltins.h
+/*
+ * Copyright (c) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+namespace JSC {
+class FunctionExecutable;
+class VM;
+
+enum class ConstructAbility : unsigned;
+}
+
+namespace JSC {
+
+/* Builtin.prototype */
+extern const char* s_builtinPrototypeEveryCode;
+extern const int s_builtinPrototypeEveryCodeLength;
+extern const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility;
+extern const char* s_builtinPrototypeForEachCode;
+extern const int s_builtinPrototypeForEachCodeLength;
+extern const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTINPROTOTYPE_BUILTIN_DATA(macro) \
+    macro(every, builtinPrototypeEvery, 1) \
+    macro(forEach, builtinPrototypeForEach, 1) \
+
+#define JSC_FOREACH_BUILTIN_CODE(macro) \
+    macro(builtinPrototypeEveryCode, every, s_builtinPrototypeEveryCodeLength) \
+    macro(builtinPrototypeForEachCode, forEach, s_builtinPrototypeForEachCodeLength) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_NAME(macro) \
+    macro(every) \
+    macro(forEach) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_PRIVATE_GLOBAL_NAME(macro) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: JSCBuiltins.h
+
+### Begin File: JSCBuiltins.cpp
+/*
+ * Copyright (c) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "JSCBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPrototypeEveryCodeLength = 760;
+static const JSC::Intrinsic s_builtinPrototypeEveryCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPrototypeEveryCode =
+    "(function (callback )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (this === null)\n" \
+    "        throw new @TypeError(\"Array.prototype.every requires that |this| not be null\");\n" \
+    "    \n" \
+    "    if (this === undefined)\n" \
+    "        throw new @TypeError(\"Array.prototype.every requires that |this| not be undefined\");\n" \
+    "    \n" \
+    "    var array = @Object(this);\n" \
+    "    var length = @toLength(array.length);\n" \
+    "    if (typeof callback !== \"function\")\n" \
+    "        throw new @TypeError(\"Array.prototype.every callback must be a function\");\n" \
+    "    \n" \
+    "    var thisArg = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    \n" \
+    "    for (var i = 0; i < length; i++) {\n" \
+    "        if (!(i in array))\n" \
+    "            continue;\n" \
+    "        if (!callback.@call(thisArg, array[i], i, array))\n" \
+    "            return false;\n" \
+    "    }\n" \
+    "    \n" \
+    "    return true;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPrototypeForEachCodeLength = 692;
+static const JSC::Intrinsic s_builtinPrototypeForEachCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPrototypeForEachCode =
+    "(function (callback )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (this === null)\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach requires that |this| not be null\");\n" \
+    "    \n" \
+    "    if (this === undefined)\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach requires that |this| not be undefined\");\n" \
+    "    \n" \
+    "    var array = @Object(this);\n" \
+    "    var length = @toLength(array.length);\n" \
+    "    if (typeof callback !== \"function\")\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach callback must be a function\");\n" \
+    "    \n" \
+    "    var thisArg = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    \n" \
+    "    for (var i = 0; i < length; i++) {\n" \
+    "        if (i in array)\n" \
+    "            callback.@call(thisArg, array[i], i, array);\n" \
+    "    }\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: JSCBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result
new file mode 100644
index 000000000..d0c8f26ef
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result
@@ -0,0 +1,184 @@
+### Begin File: BuiltinPrototypeBuiltins.h
+/*
+ * Copyright (c) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace JSC {
+
+/* Builtin.prototype */
+extern const char* s_builtinPrototypeEveryCode;
+extern const int s_builtinPrototypeEveryCodeLength;
+extern const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility;
+extern const char* s_builtinPrototypeForEachCode;
+extern const int s_builtinPrototypeForEachCodeLength;
+extern const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTIN_PROTOTYPE_BUILTIN_DATA(macro) \
+    macro(every, builtinPrototypeEvery, 1) \
+    macro(forEach, builtinPrototypeForEach, 1) \
+
+#define JSC_BUILTIN_BUILTIN_PROTOTYPE_EVERY 1
+#define JSC_BUILTIN_BUILTIN_PROTOTYPE_FOREACH 1
+
+#define JSC_FOREACH_BUILTIN.PROTOTYPE_BUILTIN_CODE(macro) \
+    macro(builtinPrototypeEveryCode, every, s_builtinPrototypeEveryCodeLength) \
+    macro(builtinPrototypeForEachCode, forEach, s_builtinPrototypeForEachCodeLength) \
+
+#define JSC_FOREACH_BUILTIN.PROTOTYPE_BUILTIN_FUNCTION_NAME(macro) \
+    macro(every) \
+    macro(forEach) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN.PROTOTYPE_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: BuiltinPrototypeBuiltins.h
+
+### Begin File: BuiltinPrototypeBuiltins.cpp
+/*
+ * Copyright (c) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "BuiltinPrototypeBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPrototypeEveryCodeLength = 760;
+static const JSC::Intrinsic s_builtinPrototypeEveryCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPrototypeEveryCode =
+    "(function (callback )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (this === null)\n" \
+    "        throw new @TypeError(\"Array.prototype.every requires that |this| not be null\");\n" \
+    "    \n" \
+    "    if (this === undefined)\n" \
+    "        throw new @TypeError(\"Array.prototype.every requires that |this| not be undefined\");\n" \
+    "    \n" \
+    "    var array = @Object(this);\n" \
+    "    var length = @toLength(array.length);\n" \
+    "    if (typeof callback !== \"function\")\n" \
+    "        throw new @TypeError(\"Array.prototype.every callback must be a function\");\n" \
+    "    \n" \
+    "    var thisArg = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    \n" \
+    "    for (var i = 0; i < length; i++) {\n" \
+    "        if (!(i in array))\n" \
+    "            continue;\n" \
+    "        if (!callback.@call(thisArg, array[i], i, array))\n" \
+    "            return false;\n" \
+    "    }\n" \
+    "    \n" \
+    "    return true;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPrototypeForEachCodeLength = 692;
+static const JSC::Intrinsic s_builtinPrototypeForEachCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPrototypeForEachCode =
+    "(function (callback )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (this === null)\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach requires that |this| not be null\");\n" \
+    "    \n" \
+    "    if (this === undefined)\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach requires that |this| not be undefined\");\n" \
+    "    \n" \
+    "    var array = @Object(this);\n" \
+    "    var length = @toLength(array.length);\n" \
+    "    if (typeof callback !== \"function\")\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach callback must be a function\");\n" \
+    "    \n" \
+    "    var thisArg = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    \n" \
+    "    for (var i = 0; i < length; i++) {\n" \
+    "        if (i in array)\n" \
+    "            callback.@call(thisArg, array[i], i, array);\n" \
+    "    }\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN.PROTOTYPE_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: BuiltinPrototypeBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result
new file mode 100644
index 000000000..023a8298c
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result
@@ -0,0 +1,198 @@
+### Begin File: JSCBuiltins.h
+/*
+ * Copyright (c) 2015, 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+namespace JSC {
+class FunctionExecutable;
+class VM;
+
+enum class ConstructAbility : unsigned;
+}
+
+namespace JSC {
+
+/* BuiltinConstructor */
+extern const char* s_builtinConstructorOfCode;
+extern const int s_builtinConstructorOfCodeLength;
+extern const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility;
+extern const char* s_builtinConstructorFromCode;
+extern const int s_builtinConstructorFromCodeLength;
+extern const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_DATA(macro) \
+    macro(of, builtinConstructorOf, 0) \
+    macro(from, builtinConstructorFrom, 1) \
+
+#define JSC_FOREACH_BUILTIN_CODE(macro) \
+    macro(builtinConstructorOfCode, of, s_builtinConstructorOfCodeLength) \
+    macro(builtinConstructorFromCode, from, s_builtinConstructorFromCodeLength) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_NAME(macro) \
+    macro(from) \
+    macro(of) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_PRIVATE_GLOBAL_NAME(macro) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: JSCBuiltins.h
+
+### Begin File: JSCBuiltins.cpp
+/*
+ * Copyright (c) 2015, 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "JSCBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinConstructorOfCodeLength = 286;
+static const JSC::Intrinsic s_builtinConstructorOfCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinConstructorOfCode =
+    "(function ()\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var length = arguments.length;\n" \
+    "    var array = typeof this === 'function' ? new this(length) : new @Array(length);\n" \
+    "    for (var k = 0; k < length; ++k)\n" \
+    "        @putByValDirect(array, k, arguments[k]);\n" \
+    "    array.length = length;\n" \
+    "    return array;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinConstructorFromCodeLength = 1979;
+static const JSC::Intrinsic s_builtinConstructorFromCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinConstructorFromCode =
+    "(function (items )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var thisObj = this;\n" \
+    "    var mapFn = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    var thisArg;\n" \
+    "    if (mapFn !== undefined) {\n" \
+    "        if (typeof mapFn !== \"function\")\n" \
+    "            throw new @TypeError(\"Array.from requires that the second argument, when provided, be a function\");\n" \
+    "        if (arguments.length > 2)\n" \
+    "            thisArg = arguments[2];\n" \
+    "    }\n" \
+    "    if (items == null)\n" \
+    "        throw new @TypeError(\"Array.from requires an array-like object - not null or undefined\");\n" \
+    "    var iteratorMethod = items[@symbolIterator];\n" \
+    "    if (iteratorMethod != null) {\n" \
+    "        if (typeof iteratorMethod !== \"function\")\n" \
+    "            throw new @TypeError(\"Array.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function\");\n" \
+    "        var result = (typeof thisObj === \"function\") ? @Object(new thisObj()) : [];\n" \
+    "        var k = 0;\n" \
+    "        var iterator = iteratorMethod.@call(items);\n" \
+    "        var wrapper = {\n" \
+    "            [@symbolIterator]() {\n" \
+    "                return iterator;\n" \
+    "            }\n" \
+    "        };\n" \
+    "        for (var value of wrapper) {\n" \
+    "            if (mapFn)\n" \
+    "                @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));\n" \
+    "            else\n" \
+    "                @putByValDirect(result, k, value);\n" \
+    "            k += 1;\n" \
+    "        }\n" \
+    "        result.length = k;\n" \
+    "        return result;\n" \
+    "    }\n" \
+    "    var arrayLike = @Object(items);\n" \
+    "    var arrayLikeLength = @toLength(arrayLike.length);\n" \
+    "    var result = (typeof thisObj === \"function\") ? @Object(new thisObj(arrayLikeLength)) : new @Array(arrayLikeLength);\n" \
+    "    var k = 0;\n" \
+    "    while (k < arrayLikeLength) {\n" \
+    "        var value = arrayLike[k];\n" \
+    "        if (mapFn)\n" \
+    "            @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));\n" \
+    "        else\n" \
+    "            @putByValDirect(result, k, value);\n" \
+    "        k += 1;\n" \
+    "    }\n" \
+    "    result.length = arrayLikeLength;\n" \
+    "    return result;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: JSCBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result
new file mode 100644
index 000000000..8000b6963
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result
@@ -0,0 +1,197 @@
+### Begin File: BuiltinConstructorBuiltins.h
+/*
+ * Copyright (c) 2015, 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace JSC {
+
+/* BuiltinConstructor */
+extern const char* s_builtinConstructorOfCode;
+extern const int s_builtinConstructorOfCodeLength;
+extern const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility;
+extern const char* s_builtinConstructorFromCode;
+extern const int s_builtinConstructorFromCodeLength;
+extern const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_DATA(macro) \
+    macro(of, builtinConstructorOf, 0) \
+    macro(from, builtinConstructorFrom, 1) \
+
+#define JSC_BUILTIN_BUILTINCONSTRUCTOR_OF 1
+#define JSC_BUILTIN_BUILTINCONSTRUCTOR_FROM 1
+
+#define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_CODE(macro) \
+    macro(builtinConstructorOfCode, of, s_builtinConstructorOfCodeLength) \
+    macro(builtinConstructorFromCode, from, s_builtinConstructorFromCodeLength) \
+
+#define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_FUNCTION_NAME(macro) \
+    macro(from) \
+    macro(of) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: BuiltinConstructorBuiltins.h
+
+### Begin File: BuiltinConstructorBuiltins.cpp
+/*
+ * Copyright (c) 2015, 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "BuiltinConstructorBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinConstructorOfCodeLength = 286;
+static const JSC::Intrinsic s_builtinConstructorOfCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinConstructorOfCode =
+    "(function ()\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var length = arguments.length;\n" \
+    "    var array = typeof this === 'function' ? new this(length) : new @Array(length);\n" \
+    "    for (var k = 0; k < length; ++k)\n" \
+    "        @putByValDirect(array, k, arguments[k]);\n" \
+    "    array.length = length;\n" \
+    "    return array;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinConstructorFromCodeLength = 1979;
+static const JSC::Intrinsic s_builtinConstructorFromCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinConstructorFromCode =
+    "(function (items )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var thisObj = this;\n" \
+    "    var mapFn = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    var thisArg;\n" \
+    "    if (mapFn !== undefined) {\n" \
+    "        if (typeof mapFn !== \"function\")\n" \
+    "            throw new @TypeError(\"Array.from requires that the second argument, when provided, be a function\");\n" \
+    "        if (arguments.length > 2)\n" \
+    "            thisArg = arguments[2];\n" \
+    "    }\n" \
+    "    if (items == null)\n" \
+    "        throw new @TypeError(\"Array.from requires an array-like object - not null or undefined\");\n" \
+    "    var iteratorMethod = items[@symbolIterator];\n" \
+    "    if (iteratorMethod != null) {\n" \
+    "        if (typeof iteratorMethod !== \"function\")\n" \
+    "            throw new @TypeError(\"Array.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function\");\n" \
+    "        var result = (typeof thisObj === \"function\") ? @Object(new thisObj()) : [];\n" \
+    "        var k = 0;\n" \
+    "        var iterator = iteratorMethod.@call(items);\n" \
+    "        var wrapper = {\n" \
+    "            [@symbolIterator]() {\n" \
+    "                return iterator;\n" \
+    "            }\n" \
+    "        };\n" \
+    "        for (var value of wrapper) {\n" \
+    "            if (mapFn)\n" \
+    "                @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));\n" \
+    "            else\n" \
+    "                @putByValDirect(result, k, value);\n" \
+    "            k += 1;\n" \
+    "        }\n" \
+    "        result.length = k;\n" \
+    "        return result;\n" \
+    "    }\n" \
+    "    var arrayLike = @Object(items);\n" \
+    "    var arrayLikeLength = @toLength(arrayLike.length);\n" \
+    "    var result = (typeof thisObj === \"function\") ? @Object(new thisObj(arrayLikeLength)) : new @Array(arrayLikeLength);\n" \
+    "    var k = 0;\n" \
+    "    while (k < arrayLikeLength) {\n" \
+    "        var value = arrayLike[k];\n" \
+    "        if (mapFn)\n" \
+    "            @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));\n" \
+    "        else\n" \
+    "            @putByValDirect(result, k, value);\n" \
+    "        k += 1;\n" \
+    "    }\n" \
+    "    result.length = arrayLikeLength;\n" \
+    "    return result;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: BuiltinConstructorBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-error b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-error
new file mode 100644
index 000000000..eb147c40b
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-error
@@ -0,0 +1 @@
+ERROR: There are several internal functions with the same name. Private identifiers may clash.
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result
new file mode 100644
index 000000000..8cbb539be
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result
@@ -0,0 +1,148 @@
+### Begin File: JSCBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+namespace JSC {
+class FunctionExecutable;
+class VM;
+
+enum class ConstructAbility : unsigned;
+}
+
+namespace JSC {
+
+/* InternalClashingNames */
+extern const char* s_internalClashingNamesIsReadableStreamLockedCode;
+extern const int s_internalClashingNamesIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility;
+extern const char* s_internalClashingNamesIsReadableStreamLockedCode;
+extern const int s_internalClashingNamesIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility;
+
+#define JSC_FOREACH_INTERNALCLASHINGNAMES_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, internalClashingNamesIsReadableStreamLocked, 1) \
+    macro(isReadableStreamLocked, internalClashingNamesIsReadableStreamLocked, 1) \
+
+#define JSC_FOREACH_BUILTIN_CODE(macro) \
+    macro(internalClashingNamesIsReadableStreamLockedCode, isReadableStreamLocked, s_internalClashingNamesIsReadableStreamLockedCodeLength) \
+    macro(internalClashingNamesIsReadableStreamLockedCode, isReadableStreamLocked, s_internalClashingNamesIsReadableStreamLockedCodeLength) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_PRIVATE_GLOBAL_NAME(macro) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: JSCBuiltins.h
+
+### Begin File: JSCBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "JSCBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_internalClashingNamesIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_internalClashingNamesIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_internalClashingNamesIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_internalClashingNamesIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_internalClashingNamesIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_internalClashingNamesIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: JSCBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result
new file mode 100644
index 000000000..4ccc65c08
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result
@@ -0,0 +1,227 @@
+### Begin File: AnotherGuardedInternalBuiltinBuiltins.h
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(FETCH_API)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* AnotherGuardedInternalBuiltin */
+extern const char* s_anotherGuardedInternalBuiltinLetsFetchCode;
+extern const int s_anotherGuardedInternalBuiltinLetsFetchCodeLength;
+extern const JSC::ConstructAbility s_anotherGuardedInternalBuiltinLetsFetchCodeConstructAbility;
+
+#define WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_DATA(macro) \
+    macro(letsFetch, anotherGuardedInternalBuiltinLetsFetch, 0) \
+
+#define WEBCORE_BUILTIN_ANOTHERGUARDEDINTERNALBUILTIN_LETSFETCH 1
+
+#define WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(macro) \
+    macro(anotherGuardedInternalBuiltinLetsFetchCode, letsFetch, s_anotherGuardedInternalBuiltinLetsFetchCodeLength) \
+
+#define WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(macro) \
+    macro(letsFetch) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class AnotherGuardedInternalBuiltinBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit AnotherGuardedInternalBuiltinBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
+        WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* AnotherGuardedInternalBuiltinBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void AnotherGuardedInternalBuiltinBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+class AnotherGuardedInternalBuiltinBuiltinFunctions {
+public:
+    explicit AnotherGuardedInternalBuiltinBuiltinFunctions(JSC::VM& vm) : m_vm(vm) { }
+
+    void init(JSC::JSGlobalObject&);
+    void visit(JSC::SlotVisitor&);
+
+public:
+    JSC::VM& m_vm;
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(functionName) \
+    JSC::WriteBarrier<JSC::JSFunction> m_##functionName##Function;
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+};
+
+inline void AnotherGuardedInternalBuiltinBuiltinFunctions::init(JSC::JSGlobalObject& globalObject)
+{
+#define EXPORT_FUNCTION(codeName, functionName, length)\
+    m_##functionName##Function.set(m_vm, &globalObject, JSC::JSFunction::createBuiltinFunction(m_vm, codeName##Generator(m_vm), &globalObject));
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(EXPORT_FUNCTION)
+#undef EXPORT_FUNCTION
+}
+
+inline void AnotherGuardedInternalBuiltinBuiltinFunctions::visit(JSC::SlotVisitor& visitor)
+{
+#define VISIT_FUNCTION(name) visitor.append(m_##name##Function);
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(VISIT_FUNCTION)
+#undef VISIT_FUNCTION
+}
+
+
+} // namespace WebCore
+
+#endif // ENABLE(FETCH_API)
+### End File: AnotherGuardedInternalBuiltinBuiltins.h
+
+### Begin File: AnotherGuardedInternalBuiltinBuiltins.cpp
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "AnotherGuardedInternalBuiltinBuiltins.h"
+
+#if ENABLE(FETCH_API)
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_anotherGuardedInternalBuiltinLetsFetchCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_anotherGuardedInternalBuiltinLetsFetchCodeLength = 82;
+static const JSC::Intrinsic s_anotherGuardedInternalBuiltinLetsFetchCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_anotherGuardedInternalBuiltinLetsFetchCode =
+    "(function ()\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return @fetchRequest(new @Request(\"yes\"));\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().anotherGuardedInternalBuiltinBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().anotherGuardedInternalBuiltinBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(FETCH_API)
+
+### End File: AnotherGuardedInternalBuiltinBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result
new file mode 100644
index 000000000..e626d670c
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result
@@ -0,0 +1,197 @@
+### Begin File: ArbitraryConditionalGuardBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(STREAMS_API) || USE(CF)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* ArbitraryConditionalGuard */
+extern const char* s_arbitraryConditionalGuardIsReadableStreamLockedCode;
+extern const int s_arbitraryConditionalGuardIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_arbitraryConditionalGuardIsReadableStreamLockedCodeConstructAbility;
+
+#define WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, arbitraryConditionalGuardIsReadableStreamLocked, 1) \
+
+#define WEBCORE_BUILTIN_ARBITRARYCONDITIONALGUARD_ISREADABLESTREAMLOCKED 1
+
+#define WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(macro) \
+    macro(arbitraryConditionalGuardIsReadableStreamLockedCode, isReadableStreamLocked, s_arbitraryConditionalGuardIsReadableStreamLockedCodeLength) \
+
+#define WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class ArbitraryConditionalGuardBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit ArbitraryConditionalGuardBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
+        WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* ArbitraryConditionalGuardBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void ArbitraryConditionalGuardBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API) || USE(CF)
+### End File: ArbitraryConditionalGuardBuiltins.h
+
+### Begin File: ArbitraryConditionalGuardBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "ArbitraryConditionalGuardBuiltins.h"
+
+#if ENABLE(STREAMS_API) || USE(CF)
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_arbitraryConditionalGuardIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_arbitraryConditionalGuardIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_arbitraryConditionalGuardIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_arbitraryConditionalGuardIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().arbitraryConditionalGuardBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().arbitraryConditionalGuardBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API) || USE(CF)
+
+### End File: ArbitraryConditionalGuardBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-DuplicateFlagAnnotation-Separate.js-error b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-DuplicateFlagAnnotation-Separate.js-error
new file mode 100644
index 000000000..b15152e63
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-DuplicateFlagAnnotation-Separate.js-error
@@ -0,0 +1 @@
+ERROR: Duplicate annotation found: internal
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-DuplicateKeyValueAnnotation-Separate.js-error b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-DuplicateKeyValueAnnotation-Separate.js-error
new file mode 100644
index 000000000..f1b429e27
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-DuplicateKeyValueAnnotation-Separate.js-error
@@ -0,0 +1 @@
+ERROR: Duplicate annotation found: conditional
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result
new file mode 100644
index 000000000..0747487c0
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result
@@ -0,0 +1,197 @@
+### Begin File: GuardedBuiltinBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(STREAMS_API)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* GuardedBuiltin */
+extern const char* s_guardedBuiltinIsReadableStreamLockedCode;
+extern const int s_guardedBuiltinIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_guardedBuiltinIsReadableStreamLockedCodeConstructAbility;
+
+#define WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, guardedBuiltinIsReadableStreamLocked, 1) \
+
+#define WEBCORE_BUILTIN_GUARDEDBUILTIN_ISREADABLESTREAMLOCKED 1
+
+#define WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(macro) \
+    macro(guardedBuiltinIsReadableStreamLockedCode, isReadableStreamLocked, s_guardedBuiltinIsReadableStreamLockedCodeLength) \
+
+#define WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class GuardedBuiltinBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit GuardedBuiltinBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
+        WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* GuardedBuiltinBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void GuardedBuiltinBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API)
+### End File: GuardedBuiltinBuiltins.h
+
+### Begin File: GuardedBuiltinBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "GuardedBuiltinBuiltins.h"
+
+#if ENABLE(STREAMS_API)
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_guardedBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_guardedBuiltinIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_guardedBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_guardedBuiltinIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().guardedBuiltinBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().guardedBuiltinBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API)
+
+### End File: GuardedBuiltinBuiltins.cpp
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result
new file mode 100644
index 000000000..9d83dee7c
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result
@@ -0,0 +1,229 @@
+### Begin File: GuardedInternalBuiltinBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* GuardedInternalBuiltin */
+extern const char* s_guardedInternalBuiltinIsReadableStreamLockedCode;
+extern const int s_guardedInternalBuiltinIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_guardedInternalBuiltinIsReadableStreamLockedCodeConstructAbility;
+
+#define WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, guardedInternalBuiltinIsReadableStreamLocked, 1) \
+
+#define WEBCORE_BUILTIN_GUARDEDINTERNALBUILTIN_ISREADABLESTREAMLOCKED 1
+
+#define WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(macro) \
+    macro(guardedInternalBuiltinIsReadableStreamLockedCode, isReadableStreamLocked, s_guardedInternalBuiltinIsReadableStreamLockedCodeLength) \
+
+#define WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class GuardedInternalBuiltinBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit GuardedInternalBuiltinBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
+        WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* GuardedInternalBuiltinBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void GuardedInternalBuiltinBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+class GuardedInternalBuiltinBuiltinFunctions {
+public:
+    explicit GuardedInternalBuiltinBuiltinFunctions(JSC::VM& vm) : m_vm(vm) { }
+
+    void init(JSC::JSGlobalObject&);
+    void visit(JSC::SlotVisitor&);
+
+public:
+    JSC::VM& m_vm;
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(functionName) \
+    JSC::WriteBarrier<JSC::JSFunction> m_##functionName##Function;
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+};
+
+inline void GuardedInternalBuiltinBuiltinFunctions::init(JSC::JSGlobalObject& globalObject)
+{
+#define EXPORT_FUNCTION(codeName, functionName, length)\
+    m_##functionName##Function.set(m_vm, &globalObject, JSC::JSFunction::createBuiltinFunction(m_vm, codeName##Generator(m_vm), &globalObject));
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(EXPORT_FUNCTION)
+#undef EXPORT_FUNCTION
+}
+
+inline void GuardedInternalBuiltinBuiltinFunctions::visit(JSC::SlotVisitor& visitor)
+{
+#define VISIT_FUNCTION(name) visitor.append(m_##name##Function);
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(VISIT_FUNCTION)
+#undef VISIT_FUNCTION
+}
+
+
+} // namespace WebCore
+
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+### End File: GuardedInternalBuiltinBuiltins.h
+
+### Begin File: GuardedInternalBuiltinBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "GuardedInternalBuiltinBuiltins.h"
+
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+
+#include "WebCoreJSClientData.h"
+#include <heap/HeapInlines.h>
+#include <runtime/Executable.h>
+#include <runtime/IntrinsicGetterAccessCase.h>
+#include <runtime/JSCJSValueInlines.h>
+#include <runtime/JSCellInlines.h>
+#include <runtime/StructureInlines.h>
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_guardedInternalBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_guardedInternalBuiltinIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_guardedInternalBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_guardedInternalBuiltinIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().guardedInternalBuiltinBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().guardedInternalBuiltinBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+
+### End File: GuardedInternalBuiltinBuiltins.cpp
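The DEFINE_BUILTIN_GENERATOR/foreach pairing above stamps out one generator function per builtin. For the single isReadableStreamLocked entry, the macro expands to roughly the following sketch (formatting added, cast template argument written out):

    JSC::FunctionExecutable* guardedInternalBuiltinIsReadableStreamLockedCodeGenerator(JSC::VM& vm)
    {
        JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData);
        // Lazily builds the UnlinkedFunctionExecutable for the cached source,
        // then links it for this VM.
        return clientData->builtinFunctions().guardedInternalBuiltinBuiltins()
            .guardedInternalBuiltinIsReadableStreamLockedCodeExecutable()
            ->link(vm,
                clientData->builtinFunctions().guardedInternalBuiltinBuiltins()
                    .guardedInternalBuiltinIsReadableStreamLockedCodeSource(),
                std::nullopt,
                s_guardedInternalBuiltinIsReadableStreamLockedCodeIntrinsic);
    }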
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result
new file mode 100644
index 000000000..0352b8cc7
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result
@@ -0,0 +1,188 @@
+### Begin File: UnguardedBuiltinBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#include <builtins/BuiltinUtils.h>
+#include <bytecode/UnlinkedFunctionExecutable.h>
+#include <runtime/Identifier.h>
+#include <runtime/JSFunction.h>
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* UnguardedBuiltin */
+extern const char* s_unguardedBuiltinIsReadableStreamLockedCode;
+extern const int s_unguardedBuiltinIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_unguardedBuiltinIsReadableStreamLockedCodeConstructAbility;
+
+#define WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, unguardedBuiltinIsReadableStreamLocked, 1) \
+
+#define WEBCORE_BUILTIN_UNGUARDEDBUILTIN_ISREADABLESTREAMLOCKED 1
+
+#define WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(macro) \
+    macro(unguardedBuiltinIsReadableStreamLockedCode, isReadableStreamLocked, s_unguardedBuiltinIsReadableStreamLockedCodeLength) \
+
+#define WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class UnguardedBuiltinBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit UnguardedBuiltinBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
+        WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* UnguardedBuiltinBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void UnguardedBuiltinBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+} // namespace WebCore
+### End File: UnguardedBuiltinBuiltins.h
+
+### Begin File: UnguardedBuiltinBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "UnguardedBuiltinBuiltins.h"
+
+#include "WebCoreJSClientData.h"
+#include <heap/HeapInlines.h>
+#include <runtime/Executable.h>
+#include <runtime/IntrinsicGetterAccessCase.h>
+#include <runtime/JSCJSValueInlines.h>
+#include <runtime/JSCellInlines.h>
+#include <runtime/StructureInlines.h>
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_unguardedBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_unguardedBuiltinIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_unguardedBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_unguardedBuiltinIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().unguardedBuiltinBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().unguardedBuiltinBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+### End File: UnguardedBuiltinBuiltins.cpp
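The unguarded variant differs from the guarded files above mainly in the absence of the ENABLE() conditional; the scaffolding is otherwise identical because everything is driven by the same X-macro idiom: one WEBCORE_FOREACH_* list, re-expanded with different per-entry macros for declarations, definitions, and initializers. A minimal standalone sketch of the idiom (names here are illustrative, not generator output):

    #include <cstdio>

    // Single list of (codeName, functionName, argumentCount) entries.
    #define FOR_EACH_DEMO_BUILTIN(macro) \
        macro(fooCode, foo, 1) \
        macro(barCode, bar, 2)

    // Expanded once to declare...
    #define DECLARE_LENGTH(codeName, functionName, argumentCount) \
        extern const int codeName##Length;
    FOR_EACH_DEMO_BUILTIN(DECLARE_LENGTH)
    #undef DECLARE_LENGTH

    // ...and once to define, so the two can never drift apart.
    #define DEFINE_LENGTH(codeName, functionName, argumentCount) \
        const int codeName##Length = argumentCount;
    FOR_EACH_DEMO_BUILTIN(DEFINE_LENGTH)
    #undef DEFINE_LENGTH

    int main() { std::printf("%d %d\n", fooCodeLength, barCodeLength); }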
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result
new file mode 100644
index 000000000..7846ee870
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result
@@ -0,0 +1,280 @@
+### Begin File: xmlCasingTestBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(STREAMS_API)
+
+#include <builtins/BuiltinUtils.h>
+#include <bytecode/UnlinkedFunctionExecutable.h>
+#include <runtime/Identifier.h>
+#include <runtime/JSFunction.h>
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* xmlCasingTest */
+extern const char* s_xmlCasingTestXMLCasingTestCode;
+extern const int s_xmlCasingTestXMLCasingTestCodeLength;
+extern const JSC::ConstructAbility s_xmlCasingTestXMLCasingTestCodeConstructAbility;
+extern const char* s_xmlCasingTestCssCasingTestCode;
+extern const int s_xmlCasingTestCssCasingTestCodeLength;
+extern const JSC::ConstructAbility s_xmlCasingTestCssCasingTestCodeConstructAbility;
+extern const char* s_xmlCasingTestUrlCasingTestCode;
+extern const int s_xmlCasingTestUrlCasingTestCodeLength;
+extern const JSC::ConstructAbility s_xmlCasingTestUrlCasingTestCodeConstructAbility;
+
+#define WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_DATA(macro) \
+    macro(xmlCasingTest, xmlCasingTestXMLCasingTest, 1) \
+    macro(cssCasingTest, xmlCasingTestCssCasingTest, 2) \
+    macro(urlCasingTest, xmlCasingTestUrlCasingTest, 3) \
+
+#define WEBCORE_BUILTIN_XMLCASINGTEST_XMLCASINGTEST 1
+#define WEBCORE_BUILTIN_XMLCASINGTEST_CSSCASINGTEST 1
+#define WEBCORE_BUILTIN_XMLCASINGTEST_URLCASINGTEST 1
+
+#define WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(macro) \
+    macro(xmlCasingTestXMLCasingTestCode, xmlCasingTest, s_xmlCasingTestXMLCasingTestCodeLength) \
+    macro(xmlCasingTestCssCasingTestCode, cssCasingTest, s_xmlCasingTestCssCasingTestCodeLength) \
+    macro(xmlCasingTestUrlCasingTestCode, urlCasingTest, s_xmlCasingTestUrlCasingTestCodeLength) \
+
+#define WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(macro) \
+    macro(cssCasingTest) \
+    macro(urlCasingTest) \
+    macro(xmlCasingTest) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class xmlCasingTestBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit xmlCasingTestBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
+        WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* xmlCasingTestBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void xmlCasingTestBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+class xmlCasingTestBuiltinFunctions {
+public:
+    explicit xmlCasingTestBuiltinFunctions(JSC::VM& vm) : m_vm(vm) { }
+
+    void init(JSC::JSGlobalObject&);
+    void visit(JSC::SlotVisitor&);
+
+public:
+    JSC::VM& m_vm;
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(functionName) \
+    JSC::WriteBarrier<JSC::JSFunction> m_##functionName##Function;
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+};
+
+inline void xmlCasingTestBuiltinFunctions::init(JSC::JSGlobalObject& globalObject)
+{
+#define EXPORT_FUNCTION(codeName, functionName, length)\
+    m_##functionName##Function.set(m_vm, &globalObject, JSC::JSFunction::createBuiltinFunction(m_vm, codeName##Generator(m_vm), &globalObject));
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(EXPORT_FUNCTION)
+#undef EXPORT_FUNCTION
+}
+
+inline void xmlCasingTestBuiltinFunctions::visit(JSC::SlotVisitor& visitor)
+{
+#define VISIT_FUNCTION(name) visitor.append(m_##name##Function);
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(VISIT_FUNCTION)
+#undef VISIT_FUNCTION
+}
+
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API)
+### End File: xmlCasingTestBuiltins.h
+
+### Begin File: xmlCasingTestBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "xmlCasingTestBuiltins.h"
+
+#if ENABLE(STREAMS_API)
+
+#include "WebCoreJSClientData.h"
+#include <heap/HeapInlines.h>
+#include <runtime/Executable.h>
+#include <runtime/IntrinsicGetterAccessCase.h>
+#include <runtime/JSCJSValueInlines.h>
+#include <runtime/JSCellInlines.h>
+#include <runtime/StructureInlines.h>
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_xmlCasingTestXMLCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_xmlCasingTestXMLCasingTestCodeLength = 70;
+static const JSC::Intrinsic s_xmlCasingTestXMLCasingTestCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_xmlCasingTestXMLCasingTestCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_xmlCasingTestCssCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_xmlCasingTestCssCasingTestCodeLength = 401;
+static const JSC::Intrinsic s_xmlCasingTestCssCasingTestCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_xmlCasingTestCssCasingTestCode =
+    "(function (stream, reason)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (stream.@state === @readableStreamClosed)\n" \
+    "        return Promise.resolve();\n" \
+    "    if (stream.@state === @readableStreamErrored)\n" \
+    "        return Promise.reject(stream.@storedError);\n" \
+    "    stream.@queue = [];\n" \
+    "    @finishClosingReadableStream(stream);\n" \
+    "    return @promiseInvokeOrNoop(stream.@underlyingSource, \"cancel\", [reason]).then(function() { });\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_xmlCasingTestUrlCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_xmlCasingTestUrlCasingTestCodeLength = 337;
+static const JSC::Intrinsic s_xmlCasingTestUrlCasingTestCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_xmlCasingTestUrlCasingTestCode =
+    "(function (object, key, args)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    try {\n" \
+    "        var method = object[key];\n" \
+    "        if (typeof method === \"undefined\")\n" \
+    "            return Promise.resolve();\n" \
+    "        var result = method.@apply(object, args);\n" \
+    "        return Promise.resolve(result);\n" \
+    "    }\n" \
+    "    catch(error) {\n" \
+    "        return Promise.reject(error);\n" \
+    "    }\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().xmlCasingTestBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().xmlCasingTestBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API)
+
+### End File: xmlCasingTestBuiltins.cpp
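A detail worth noting in these expected results: each s_*CodeLength constant must equal the byte length of the concatenated source literal, because the wrapper builds its SourceCode via StringImpl::createFromLiteral(s_##name, length) with an explicit length rather than scanning for a NUL. A quick sketch of the invariant for the 70-byte function above (not generator output):

    // 19 + 2 + 17 + 29 + 3 = 70 bytes, excluding the terminating NUL.
    static const char code[] =
        "(function (stream)\n"
        "{\n"
        "   \"use strict\";\n"
        "    return !!stream.@reader;\n"
        "})\n";
    static_assert(sizeof(code) - 1 == 70, "length constant must match the literal");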
diff --git a/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCoreJSBuiltins.h-result b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCoreJSBuiltins.h-result
new file mode 100644
index 000000000..dab424432
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/tests/builtins/expected/WebCoreJSBuiltins.h-result
@@ -0,0 +1,351 @@
+### Begin File: WebCoreJSBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#include "AnotherGuardedInternalBuiltinBuiltins.h"
+#include "ArbitraryConditionalGuardBuiltins.h"
+#include "GuardedBuiltinBuiltins.h"
+#include "GuardedInternalBuiltinBuiltins.h"
+#include "UnguardedBuiltinBuiltins.h"
+#include "xmlCasingTestBuiltins.h"
+#include <runtime/VM.h>
+
+namespace WebCore {
+
+class JSBuiltinFunctions {
+public:
+    explicit JSBuiltinFunctions(JSC::VM& vm)
+        : m_vm(vm)
+#if ENABLE(FETCH_API)
+        , m_anotherGuardedInternalBuiltinBuiltins(&m_vm)
+#endif // ENABLE(FETCH_API)
+#if ENABLE(STREAMS_API) || USE(CF)
+        , m_arbitraryConditionalGuardBuiltins(&m_vm)
+#endif // ENABLE(STREAMS_API) || USE(CF)
+#if ENABLE(STREAMS_API)
+        , m_guardedBuiltinBuiltins(&m_vm)
+#endif // ENABLE(STREAMS_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+        , m_guardedInternalBuiltinBuiltins(&m_vm)
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+        , m_unguardedBuiltinBuiltins(&m_vm)
+#if ENABLE(STREAMS_API)
+        , m_xmlCasingTestBuiltins(&m_vm)
+#endif // ENABLE(STREAMS_API)
+    {
+#if ENABLE(FETCH_API)
+        m_anotherGuardedInternalBuiltinBuiltins.exportNames();
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+        m_guardedInternalBuiltinBuiltins.exportNames();
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+        m_xmlCasingTestBuiltins.exportNames();
+#endif // ENABLE(STREAMS_API)
+    }
+
+#if ENABLE(FETCH_API)
+    AnotherGuardedInternalBuiltinBuiltinsWrapper& anotherGuardedInternalBuiltinBuiltins() { return m_anotherGuardedInternalBuiltinBuiltins; }
+#endif // ENABLE(FETCH_API)
+#if ENABLE(STREAMS_API) || USE(CF)
+    ArbitraryConditionalGuardBuiltinsWrapper& arbitraryConditionalGuardBuiltins() { return m_arbitraryConditionalGuardBuiltins; }
+#endif // ENABLE(STREAMS_API) || USE(CF)
+#if ENABLE(STREAMS_API)
+    GuardedBuiltinBuiltinsWrapper& guardedBuiltinBuiltins() { return m_guardedBuiltinBuiltins; }
+#endif // ENABLE(STREAMS_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    GuardedInternalBuiltinBuiltinsWrapper& guardedInternalBuiltinBuiltins() { return m_guardedInternalBuiltinBuiltins; }
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    UnguardedBuiltinBuiltinsWrapper& unguardedBuiltinBuiltins() { return m_unguardedBuiltinBuiltins; }
+#if ENABLE(STREAMS_API)
+    XMLCasingTestBuiltinsWrapper& xmlCasingTestBuiltins() { return m_xmlCasingTestBuiltins; }
+#endif // ENABLE(STREAMS_API)
+
+private:
+    JSC::VM& m_vm;
+#if ENABLE(FETCH_API)
+    AnotherGuardedInternalBuiltinBuiltinsWrapper m_anotherGuardedInternalBuiltinBuiltins;
+#endif // ENABLE(FETCH_API)
+#if ENABLE(STREAMS_API) || USE(CF)
+    ArbitraryConditionalGuardBuiltinsWrapper m_arbitraryConditionalGuardBuiltins;
+#endif // ENABLE(STREAMS_API) || USE(CF)
+#if ENABLE(STREAMS_API)
+    GuardedBuiltinBuiltinsWrapper m_guardedBuiltinBuiltins;
+#endif // ENABLE(STREAMS_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    GuardedInternalBuiltinBuiltinsWrapper m_guardedInternalBuiltinBuiltins;
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    UnguardedBuiltinBuiltinsWrapper m_unguardedBuiltinBuiltins;
+#if ENABLE(STREAMS_API)
+    XMLCasingTestBuiltinsWrapper m_xmlCasingTestBuiltins;
+#endif // ENABLE(STREAMS_API)
+};
+
+} // namespace WebCore
+### End File: WebCoreJSBuiltins.h
+
+### Begin File: WebCoreJSBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "AnotherGuardedInternalBuiltinBuiltins.cpp"
+#include "ArbitraryConditionalGuardBuiltins.cpp"
+#include "GuardedBuiltinBuiltins.cpp"
+#include "GuardedInternalBuiltinBuiltins.cpp"
+#include "UnguardedBuiltinBuiltins.cpp"
+#include "xmlCasingTestBuiltins.cpp"
+### End File: WebCoreJSBuiltins.cpp
+
+### Begin File: WebCoreJSBuiltinInternals.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#include "AnotherGuardedInternalBuiltinBuiltins.h"
+#include "GuardedInternalBuiltinBuiltins.h"
+#include "xmlCasingTestBuiltins.h"
+#include <builtins/BuiltinUtils.h>
+#include <runtime/VM.h>
+
+namespace WebCore {
+
+class JSDOMGlobalObject;
+
+class JSBuiltinInternalFunctions {
+public:
+    explicit JSBuiltinInternalFunctions(JSC::VM&);
+
+    void visit(JSC::SlotVisitor&);
+    void initialize(JSDOMGlobalObject&);
+
+#if ENABLE(FETCH_API)
+    AnotherGuardedInternalBuiltinBuiltinFunctions& anotherGuardedInternalBuiltin() { return m_anotherGuardedInternalBuiltin; }
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    GuardedInternalBuiltinBuiltinFunctions& guardedInternalBuiltin() { return m_guardedInternalBuiltin; }
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    XMLCasingTestBuiltinFunctions& xmlCasingTest() { return m_xmlCasingTest; }
+#endif // ENABLE(STREAMS_API)
+
+private:
+#if ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+    JSC::VM& m_vm;
+#endif // ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+#if ENABLE(FETCH_API)
+    AnotherGuardedInternalBuiltinBuiltinFunctions m_anotherGuardedInternalBuiltin;
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    GuardedInternalBuiltinBuiltinFunctions m_guardedInternalBuiltin;
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    XMLCasingTestBuiltinFunctions m_xmlCasingTest;
+#endif // ENABLE(STREAMS_API)
+};
+
+} // namespace WebCore
+### End File: WebCoreJSBuiltinInternals.h
+
+### Begin File: WebCoreJSBuiltinInternals.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "WebCoreJSBuiltinInternals.h"
+
+#include "JSDOMGlobalObject.h"
+#include "WebCoreJSClientData.h"
+#include <heap/HeapInlines.h>
+#include <runtime/JSCJSValueInlines.h>
+#include <runtime/JSCellInlines.h>
+#include <runtime/StructureInlines.h>
+
+namespace WebCore {
+
+JSBuiltinInternalFunctions::JSBuiltinInternalFunctions(JSC::VM& vm)
+#if ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+    : m_vm(vm)
+#endif // ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+#if ENABLE(FETCH_API)
+    , m_anotherGuardedInternalBuiltin(m_vm)
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    , m_guardedInternalBuiltin(m_vm)
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    , m_xmlCasingTest(m_vm)
+#endif // ENABLE(STREAMS_API)
+{
+    UNUSED_PARAM(vm);
+}
+
+void JSBuiltinInternalFunctions::visit(JSC::SlotVisitor& visitor)
+{
+#if ENABLE(FETCH_API)
+    m_anotherGuardedInternalBuiltin.visit(visitor);
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    m_guardedInternalBuiltin.visit(visitor);
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    m_xmlCasingTest.visit(visitor);
+#endif // ENABLE(STREAMS_API)
+    UNUSED_PARAM(visitor);
+}
+
+void JSBuiltinInternalFunctions::initialize(JSDOMGlobalObject& globalObject)
+{
+    UNUSED_PARAM(globalObject);
+#if ENABLE(FETCH_API)
+    m_anotherGuardedInternalBuiltin.init(globalObject);
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    m_guardedInternalBuiltin.init(globalObject);
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    m_xmlCasingTest.init(globalObject);
+#endif // ENABLE(STREAMS_API)
+
+#if ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+    JSVMClientData& clientData = *static_cast<JSVMClientData*>(m_vm.clientData);
+    JSDOMGlobalObject::GlobalPropertyInfo staticGlobals[] = {
+#if ENABLE(FETCH_API)
+#define DECLARE_GLOBAL_STATIC(name) \
+    JSDOMGlobalObject::GlobalPropertyInfo( \
+        clientData.builtinFunctions().anotherGuardedInternalBuiltinBuiltins().name##PrivateName(), anotherGuardedInternalBuiltin().m_##name##Function.get() , JSC::DontDelete | JSC::ReadOnly),
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_GLOBAL_STATIC)
+#undef DECLARE_GLOBAL_STATIC
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#define DECLARE_GLOBAL_STATIC(name) \
+    JSDOMGlobalObject::GlobalPropertyInfo( \
+        clientData.builtinFunctions().guardedInternalBuiltinBuiltins().name##PrivateName(), guardedInternalBuiltin().m_##name##Function.get() , JSC::DontDelete | JSC::ReadOnly),
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_GLOBAL_STATIC)
+#undef DECLARE_GLOBAL_STATIC
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+#define DECLARE_GLOBAL_STATIC(name) \
+    JSDOMGlobalObject::GlobalPropertyInfo( \
+        clientData.builtinFunctions().xmlCasingTestBuiltins().name##PrivateName(), xmlCasingTest().m_##name##Function.get() , JSC::DontDelete | JSC::ReadOnly),
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(DECLARE_GLOBAL_STATIC)
+#undef DECLARE_GLOBAL_STATIC
+#endif // ENABLE(STREAMS_API)
+    };
+    globalObject.addStaticGlobals(staticGlobals, WTF_ARRAY_LENGTH(staticGlobals));
+    UNUSED_PARAM(clientData);
+#endif // ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+}
+
+} // namespace WebCore
+### End File: WebCoreJSBuiltinInternals.cpp
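Each DECLARE_GLOBAL_STATIC expansion above contributes one read-only, non-deletable entry to staticGlobals, keyed by the builtin's private name so page script cannot reach it through the public global object. For isReadableStreamLocked the expansion is roughly this sketch:

    JSDOMGlobalObject::GlobalPropertyInfo(
        clientData.builtinFunctions().guardedInternalBuiltinBuiltins().isReadableStreamLockedPrivateName(),
        guardedInternalBuiltin().m_isReadableStreamLockedFunction.get(),
        JSC::DontDelete | JSC::ReadOnly),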
diff --git a/Source/JavaScriptCore/Scripts/xxd.pl b/Source/JavaScriptCore/Scripts/xxd.pl
new file mode 100644
index 000000000..5ee08a52d
--- /dev/null
+++ b/Source/JavaScriptCore/Scripts/xxd.pl
@@ -0,0 +1,45 @@
+#! /usr/bin/perl
+
+# Copyright (C) 2010-2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    # Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    # Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    # Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+$varname = shift;
+$fname = shift;
+$output = shift;
+
+open($input, '<', $fname) or die "Can't open file for read: $fname $!";
+$/ = undef;
+$text = <$input>;
+close($input);
+
+$text = join(', ', map('0x' . unpack("H*", $_), split(undef, $text)));
+
+open($output, '>', $output) or die "Can't open file for write: $output $!";
+print $output "const unsigned char $varname\[\] = {\n$text\n};\n";
+close($output);
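The script takes three positional arguments -- the C variable name, the input path, and the output path -- and hex-dumps the input one byte at a time into a comma-separated C array initializer. An invocation along the lines of "perl Scripts/xxd.pl DemoBytes in.bin out.cpp" (names and paths illustrative) on a two-byte input "hi" would emit:

    const unsigned char DemoBytes[] = {
    0x68, 0x69
    };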
diff --git a/Source/JavaScriptCore/assembler/ARM64Assembler.h b/Source/JavaScriptCore/assembler/ARM64Assembler.h
index cfbd8cec5..7421460e9 100644
--- a/Source/JavaScriptCore/assembler/ARM64Assembler.h
+++ b/Source/JavaScriptCore/assembler/ARM64Assembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014, 2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,12 +23,13 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ARM64Assembler_h
-#define ARM64Assembler_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && CPU(ARM64)
 
 #include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
+#include <limits.h>
 #include <wtf/Assertions.h>
 #include <wtf/Vector.h>
 #include <stdint.h>
@@ -37,30 +38,28 @@
 #define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
 #define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
 #define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
+#define CHECK_VECTOR_DATASIZE() ASSERT(datasize == 64 || datasize == 128)
 #define DATASIZE DATASIZE_OF(datasize)
 #define MEMOPSIZE MEMOPSIZE_OF(datasize)
 #define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
+#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32)
+#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32)
 
 namespace JSC {
 
-ALWAYS_INLINE bool isInt9(int32_t value)
+ALWAYS_INLINE bool isInt7(int32_t value)
 {
-    return value == ((value << 23) >> 23);
+    return value == ((value << 25) >> 25);
 }
 
-ALWAYS_INLINE bool isUInt5(int32_t value)
+ALWAYS_INLINE bool isInt11(int32_t value)
 {
-    return !(value & ~0x1f);
+    return value == ((value << 21) >> 21);
 }
 
-ALWAYS_INLINE bool isUInt12(int32_t value)
-{
-    return !(value & ~0xfff);
-}
-
-ALWAYS_INLINE bool isUInt12(intptr_t value)
+ALWAYS_INLINE bool isUInt5(int32_t value)
 {
-    return !(value & ~0xfffL);
+    return !(value & ~0x1f);
 }
 
 class UInt5 {
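The new isInt7/isInt11 predicates introduced in this hunk use the same sign-extension trick as the isInt9 they replace: shifting the value up so the top bit of an N-bit field lands in the sign bit, then arithmetic-shifting back, reproduces the value only when it already fits in N signed bits. A minimal sketch for N = 7 (mirroring the code above; like the original, it relies on two's-complement arithmetic shifts):

    #include <cassert>
    #include <cstdint>

    static bool isInt7(int32_t value) { return value == ((value << 25) >> 25); }

    int main()
    {
        assert(isInt7(63));   // largest value representable in 7 signed bits
        assert(isInt7(-64));  // smallest
        assert(!isInt7(64));  // needs 8 bits: (64 << 25) >> 25 == -64
        assert(!isInt7(-65));
    }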
@@ -119,333 +118,174 @@ private:
     int m_value;
 };
 
-class LogicalImmediate {
+class PairPostIndex {
 public:
-    static LogicalImmediate create32(uint32_t value)
-    {
-        // Check for 0, -1 - these cannot be encoded.
-        if (!value || !~value)
-            return InvalidLogicalImmediate;
-
-        // First look for a 32-bit pattern, then for repeating 16-bit
-        // patterns, 8-bit, 4-bit, and finally 2-bit.
-
-        unsigned hsb, lsb;
-        bool inverted;
-        if (findBitRange<32>(value, hsb, lsb, inverted))
-            return encodeLogicalImmediate<32>(hsb, lsb, inverted);
-
-        if ((value & 0xffff) != (value >> 16))
-            return InvalidLogicalImmediate;
-        value &= 0xffff;
-
-        if (findBitRange<16>(value, hsb, lsb, inverted))
-            return encodeLogicalImmediate<16>(hsb, lsb, inverted);
-
-        if ((value & 0xff) != (value >> 8))
-            return InvalidLogicalImmediate;
-        value &= 0xff;
-
-        if (findBitRange<8>(value, hsb, lsb, inverted))
-            return encodeLogicalImmediate<8>(hsb, lsb, inverted);
-
-        if ((value & 0xf) != (value >> 4))
-            return InvalidLogicalImmediate;
-        value &= 0xf;
-
-        if (findBitRange<4>(value, hsb, lsb, inverted))
-            return encodeLogicalImmediate<4>(hsb, lsb, inverted);
-
-        if ((value & 0x3) != (value >> 2))
-            return InvalidLogicalImmediate;
-        value &= 0x3;
-
-        if (findBitRange<2>(value, hsb, lsb, inverted))
-            return encodeLogicalImmediate<2>(hsb, lsb, inverted);
-
-        return InvalidLogicalImmediate;
-    }
-
-    static LogicalImmediate create64(uint64_t value)
-    {
-        // Check for 0, -1 - these cannot be encoded.
-        if (!value || !~value)
-            return InvalidLogicalImmediate;
-
-        // Look for a contiguous bit range.
-        unsigned hsb, lsb;
-        bool inverted;
-        if (findBitRange<64>(value, hsb, lsb, inverted))
-            return encodeLogicalImmediate<64>(hsb, lsb, inverted);
-
-        // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
-        if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
-            return create32(static_cast<uint32_t>(value));
-        return InvalidLogicalImmediate;
-    }
-
-    int value() const
-    {
-        ASSERT(isValid());
-        return m_value;
-    }
-
-    bool isValid() const
+    explicit PairPostIndex(int value)
+        : m_value(value)
     {
-        return m_value != InvalidLogicalImmediate;
+        ASSERT(isInt11(value));
     }
 
-    bool is64bit() const
-    {
-        return m_value & (1 << 12);
-    }
+    operator int() { return m_value; }
 
 private:
-    LogicalImmediate(int value)
-        : m_value(value)
-    {
-    }
-
-    // Generate a mask with bits in the range hsb..0 set, for example:
-    //   hsb:63 = 0xffffffffffffffff
-    //   hsb:42 = 0x000007ffffffffff
-    //   hsb: 0 = 0x0000000000000001
-    static uint64_t mask(unsigned hsb)
-    {
-        ASSERT(hsb < 64);
-        return 0xffffffffffffffffull >> (63 - hsb);
-    }
+    int m_value;
+};
 
-    template<unsigned N>
-    static void partialHSB(uint64_t& value, unsigned&result)
+class PairPreIndex {
+public:
+    explicit PairPreIndex(int value)
+        : m_value(value)
     {
-        if (value & (0xffffffffffffffffull << N)) {
-            result += N;
-            value >>= N;
-        }
+        ASSERT(isInt11(value));
     }
 
-    // Find the bit number of the highest bit set in a non-zero value, for example:
-    //   0x8080808080808080 = hsb:63
-    //   0x0000000000000001 = hsb: 0
-    //   0x000007ffffe00000 = hsb:42
-    static unsigned highestSetBit(uint64_t value)
-    {
-        ASSERT(value);
-        unsigned hsb = 0;
-        partialHSB<32>(value, hsb);
-        partialHSB<16>(value, hsb);
-        partialHSB<8>(value, hsb);
-        partialHSB<4>(value, hsb);
-        partialHSB<2>(value, hsb);
-        partialHSB<1>(value, hsb);
-        return hsb;
-    }
-
-    // This function takes a value and a bit width, where value obeys the following constraints:
-    //   * bits outside of the width of the value must be zero.
-    //   * bits within the width of value must neither be all clear or all set.
-    // The input is inspected to detect values that consist of either two or three contiguous
-    // ranges of bits. The output range hsb..lsb will describe the second range of the value.
-    // if the range is set, inverted will be false, and if the range is clear, inverted will
-    // be true. For example (with width 8):
-    //   00001111 = hsb:3, lsb:0, inverted:false
-    //   11110000 = hsb:3, lsb:0, inverted:true
-    //   00111100 = hsb:5, lsb:2, inverted:false
-    //   11000011 = hsb:5, lsb:2, inverted:true
-    template<unsigned width>
-    static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
-    {
-        ASSERT(value & mask(width - 1));
-        ASSERT(value != mask(width - 1));
-        ASSERT(!(value & ~mask(width - 1)));
-
-        // Detect cases where the top bit is set; if so, flip all the bits & set invert.
-        // This halves the number of patterns we need to look for.
-        const uint64_t msb = 1ull << (width - 1);
-        if ((inverted = (value & msb)))
-            value ^= mask(width - 1);
-
-        // Find the highest set bit in value, generate a corresponding mask & flip all
-        // bits under it.
-        hsb = highestSetBit(value);
-        value ^= mask(hsb);
-        if (!value) {
-            // If this cleared the value, then the range hsb..0 was all set.
-            lsb = 0;
-            return true;
-        }
-
-        // Try making one more mask, and flipping the bits!
-        lsb = highestSetBit(value);
-        value ^= mask(lsb);
-        if (!value) {
-            // Success - but lsb actually points to the hsb of a third range - add one
-            // to get to the lsb of the mid range.
-            ++lsb;
-            return true;
-        }
-
-        return false;
-    }
-
-    // Encodes the set of immN:immr:imms fields found in a logical immediate.
-    template<unsigned width>
-    static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
-    {
-        // Check width is a power of 2!
-        ASSERT(!(width & (width -1)));
-        ASSERT(width <= 64 && width >= 2);
-        ASSERT(hsb >= lsb);
-        ASSERT(hsb < width);
-
-        int immN = 0;
-        int imms = 0;
-        int immr = 0;
-
-        // For 64-bit values this is easy - just set immN to true, and imms just
-        // contains the bit number of the highest set bit of the set range. For
-        // values with narrower widths, these are encoded by a leading set of
-        // one bits, followed by a zero bit, followed by the remaining set of bits
-        // being the high bit of the range. For a 32-bit immediate there are no
-        // leading one bits, just a zero followed by a five bit number. For a
-        // 16-bit immediate there is one one bit, a zero bit, and then a four bit
-        // bit-position, etc.
-        if (width == 64)
-            immN = 1;
-        else
-            imms = 63 & ~(width + width - 1);
-
-        if (inverted) {
-            // if width is 64 & hsb is 62, then we have a value something like:
-            //   0x80000000ffffffff (in this case with lsb 32).
-            // The ror should be by 1, imms (effectively set width minus 1) is
-            // 32. Set width is full width minus cleared width.
-            immr = (width - 1) - hsb;
-            imms |= (width - ((hsb - lsb) + 1)) - 1;
-        } else {
-            // if width is 64 & hsb is 62, then we have a value something like:
-            //   0x7fffffff00000000 (in this case with lsb 32).
-            // The value is effectively rol'ed by lsb, which is equivalent to
-            // a ror by width - lsb (or 0, in the case where lsb is 0). imms
-            // is hsb - lsb.
-            immr = (width - lsb) & (width - 1);
-            imms |= hsb - lsb;
-        }
-
-        return immN << 12 | immr << 6 | imms;
-    }
-
-    static const int InvalidLogicalImmediate = -1;
+    operator int() { return m_value; }
 
+private:
     int m_value;
 };
 
+typedef ARM64LogicalImmediate LogicalImmediate;
+
 inline uint16_t getHalfword(uint64_t value, int which)
 {
     return value >> (which << 4);
 }
 
 namespace ARM64Registers {
-    typedef enum {
-        // Parameter/result registers
-        x0,
-        x1,
-        x2,
-        x3,
-        x4,
-        x5,
-        x6,
-        x7,
-        // Indirect result location register
-        x8,
-        // Temporary registers
-        x9,
-        x10,
-        x11,
-        x12,
-        x13,
-        x14,
-        x15,
-        // Intra-procedure-call scratch registers (temporary)
-        x16, ip0 = x16,
-        x17, ip1 = x17,
-        // Platform Register (temporary)
-        x18,
-        // Callee-saved
-        x19,
-        x20,
-        x21,
-        x22,
-        x23,
-        x24,
-        x25,
-        x26,
-        x27,
-        x28,
-        // Special
-        x29, fp = x29,
-        x30, lr = x30,
-        sp,
-        zr = 0x3f,
-    } RegisterID;
 
-    typedef enum {
-        // Parameter/result registers
-        q0,
-        q1,
-        q2,
-        q3,
-        q4,
-        q5,
-        q6,
-        q7,
-        // Callee-saved (up to 64-bits only!)
-        q8,
-        q9,
-        q10,
-        q11,
-        q12,
-        q13,
-        q14,
-        q15,
-        // Temporary registers
-        q16,
-        q17,
-        q18,
-        q19,
-        q20,
-        q21,
-        q22,
-        q23,
-        q24,
-        q25,
-        q26,
-        q27,
-        q28,
-        q29,
-        q30,
-        q31,
-    } FPRegisterID;
-
-    static bool isSp(RegisterID reg) { return reg == sp; }
-    static bool isZr(RegisterID reg) { return reg == zr; }
-}
+#define FOR_EACH_CPU_REGISTER(V) \
+    FOR_EACH_CPU_GPREGISTER(V) \
+    FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+    FOR_EACH_CPU_FPREGISTER(V)
+
+// Each register is defined as a pair of:
+// 1. the type of the storage needed to save the register value by the JIT probe.
+// 2. the name of the register.
+#define FOR_EACH_CPU_GPREGISTER(V) \
+    /* Parameter/result registers */ \
+    V(void*, x0) \
+    V(void*, x1) \
+    V(void*, x2) \
+    V(void*, x3) \
+    V(void*, x4) \
+    V(void*, x5) \
+    V(void*, x6) \
+    V(void*, x7) \
+    /* Indirect result location register */ \
+    V(void*, x8) \
+    /* Temporary registers */ \
+    V(void*, x9) \
+    V(void*, x10) \
+    V(void*, x11) \
+    V(void*, x12) \
+    V(void*, x13) \
+    V(void*, x14) \
+    V(void*, x15) \
+    /* Intra-procedure-call scratch registers (temporary) */ \
+    V(void*, x16) \
+    V(void*, x17) \
+    /* Platform Register (temporary) */ \
+    V(void*, x18) \
+    /* Callee-saved */ \
+    V(void*, x19) \
+    V(void*, x20) \
+    V(void*, x21) \
+    V(void*, x22) \
+    V(void*, x23) \
+    V(void*, x24) \
+    V(void*, x25) \
+    V(void*, x26) \
+    V(void*, x27) \
+    V(void*, x28) \
+    /* Special */ \
+    V(void*, fp) \
+    V(void*, lr) \
+    V(void*, sp)
+
+#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+    V(void*, pc) \
+    V(void*, nzcv) \
+    V(void*, fpsr) \
+
+// ARM64 always has 32 FPU registers 128-bits each. See http://llvm.org/devmtg/2012-11/Northover-AArch64.pdf
+// and Section 5.1.2 in http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf.
+// However, we only use them for 64-bit doubles.
+#define FOR_EACH_CPU_FPREGISTER(V) \
+    /* Parameter/result registers */ \
+    V(double, q0) \
+    V(double, q1) \
+    V(double, q2) \
+    V(double, q3) \
+    V(double, q4) \
+    V(double, q5) \
+    V(double, q6) \
+    V(double, q7) \
+    /* Callee-saved (up to 64-bits only!) */ \
+    V(double, q8) \
+    V(double, q9) \
+    V(double, q10) \
+    V(double, q11) \
+    V(double, q12) \
+    V(double, q13) \
+    V(double, q14) \
+    V(double, q15) \
+    /* Temporary registers */ \
+    V(double, q16) \
+    V(double, q17) \
+    V(double, q18) \
+    V(double, q19) \
+    V(double, q20) \
+    V(double, q21) \
+    V(double, q22) \
+    V(double, q23) \
+    V(double, q24) \
+    V(double, q25) \
+    V(double, q26) \
+    V(double, q27) \
+    V(double, q28) \
+    V(double, q29) \
+    V(double, q30) \
+    V(double, q31)
+
+typedef enum {
+    #define DECLARE_REGISTER(_type, _regName) _regName,
+    FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+    #undef DECLARE_REGISTER
+
+    ip0 = x16,
+    ip1 = x17,
+    x29 = fp,
+    x30 = lr,
+    zr = 0x3f,
+} RegisterID;
+
+typedef enum {
+    #define DECLARE_REGISTER(_type, _regName) _regName,
+    FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+    #undef DECLARE_REGISTER
+} FPRegisterID;
+
+static constexpr bool isSp(RegisterID reg) { return reg == sp; }
+static constexpr bool isZr(RegisterID reg) { return reg == zr; }
+
+} // namespace ARM64Registers
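The X-macro form lets clients such as the JIT probe stamp out per-register code in one place. A minimal sketch of such an expansion (the struct and helper macro names are hypothetical, not part of this patch):

    struct ProbeCPUState {
    #define DECLARE_REGISTER_FIELD(type, name) type name;
        FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER_FIELD)
        FOR_EACH_CPU_SPECIAL_REGISTER(DECLARE_REGISTER_FIELD)
        FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER_FIELD)
    #undef DECLARE_REGISTER_FIELD
    }; // one void* field per GP/special register, one double per FP register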
 
 class ARM64Assembler {
 public:
     typedef ARM64Registers::RegisterID RegisterID;
     typedef ARM64Registers::FPRegisterID FPRegisterID;
     
-    static RegisterID firstRegister() { return ARM64Registers::x0; }
-    static RegisterID lastRegister() { return ARM64Registers::x28; }
+    static constexpr RegisterID firstRegister() { return ARM64Registers::x0; }
+    static constexpr RegisterID lastRegister() { return ARM64Registers::sp; }
     
-    static FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
-    static FPRegisterID lastFPRegister() { return ARM64Registers::q31; }
+    static constexpr FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
+    static constexpr FPRegisterID lastFPRegister() { return ARM64Registers::q31; }
 
 private:
-    static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
-    static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }
+    static constexpr bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
+    static constexpr bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }
 
 public:
     ARM64Assembler()
@@ -583,9 +423,9 @@ public:
                 JumpType m_type : 8;
                 JumpLinkType m_linkType : 8;
                 Condition m_condition : 4;
-                bool m_is64Bit : 1;
                 unsigned m_bitNumber : 6;
-                RegisterID m_compareRegister : 5;
+                RegisterID m_compareRegister : 6;
+                bool m_is64Bit : 1;
             } realTypes;
             struct CopyTypes {
                 uint64_t content[3];
@@ -642,19 +482,12 @@ public:
     template<int datasize>
     static bool canEncodePImmOffset(int32_t offset)
     {
-        int32_t maxPImm = 4095 * (datasize / 8);
-        if (offset < 0)
-            return false;
-        if (offset > maxPImm)
-            return false;
-        if (offset & ((datasize / 8 ) - 1))
-            return false;
-        return true;
+        return isValidScaledUImm12(offset);
     }
 
     static bool canEncodeSImmOffset(int32_t offset)
     {
-        return isInt9(offset);
+        return isValidSignedImm9(offset);
     }
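The open-coded checks now live in AssemblerCommon.h. Assuming the helper keeps the semantics of the removed code, isValidScaledUImm12 amounts to the following sketch:

    // Mirror of the removed inline checks (illustrative, not the real helper):
    // unsigned, at most 4095 scaled units, and aligned to the transfer size.
    template<int datasize>
    static bool isValidScaledUImm12Sketch(int32_t offset)
    {
        int32_t maxPImm = 4095 * (datasize / 8);
        return offset >= 0 && offset <= maxPImm && !(offset & ((datasize / 8) - 1));
    }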
 
 private:
@@ -787,6 +620,22 @@ private:
         FPDataOp_FNMUL
     };
 
+    enum SIMD3Same {
+        SIMD_LogicalOp = 0x03
+    };
+
+    enum SIMD3SameLogical {
+        // This includes both the U bit and the "size" / opc for convenience.
+        SIMD_LogicalOp_AND = 0x00,
+        SIMD_LogicalOp_BIC = 0x01,
+        SIMD_LogicalOp_ORR = 0x02,
+        SIMD_LogicalOp_ORN = 0x03,
+        SIMD_LogicalOp_EOR = 0x80,
+        SIMD_LogicalOp_BSL = 0x81,
+        SIMD_LogicalOp_BIT = 0x82,
+        SIMD_LogicalOp_BIF = 0x83,
+    };
+
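Because vectorDataProcessingLogical() below shifts this field left by 22, the U bit of the 0x8x values lands at bit 29 and the size/opc bits at 23:22; for example:

    // EOR: uAndSize = 0x80 -> U = 1 (bit 29), size/opc = 0b00.
    // ORR: uAndSize = 0x02 -> U = 0, size/opc = 0b10.
    static_assert((0x80 << 22) == 0x20000000, "U bit of EOR reaches bit 29");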
     enum FPIntConvOp {
         FPIntConvOp_FCVTNS = 0x00,
         FPIntConvOp_FCVTNU = 0x01,
@@ -823,6 +672,16 @@ private:
         MemOp_LOAD_signed32 = 3 // size may be 0 or 1
     };
 
+    enum MemPairOpSize {
+        MemPairOp_32 = 0,
+        MemPairOp_LoadSigned_32 = 1,
+        MemPairOp_64 = 2,
+
+        MemPairOp_V32 = MemPairOp_32,
+        MemPairOp_V64 = 1,
+        MemPairOp_V128 = 2
+    };
+
     enum MoveWideOp {
         MoveWideOp_N = 0,
         MoveWideOp_Z = 2,
@@ -836,6 +695,14 @@ private:
         LdrLiteralOp_128BIT = 2
     };
 
+    static unsigned memPairOffsetShift(bool V, MemPairOpSize size)
+    {
+        // Returns the log2 of the size in bytes, e.g. a 64-bit size returns 3.
+        if (V)
+            return size + 2;
+        return (size >> 1) + 2;
+    }
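A few spot checks of the resulting scale (illustrative only):

    // Integer pairs: MemPairOp_32  -> (0 >> 1) + 2 == 2 (4-byte units),
    //                MemPairOp_64  -> (2 >> 1) + 2 == 3 (8-byte units).
    // Vector pairs:  MemPairOp_V128 -> 2 + 2 == 4 (16-byte units).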
+
 public:
     // Integer Instructions:
 
@@ -871,8 +738,9 @@ public:
     ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
     {
         CHECK_DATASIZE();
-        if (isSp(rn)) {
+        if (isSp(rd) || isSp(rn)) {
             ASSERT(shift == LSL);
+            ASSERT(!isSp(rm));
             add(rd, rn, rm, UXTX, amount);
         } else
             insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
@@ -887,6 +755,7 @@ public:
     {
         ASSERT(!(offset & 0xfff));
         insn(pcRelative(true, offset >> 12, rd));
+        nopCortexA53Fix843419();
     }
 
     template<int datasize>
@@ -1215,6 +1084,40 @@ public:
         insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
     }
 
+    // Only used for testing purposes.
+    void illegalInstruction()
+    {
+        insn(0x0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairOffset(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, pimm, rn, rt, rt2));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldnp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairNonTemporal(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, pimm, rn, rt, rt2));
+    }
+
     template<int datasize>
     ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
     {
@@ -1494,6 +1397,7 @@ public:
     ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
     {
         CHECK_DATASIZE();
+        nopCortexA53Fix835769();
         insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
     }
 
@@ -1546,6 +1450,7 @@ public:
     ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
     {
         CHECK_DATASIZE();
+        nopCortexA53Fix835769();
         insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
     }
 
@@ -1596,9 +1501,27 @@ public:
         insn(nopPseudo());
     }
     
-    ALWAYS_INLINE void dmbSY()
+    static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
+    {
+        RELEASE_ASSERT(!(size % sizeof(int32_t)));
+        size_t n = size / sizeof(int32_t);
+        for (int32_t* ptr = static_cast<int32_t*>(base); n--;) {
+            int insn = nopPseudo();
+            if (isCopyingToExecutableMemory)
+                performJITMemcpy(ptr++, &insn, sizeof(int));
+            else
+                memcpy(ptr++, &insn, sizeof(int));
+        }
+    }
+    
+    ALWAYS_INLINE void dmbISH()
+    {
+        insn(0xd5033bbf);
+    }
+
+    ALWAYS_INLINE void dmbISHST()
     {
-        insn(0xd5033fbf);
+        insn(0xd5033abf);
     }
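The raw words above differ only in the 4-bit barrier option field (bits 11:8); the removed dmbSY() word, 0xd5033fbf, fits the same pattern with option 0xf. A small sketch with the base encoding factored out:

    static constexpr uint32_t dmb(uint32_t option) { return 0xd50330bf | (option << 8); }
    static_assert(dmb(0xb) == 0xd5033bbf, "DMB ISH");
    static_assert(dmb(0xa) == 0xd5033abf, "DMB ISHST");
    static_assert(dmb(0xf) == 0xd5033fbf, "DMB SY");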
 
     template
@@ -1724,6 +1647,7 @@ public:
 
     ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
     {
+        nopCortexA53Fix835769<64>();
         insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
     }
 
@@ -1734,6 +1658,7 @@ public:
 
     ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
     {
+        nopCortexA53Fix835769<64>();
         insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
     }
 
@@ -1747,6 +1672,34 @@ public:
         smaddl(rd, rn, rm, ARM64Registers::zr);
     }
 
+    template<int datasize>
+    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairOffset(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, pimm, rn, rt, rt2));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void stnp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairNonTemporal(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, pimm, rn, rt, rt2));
+    }
+
     template<int datasize>
     ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
     {
@@ -1862,7 +1815,13 @@ public:
     template<int datasize, SetFlags setFlags = DontSetFlags>
     ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
     {
-        sub(rd, rn, rm, LSL, 0);
+        ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd; it uses XZR for register 31. SUBS with extended register supports SP for Xd, but only if SetFlags is not used; otherwise register 31 is Xd.");
+        ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand.");
+
+        if (isSp(rd) || isSp(rn))
+            sub(rd, rn, rm, UXTX, 0);
+        else
+            sub(rd, rn, rm, LSL, 0);
     }
 
     template
@@ -1876,11 +1835,8 @@ public:
     ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
     {
         CHECK_DATASIZE();
-        if (isSp(rn)) {
-            ASSERT(shift == LSL);
-            sub(rd, rn, rm, UXTX, amount);
-        } else
-            insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
+        ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm));
+        insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
     }
 
     template
@@ -1960,6 +1916,7 @@ public:
 
     ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
     {
+        nopCortexA53Fix835769<64>();
         insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
     }
 
@@ -1970,6 +1927,7 @@ public:
 
     ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
     {
+        nopCortexA53Fix835769<64>();
         insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
     }
 
@@ -2278,6 +2236,20 @@ public:
         insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
     }
 
+    template<int datasize>
+    ALWAYS_INLINE void vand(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_VECTOR_DATASIZE();
+        insn(vectorDataProcessingLogical(SIMD_LogicalOp_AND, vm, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void vorr(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_VECTOR_DATASIZE();
+        insn(vectorDataProcessingLogical(SIMD_LogicalOp_ORR, vm, vn, vd));
+    }
+
     template<int datasize>
     ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
     {
@@ -2494,13 +2466,6 @@ public:
         return b.m_offset - a.m_offset;
     }
 
-    int executableOffsetFor(int location)
-    {
-        if (!location)
-            return 0;
-        return static_cast(m_buffer.data())[location / sizeof(int32_t) - 1];
-    }
-
     void* unlinkedCode() { return m_buffer.data(); }
     size_t codeSize() const { return m_buffer.codeSize(); }
 
@@ -2539,23 +2504,23 @@ public:
         m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
     }
 
-    void linkJump(AssemblerLabel from, AssemblerLabel to)
+    void linkJump(AssemblerLabel from, void* executableCode, AssemblerLabel to)
     {
         ASSERT(from.isSet());
         ASSERT(to.isSet());
-        relinkJumpOrCall<false>(addressOf(from), addressOf(to));
+        relinkJumpOrCall<false>(addressOf(from), addressOf(executableCode, from), addressOf(to));
     }
     
     static void linkJump(void* code, AssemblerLabel from, void* to)
     {
         ASSERT(from.isSet());
-        relinkJumpOrCall<false>(addressOf(code, from), to);
+        relinkJumpOrCall<false>(addressOf(code, from), addressOf(code, from), to);
     }
 
     static void linkCall(void* code, AssemblerLabel from, void* to)
     {
         ASSERT(from.isSet());
-        linkJumpOrCall<true>(addressOf(code, from) - 1, to);
+        linkJumpOrCall<true>(addressOf(code, from) - 1, addressOf(code, from) - 1, to);
     }
 
     static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
@@ -2567,7 +2532,8 @@ public:
     {
         intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
         ASSERT(static_cast<int>(offset) == offset);
-        *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset));
+        int insn = unconditionalBranchImmediate(false, static_cast<int>(offset));
+        performJITMemcpy(where, &insn, sizeof(int));
         cacheFlush(where, sizeof(int));
     }
     
@@ -2575,6 +2541,11 @@ public:
     {
         return 4;
     }
+
+    static constexpr ptrdiff_t patchableJumpSize()
+    {
+        return 4;
+    }
     
     static void replaceWithLoad(void* where)
     {
@@ -2591,7 +2562,8 @@ public:
             ASSERT(!S);
             ASSERT(!shift);
             ASSERT(!(imm12 & ~0xff8));
-            *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
+            int insn = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
+            performJITMemcpy(where, &insn, sizeof(int));
             cacheFlush(where, sizeof(int));
         }
 #if !ASSERT_DISABLED
@@ -2624,7 +2596,8 @@ public:
             ASSERT(!V);
             ASSERT(opc == MemOp_LOAD);
             ASSERT(!(imm12 & ~0x1ff));
-            *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
+            int insn = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
+            performJITMemcpy(where, &insn, sizeof(int));
             cacheFlush(where, sizeof(int));
         }
 #if !ASSERT_DISABLED
@@ -2654,9 +2627,11 @@ public:
     static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
     {
         uintptr_t value = reinterpret_cast(valuePtr);
-        address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
-        address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
-        address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);
+        int buffer[3];
+        buffer[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
+        buffer[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+        buffer[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);
+        performJITMemcpy(address, buffer, sizeof(int) * 3);
 
         if (flush)
             cacheFlush(address, sizeof(int) * 3);
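setPointer() materializes the low 48 bits of the pointer as one MOVZ plus two MOVKs; for a concrete (illustrative) value:

    // valuePtr == 0x00007f1234abcdef assembles as:
    //   movz rd, #0xcdef             ; getHalfword(value, 0)
    //   movk rd, #0x34ab, lsl #16    ; getHalfword(value, 1)
    //   movk rd, #0x7f12, lsl #32    ; getHalfword(value, 2)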
@@ -2675,13 +2650,15 @@ public:
         ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
         ASSERT(checkMovk(address[1], 1, rd));
 
+        int buffer[2];
         if (value >= 0) {
-            address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
-            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+            buffer[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
+            buffer[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
         } else {
-            address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
-            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+            buffer[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
+            buffer[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
         }
+        performJITMemcpy(where, &buffer, sizeof(int) * 2);
 
         cacheFlush(where, sizeof(int) * 2);
     }
@@ -2716,15 +2693,25 @@ public:
         return readPointer(reinterpret_cast(from) - 4);
     }
 
+    // The static relink, repatch, and replace methods can use |from| for
+    // both the write and executable address for call and jump patching,
+    // as they're modifying existing (linked) code, so the address being
+    // provided is correct for relative address computation.
     static void relinkJump(void* from, void* to)
     {
-        relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
+        relinkJumpOrCall<false>(reinterpret_cast<int*>(from), reinterpret_cast<const int*>(from), to);
         cacheFlush(from, sizeof(int));
     }
     
+    static void relinkJumpToNop(void* from)
+    {
+        relinkJump(from, static_cast<char*>(from) + 4);
+    }
+    
     static void relinkCall(void* from, void* to)
     {
-        relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to);
+        relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, reinterpret_cast<const int*>(from) - 1, to);
         cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
     }
     
@@ -2745,17 +2732,42 @@ public:
             imm12 = encodePositiveImmediate<32>(value);
         else
             imm12 = encodePositiveImmediate<64>(value);
-        *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);
+        int insn = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);
+        performJITMemcpy(where, &insn, sizeof(int));
 
         cacheFlush(where, sizeof(int));
     }
 
     unsigned debugOffset() { return m_buffer.debugOffset(); }
 
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+    {
+        __builtin___clear_cache(reinterpret_cast<char*>(begin), reinterpret_cast<char*>(end));
+    }
+#endif
+
     static void cacheFlush(void* code, size_t size)
     {
 #if OS(IOS)
         sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
+        size_t page = pageSize();
+        uintptr_t current = reinterpret_cast<uintptr_t>(code);
+        uintptr_t end = current + size;
+        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+        if (end <= firstPageEnd) {
+            linuxPageFlush(current, end);
+            return;
+        }
+
+        linuxPageFlush(current, firstPageEnd);
+
+        for (current = firstPageEnd; current + page < end; current += page)
+            linuxPageFlush(current, current + page);
+
+        linuxPageFlush(current, end);
 #else
 #error "The cacheFlush support is missing on this platform."
 #endif
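The loop flushes at page granularity so no single linuxPageFlush() call spans a page boundary; for example, with page == 4096:

    // cacheFlush(code = 0x0ff0, size = 0x1020) issues:
    //   linuxPageFlush(0x0ff0, 0x1000)   // tail of the first page
    //   linuxPageFlush(0x1000, 0x2000)   // one whole page
    //   linuxPageFlush(0x2000, 0x2010)   // head of the last page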
@@ -2763,20 +2775,20 @@ public:
 
     // Assembler admin methods:
 
-    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
 
     static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
     {
         return a.from() < b.from();
     }
 
-    bool canCompact(JumpType jumpType)
+    static bool canCompact(JumpType jumpType)
     {
         // Fixed jumps cannot be compacted
         return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
     }
 
-    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
     {
         switch (jumpType) {
         case JumpFixed:
@@ -2828,51 +2840,43 @@ public:
         return LinkJumpNoCondition;
     }
 
-    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
     {
         JumpLinkType linkType = computeJumpType(record.type(), from, to);
         record.setLinkType(linkType);
         return linkType;
     }
 
-    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
-    {
-        int32_t ptr = regionStart / sizeof(int32_t);
-        const int32_t end = regionEnd / sizeof(int32_t);
-        int32_t* offsets = static_cast(m_buffer.data());
-        while (ptr < end)
-            offsets[ptr++] = offset;
-    }
-
     Vector& jumpsToLink()
     {
         std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
         return m_jumpsToLink;
     }
 
-    void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+    static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
     {
+        const int* fromInstruction = reinterpret_cast<const int*>(fromInstruction8);
         switch (record.linkType()) {
         case LinkJumpNoCondition:
-            linkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
+            linkJumpOrCall<false>(reinterpret_cast<int*>(from), fromInstruction, to);
             break;
         case LinkJumpConditionDirect:
-            linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to);
+            linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), fromInstruction, to);
             break;
         case LinkJumpCondition:
-            linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to);
+            linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
             break;
         case LinkJumpCompareAndBranchDirect:
-            linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to);
+            linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), fromInstruction, to);
             break;
         case LinkJumpCompareAndBranch:
-            linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
+            linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
             break;
         case LinkJumpTestBitDirect:
-            linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to);
+            linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), fromInstruction, to);
             break;
         case LinkJumpTestBit:
-            linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
+            linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
             break;
         default:
             ASSERT_NOT_REACHED();
@@ -2914,7 +2918,7 @@ private:
     }
 
     template<bool isCall = false>
-    static void linkJumpOrCall(int* from, void* to)
+    static void linkJumpOrCall(int* from, const int* fromInstruction, void* to)
     {
         bool link;
         int imm26;
@@ -2924,60 +2928,69 @@ private:
         ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
         ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
         ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
-        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
         ASSERT(static_cast<int>(offset) == offset);
 
-        *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
+        int insn = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
+        performJITMemcpy(from, &insn, sizeof(int));
     }
 
     template<bool isDirect = false>
-    static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to)
+    static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, const int* fromInstruction, void* to)
     {
         ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
         ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
-        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
         ASSERT(((offset << 38) >> 38) == offset);
 
         bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
         ASSERT(!isDirect || useDirect);
 
         if (useDirect || isDirect) {
-            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
-            if (!isDirect)
-                *(from + 1) = nopPseudo();
+            int insn = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
+            performJITMemcpy(from, &insn, sizeof(int));
+            if (!isDirect) {
+                insn = nopPseudo();
+                performJITMemcpy(from + 1, &insn, sizeof(int));
+            }
         } else {
-            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
-            linkJumpOrCall<false>(from + 1, to);
+            int insn = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
+            performJITMemcpy(from, &insn, sizeof(int));
+            linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
         }
     }
 
     template<bool isDirect = false>
-    static void linkConditionalBranch(Condition condition, int* from, void* to)
+    static void linkConditionalBranch(Condition condition, int* from, const int* fromInstruction, void* to)
     {
         ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
         ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
-        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
         ASSERT(((offset << 38) >> 38) == offset);
 
         bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
         ASSERT(!isDirect || useDirect);
 
         if (useDirect || isDirect) {
-            *from = conditionalBranchImmediate(static_cast<int>(offset), condition);
-            if (!isDirect)
-                *(from + 1) = nopPseudo();
+            int insn = conditionalBranchImmediate(static_cast<int>(offset), condition);
+            performJITMemcpy(from, &insn, sizeof(int));
+            if (!isDirect) {
+                insn = nopPseudo();
+                performJITMemcpy(from + 1, &insn, sizeof(int));
+            }
         } else {
-            *from = conditionalBranchImmediate(2, invert(condition));
-            linkJumpOrCall<false>(from + 1, to);
+            int insn = conditionalBranchImmediate(2, invert(condition));
+            performJITMemcpy(from, &insn, sizeof(int));
+            linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
         }
     }
 
     template<bool isDirect = false>
-    static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to)
+    static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, const int* fromInstruction, void* to)
     {
         ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
         ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
-        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
         ASSERT(static_cast<int>(offset) == offset);
         ASSERT(((offset << 38) >> 38) == offset);
 
@@ -2985,17 +2998,21 @@ private:
         ASSERT(!isDirect || useDirect);
 
         if (useDirect || isDirect) {
-            *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
-            if (!isDirect)
-                *(from + 1) = nopPseudo();
+            int insn = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
+            performJITMemcpy(from, &insn, sizeof(int));
+            if (!isDirect) {
+                insn = nopPseudo();
+                performJITMemcpy(from + 1, &insn, sizeof(int));
+            }
         } else {
-            *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
-            linkJumpOrCall<false>(from + 1, to);
+            int insn = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
+            performJITMemcpy(from, &insn, sizeof(int));
+            linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
         }
     }
 
     template<bool isCall = false>
-    static void relinkJumpOrCall(int* from, void* to)
+    static void relinkJumpOrCall(int* from, const int* fromInstruction, void* to)
     {
         if (!isCall && disassembleNop(from)) {
             unsigned op01;
@@ -3010,7 +3027,7 @@ private:
                 if (imm19 == 8)
                     condition = invert(condition);
 
-                linkConditionalBranch<false>(condition, from - 1, to);
+                linkConditionalBranch<false>(condition, from - 1, fromInstruction - 1, to);
                 return;
             }
 
@@ -3023,7 +3040,7 @@ private:
                 if (imm19 == 8)
                     op = !op;
 
-                linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to);
+                linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, fromInstruction - 1, to);
                 return;
             }
 
@@ -3035,12 +3052,12 @@ private:
                 if (imm14 == 8)
                     op = !op;
 
-                linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to);
+                linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, fromInstruction - 1, to);
                 return;
             }
         }
 
-        linkJumpOrCall<isCall>(from, to);
+        linkJumpOrCall<isCall>(from, fromInstruction, to);
     }
 
     static int* addressOf(void* code, AssemblerLabel label)
@@ -3124,7 +3141,7 @@ private:
         int insn = *static_cast<int*>(address);
         op = (insn >> 24) & 0x1;
         imm14 = (insn << 13) >> 18;
-        bitNumber = static_cast((((insn >> 26) & 0x20)) | ((insn > 19) & 0x1f));
+        bitNumber = static_cast((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
         rt = static_cast(insn & 0x1f);
         return (insn & 0x7e000000) == 0x36000000;
         
@@ -3138,8 +3155,18 @@ private:
         return (insn & 0x7c000000) == 0x14000000;
     }
 
-    static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; }
-    static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; }
+    static int xOrSp(RegisterID reg)
+    {
+        ASSERT(!isZr(reg));
+        ASSERT(!isIOS() || reg != ARM64Registers::x18);
+        return reg;
+    }
+    static int xOrZr(RegisterID reg)
+    {
+        ASSERT(!isSp(reg));
+        ASSERT(!isIOS() || reg != ARM64Registers::x18);
+        return reg & 31;
+    }
     static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
     static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }
 
@@ -3326,6 +3353,12 @@ private:
         return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
     }
 
+    ALWAYS_INLINE static int vectorDataProcessingLogical(SIMD3SameLogical uAndSize, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd)
+    {
+        const int Q = 0;
+        return (0xe200400 | Q << 30 | uAndSize << 22 | vm << 16 | SIMD_LogicalOp << 11 | vn << 5 | vd);
+    }
+
     // 'o1' means negate
     ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
     {
@@ -3360,6 +3393,23 @@ private:
         return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
     }
 
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+    {
+        ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
+        unsigned immedShiftAmount = memPairOffsetShift(V, size);
+        int imm7 = immediate >> immedShiftAmount;
+        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+        return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+    {
+        return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+    }
+
     // 'V' means vector
     ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
     {
@@ -3374,6 +3424,57 @@ private:
         return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
     }
 
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+    {
+        ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
+        unsigned immedShiftAmount = memPairOffsetShift(V, size);
+        int imm7 = immediate >> immedShiftAmount;
+        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+        return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+    {
+        return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPairOffset(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+    {
+        ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
+        unsigned immedShiftAmount = memPairOffsetShift(V, size);
+        int imm7 = immediate >> immedShiftAmount;
+        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+        return (0x29000000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPairOffset(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+    {
+        return loadStoreRegisterPairOffset(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+    }
+
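A worked example of the offset form, assuming MemOp_LOAD == 1 as elsewhere in this file: "ldp x0, x1, [sp, #16]" has size = MemPairOp_64, V = 0, imm7 = 16 >> 3 = 2, rn = sp (31):

    static_assert((0x29000000u | (2u << 30) | (1u << 22) | (2u << 15) | (1u << 10) | (31u << 5)) == 0xa94107e0u,
        "encoding of ldp x0, x1, [sp, #16]");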
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPairNonTemporal(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+    {
+        ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
+        unsigned immedShiftAmount = memPairOffsetShift(V, size);
+        int imm7 = immediate >> immedShiftAmount;
+        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+        return (0x28000000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPairNonTemporal(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+    {
+        return loadStoreRegisterPairNonTemporal(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+    }
+
     // 'V' means vector
     // 'S' means shift rm
     ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
@@ -3488,6 +3589,37 @@ private:
         return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
     }
 
+    // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the
+    // last instruction in the buffer is a load, store or prefetch. Needed
+    // before 64-bit multiply-accumulate instructions.
+    template<int datasize>
+    ALWAYS_INLINE void nopCortexA53Fix835769()
+    {
+#if CPU(ARM64_CORTEXA53)
+        CHECK_DATASIZE();
+        if (datasize == 64) {
+            if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) {
+                // From ARMv8 Reference Manual, Section C4.1: the encoding of the
+                // instructions in the Loads and stores instruction group is:
+                // ---- 1-0- ---- ---- ---- ---- ---- ----
+                if (UNLIKELY((*reinterpret_cast_ptr<int32_t*>(reinterpret_cast_ptr<char*>(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000))
+                    nop();
+            }
+        }
+#endif
+    }
+
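The group test above can also be read in isolation; a self-contained sketch of the predicate, with the mask taken from the C4.1 comment:

    #include <cstdint>
    // True for the "loads and stores" encoding group: ---- 1-0- ---- ....
    static bool isLoadStoreGroup(uint32_t insn)
    {
        return (insn & 0x0a000000) == 0x08000000;
    }
    // e.g. 0xf9400020 ("ldr x0, [x1]") matches; 0x8b010000 ("add x0, x0, x1") does not.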
+    // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid
+    // wrong address access after ADRP instruction.
+    ALWAYS_INLINE void nopCortexA53Fix843419()
+    {
+#if CPU(ARM64_CORTEXA53)
+        nop();
+        nop();
+        nop();
+#endif
+    }
+
     AssemblerBuffer m_buffer;
     Vector m_jumpsToLink;
     int m_indexOfLastWatchpoint;
@@ -3505,5 +3637,3 @@ private:
 #undef CHECK_FP_MEMOP_DATASIZE
 
 #endif // ENABLE(ASSEMBLER) && CPU(ARM64)
-
-#endif // ARM64Assembler_h
diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.cpp b/Source/JavaScriptCore/assembler/ARMAssembler.cpp
index f9100d4c9..552f37f68 100644
--- a/Source/JavaScriptCore/assembler/ARMAssembler.cpp
+++ b/Source/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -395,8 +395,6 @@ void ARMAssembler::prepareExecutableCopy(void* to)
 {
     // 64-bit alignment is required for next constant pool and JIT code as well
     m_buffer.flushWithoutBarrier(true);
-    if (!m_buffer.isAligned(8))
-        bkpt(0);
 
     char* data = reinterpret_cast(m_buffer.data());
     ptrdiff_t delta = reinterpret_cast(to) - data;
diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.h b/Source/JavaScriptCore/assembler/ARMAssembler.h
index 087d31c14..6fba9ed18 100644
--- a/Source/JavaScriptCore/assembler/ARMAssembler.h
+++ b/Source/JavaScriptCore/assembler/ARMAssembler.h
@@ -24,8 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef ARMAssembler_h
-#define ARMAssembler_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
 
@@ -36,62 +35,6 @@ namespace JSC {
 
     typedef uint32_t ARMWord;
 
-    namespace ARMRegisters {
-        typedef enum {
-            r0 = 0,
-            r1,
-            r2,
-            r3,
-            r4,
-            r5,
-            r6, S0 = r6,
-            r7,
-            r8,
-            r9,
-            r10,
-            r11, fp = r11, // frame pointer
-            r12, ip = r12, S1 = r12,
-            r13, sp = r13,
-            r14, lr = r14,
-            r15, pc = r15
-        } RegisterID;
-
-        typedef enum {
-            d0,
-            d1,
-            d2,
-            d3,
-            d4,
-            d5,
-            d6,
-            d7, SD0 = d7, /* Same as thumb assembler. */
-            d8,
-            d9,
-            d10,
-            d11,
-            d12,
-            d13,
-            d14,
-            d15,
-            d16,
-            d17,
-            d18,
-            d19,
-            d20,
-            d21,
-            d22,
-            d23,
-            d24,
-            d25,
-            d26,
-            d27,
-            d28,
-            d29,
-            d30,
-            d31
-        } FPRegisterID;
-
-#if USE(MASM_PROBE)
     #define FOR_EACH_CPU_REGISTER(V) \
         FOR_EACH_CPU_GPREGISTER(V) \
         FOR_EACH_CPU_SPECIAL_REGISTER(V) \
@@ -109,11 +52,11 @@ namespace JSC {
         V(void*, r8) \
         V(void*, r9) \
         V(void*, r10) \
-        V(void*, r11) \
+        V(void*, fp) \
         V(void*, ip) \
         V(void*, sp) \
         V(void*, lr) \
-        V(void*, pc)
+        V(void*, pc) \
 
     #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
         V(void*, apsr) \
@@ -135,8 +78,49 @@ namespace JSC {
         V(double, d12) \
         V(double, d13) \
         V(double, d14) \
-        V(double, d15)
-#endif // USE(MASM_PROBE)
+        V(double, d15) \
+        V(double, d16) \
+        V(double, d17) \
+        V(double, d18) \
+        V(double, d19) \
+        V(double, d20) \
+        V(double, d21) \
+        V(double, d22) \
+        V(double, d23) \
+        V(double, d24) \
+        V(double, d25) \
+        V(double, d26) \
+        V(double, d27) \
+        V(double, d28) \
+        V(double, d29) \
+        V(double, d30) \
+        V(double, d31) \
+
+    namespace ARMRegisters {
+
+        typedef enum {
+            #define DECLARE_REGISTER(_type, _regName) _regName,
+            FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+            #undef DECLARE_REGISTER
+
+            // Pseudonyms for some of the registers.
+            S0 = r6,
+            r11 = fp, // frame pointer
+            r12 = ip, S1 = ip,
+            r13 = sp,
+            r14 = lr,
+            r15 = pc
+        } RegisterID;
+
+        typedef enum {
+            #define DECLARE_REGISTER(_type, _regName) _regName,
+            FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+            #undef DECLARE_REGISTER
+
+            // Pseudonyms for some of the registers.
+            SD0 = d7, /* Same as thumb assembler. */
+        } FPRegisterID;
+
     } // namespace ARMRegisters
 
     class ARMAssembler {
@@ -153,11 +137,11 @@ namespace JSC {
 
         ARMBuffer& buffer() { return m_buffer; }
 
-        static RegisterID firstRegister() { return ARMRegisters::r0; }
-        static RegisterID lastRegister() { return ARMRegisters::r15; }
+        static constexpr RegisterID firstRegister() { return ARMRegisters::r0; }
+        static constexpr RegisterID lastRegister() { return ARMRegisters::r15; }
 
-        static FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
-        static FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
+        static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
+        static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
 
         // ARM conditional constants
         typedef enum {
@@ -231,6 +215,11 @@ namespace JSC {
 #endif
             NOP = 0xe1a00000,
             DMB_SY = 0xf57ff05f,
+            DMB_ISHST = 0xf57ff05a,
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+            SDIV = 0x0710f010,
+            UDIV = 0x0730f010,
+#endif
         };
 
         enum {
@@ -492,6 +481,26 @@ namespace JSC {
             m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
         }
 
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+        template<int datasize>
+        void sdiv(int rd, int rn, int rm, Condition cc = AL)
+        {
+            static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
+            ASSERT(rd != ARMRegisters::pc);
+            ASSERT(rn != ARMRegisters::pc);
+            ASSERT(rm != ARMRegisters::pc);
+            m_buffer.putInt(toARMWord(cc) | SDIV | RN(rd) | RM(rn) | RS(rm));
+        }
+
+        void udiv(int rd, int rn, int rm, Condition cc = AL)
+        {
+            ASSERT(rd != ARMRegisters::pc);
+            ASSERT(rn != ARMRegisters::pc);
+            ASSERT(rm != ARMRegisters::pc);
+            m_buffer.putInt(toARMWord(cc) | UDIV | RN(rd) | RM(rn) | RS(rm));
+        }
+#endif
+
         void vmov_f64(int dd, int dm, Condition cc = AL)
         {
             emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm);
@@ -697,11 +706,28 @@ namespace JSC {
             m_buffer.putInt(NOP);
         }
 
+        static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
+        {
+            UNUSED_PARAM(isCopyingToExecutableMemory);
+            RELEASE_ASSERT(!(size % sizeof(int32_t)));
+
+            int32_t* ptr = static_cast<int32_t*>(base);
+            const size_t num32s = size / sizeof(int32_t);
+            const int32_t insn = NOP;
+            for (size_t i = 0; i < num32s; i++)
+                *ptr++ = insn;
+        }
+
         void dmbSY()
         {
             m_buffer.putInt(DMB_SY);
         }
 
+        void dmbISHST()
+        {
+            m_buffer.putInt(DMB_ISHST);
+        }
+
         void bx(int rm, Condition cc = AL)
         {
             emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm));
@@ -940,6 +966,11 @@ namespace JSC {
             patchPointerInternal(getAbsoluteJumpAddress(from), to);
         }
 
+        static void relinkJumpToNop(void* from)
+        {
+            relinkJump(from, from);
+        }
+
         static void linkCall(void* code, AssemblerLabel from, void* to)
         {
             patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
@@ -981,6 +1012,11 @@ namespace JSC {
             return sizeof(ARMWord) * 2;
         }
 
+        static constexpr ptrdiff_t patchableJumpSize()
+        {
+            return sizeof(ARMWord) * 3;
+        }
+
         static void replaceWithLoad(void* instructionStart)
         {
             ARMWord* instruction = reinterpret_cast(instructionStart);
@@ -1082,7 +1118,7 @@ namespace JSC {
             return AL | B | (offset & BranchOffsetMask);
         }
 
-#if OS(LINUX) && COMPILER(GCC)
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
         static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
         {
             asm volatile(
@@ -1102,7 +1138,7 @@ namespace JSC {
 
         static void cacheFlush(void* code, size_t size)
         {
-#if OS(LINUX) && COMPILER(GCC)
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
             size_t page = pageSize();
             uintptr_t current = reinterpret_cast(code);
             uintptr_t end = current + size;
@@ -1119,8 +1155,6 @@ namespace JSC {
                 linuxPageFlush(current, current + page);
 
             linuxPageFlush(current, end);
-#elif OS(WINCE)
-            CacheRangeFlush(code, size, CACHE_SYNC_ALL);
 #else
 #error "The cacheFlush support is missing on this platform."
 #endif
@@ -1176,5 +1210,3 @@ namespace JSC {
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
-
-#endif // ARMAssembler_h
diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp b/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp
deleted file mode 100644
index faca66421..000000000
--- a/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
-
-#include "ARMv7Assembler.h"
-
-namespace JSC {
-
-}
-
-#endif
diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
index 5257f32a8..86218ea72 100644
--- a/Source/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
  * Copyright (C) 2010 University of Szeged
  *
  * Redistribution and use in source and binary forms, with or without
@@ -24,12 +24,12 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ARMAssembler_h
-#define ARMAssembler_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
 
 #include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
 #include 
 #include 
 #include 
@@ -38,23 +38,83 @@
 namespace JSC {
 
 namespace ARMRegisters {
+
+    #define FOR_EACH_CPU_REGISTER(V) \
+        FOR_EACH_CPU_GPREGISTER(V) \
+        FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+        FOR_EACH_CPU_FPREGISTER(V)
+
+    // Each register is defined as a pair of:
+    // 1. the type of the storage needed to save the register value by the JIT probe.
+    // 2. the name of the register.
+    #define FOR_EACH_CPU_GPREGISTER(V) \
+        V(void*, r0) \
+        V(void*, r1) \
+        V(void*, r2) \
+        V(void*, r3) \
+        V(void*, r4) \
+        V(void*, r5) \
+        V(void*, r6) \
+        V(void*, r7) \
+        V(void*, r8) \
+        V(void*, r9) \
+        V(void*, r10) \
+        V(void*, r11) \
+        V(void*, ip) \
+        V(void*, sp) \
+        V(void*, lr) \
+        V(void*, pc)
+
+    #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+        V(void*, apsr) \
+        V(void*, fpscr) \
+
+    #define FOR_EACH_CPU_FPREGISTER(V) \
+        V(double, d0) \
+        V(double, d1) \
+        V(double, d2) \
+        V(double, d3) \
+        V(double, d4) \
+        V(double, d5) \
+        V(double, d6) \
+        V(double, d7) \
+        V(double, d8) \
+        V(double, d9) \
+        V(double, d10) \
+        V(double, d11) \
+        V(double, d12) \
+        V(double, d13) \
+        V(double, d14) \
+        V(double, d15) \
+        V(double, d16) \
+        V(double, d17) \
+        V(double, d18) \
+        V(double, d19) \
+        V(double, d20) \
+        V(double, d21) \
+        V(double, d22) \
+        V(double, d23) \
+        V(double, d24) \
+        V(double, d25) \
+        V(double, d26) \
+        V(double, d27) \
+        V(double, d28) \
+        V(double, d29) \
+        V(double, d30) \
+        V(double, d31)
+
     typedef enum {
-        r0,
-        r1,
-        r2,
-        r3,
-        r4,
-        r5,
-        r6,
-        r7, fp = r7,   // frame pointer
-        r8,
-        r9, sb = r9,   // static base
-        r10, sl = r10, // stack limit
-        r11,
-        r12, ip = r12,
-        r13, sp = r13,
-        r14, lr = r14,
-        r15, pc = r15,
+        #define DECLARE_REGISTER(_type, _regName) _regName,
+        FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+        #undef DECLARE_REGISTER
+
+        fp = r7,   // frame pointer
+        sb = r9,   // static base
+        sl = r10,  // stack limit
+        r12 = ip,
+        r13 = sp,
+        r14 = lr,
+        r15 = pc
     } RegisterID;
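
(Aside: a minimal, self-contained sketch of the X-macro pattern used above — a toy register list, not the real FOR_EACH_CPU_GPREGISTER — showing how a single list drives both the enumerators and a matching name table:)

    #include <cstdio>

    #define FOR_EACH_TOY_REGISTER(V) \
        V(void*, r0) \
        V(void*, r1) \
        V(void*, sp)

    enum ToyRegisterID {
        #define DECLARE_REGISTER(_type, _regName) _regName,
        FOR_EACH_TOY_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER
    };

    static const char* const toyRegisterNames[] = {
        #define DECLARE_REGISTER(_type, _regName) #_regName,
        FOR_EACH_TOY_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER
    };

    int main()
    {
        // Enum values and names come from one list, so they cannot drift apart.
        printf("%s = %d\n", toyRegisterNames[sp], sp); // prints "sp = 2"
    }
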
 
     typedef enum {
@@ -93,38 +153,9 @@ namespace ARMRegisters {
     } FPSingleRegisterID;
 
     typedef enum {
-        d0,
-        d1,
-        d2,
-        d3,
-        d4,
-        d5,
-        d6,
-        d7,
-        d8,
-        d9,
-        d10,
-        d11,
-        d12,
-        d13,
-        d14,
-        d15,
-        d16,
-        d17,
-        d18,
-        d19,
-        d20,
-        d21,
-        d22,
-        d23,
-        d24,
-        d25,
-        d26,
-        d27,
-        d28,
-        d29,
-        d30,
-        d31,
+        #define DECLARE_REGISTER(_type, _regName) _regName,
+        FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+        #undef DECLARE_REGISTER
     } FPDoubleRegisterID;
 
     typedef enum {
@@ -174,77 +205,7 @@ namespace ARMRegisters {
         return (FPDoubleRegisterID)(reg >> 1);
     }
 
-#if USE(MASM_PROBE)
-    #define FOR_EACH_CPU_REGISTER(V) \
-        FOR_EACH_CPU_GPREGISTER(V) \
-        FOR_EACH_CPU_SPECIAL_REGISTER(V) \
-        FOR_EACH_CPU_FPREGISTER(V)
-
-    #define FOR_EACH_CPU_GPREGISTER(V) \
-        V(void*, r0) \
-        V(void*, r1) \
-        V(void*, r2) \
-        V(void*, r3) \
-        V(void*, r4) \
-        V(void*, r5) \
-        V(void*, r6) \
-        V(void*, r7) \
-        V(void*, r8) \
-        V(void*, r9) \
-        V(void*, r10) \
-        V(void*, r11) \
-        V(void*, ip) \
-        V(void*, sp) \
-        V(void*, lr) \
-        V(void*, pc)
-
-    #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
-        V(void*, apsr) \
-        V(void*, fpscr) \
-
-    #define FOR_EACH_CPU_FPREGISTER(V) \
-        V(double, d0) \
-        V(double, d1) \
-        V(double, d2) \
-        V(double, d3) \
-        V(double, d4) \
-        V(double, d5) \
-        V(double, d6) \
-        V(double, d7) \
-        V(double, d8) \
-        V(double, d9) \
-        V(double, d10) \
-        V(double, d11) \
-        V(double, d12) \
-        V(double, d13) \
-        V(double, d14) \
-        V(double, d15) \
-        FOR_EACH_CPU_FPREGISTER_EXTENSION(V)
-
-#if CPU(APPLE_ARMV7S)
-    #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) \
-        V(double, d16) \
-        V(double, d17) \
-        V(double, d18) \
-        V(double, d19) \
-        V(double, d20) \
-        V(double, d21) \
-        V(double, d22) \
-        V(double, d23) \
-        V(double, d24) \
-        V(double, d25) \
-        V(double, d26) \
-        V(double, d27) \
-        V(double, d28) \
-        V(double, d29) \
-        V(double, d30) \
-        V(double, d31)
-#else
-    #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) // Nothing to add.
-#endif // CPU(APPLE_ARMV7S)
-
-#endif // USE(MASM_PROBE)
-}
+} // namespace ARMRegisters
 
 class ARMv7Assembler;
 class ARMThumbImmediate {
@@ -492,11 +453,11 @@ public:
     typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
     typedef FPDoubleRegisterID FPRegisterID;
     
-    static RegisterID firstRegister() { return ARMRegisters::r0; }
-    static RegisterID lastRegister() { return ARMRegisters::r13; }
-    
-    static FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
-    static FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
+    static constexpr RegisterID firstRegister() { return ARMRegisters::r0; }
+    static constexpr RegisterID lastRegister() { return ARMRegisters::r13; }
+
+    static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
+    static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
 
     // (HS, LO, HI, LS) -> (AE, B, A, BE)
     // (VS, VC) -> (O, NO)
@@ -583,6 +544,8 @@ public:
     {
     }
 
+    AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
+
 private:
 
     // ARMv7, Appx-A.6.3
@@ -646,6 +609,8 @@ private:
         OP_ADD_SP_imm_T1    = 0xA800,
         OP_ADD_SP_imm_T2    = 0xB000,
         OP_SUB_SP_imm_T1    = 0xB080,
+        OP_PUSH_T1          = 0xB400,
+        OP_POP_T1           = 0xBC00,
         OP_BKPT             = 0xBE00,
         OP_IT               = 0xBF00,
         OP_NOP_T1           = 0xBF00,
@@ -654,6 +619,8 @@ private:
     typedef enum {
         OP_B_T1         = 0xD000,
         OP_B_T2         = 0xE000,
+        OP_POP_T2       = 0xE8BD,
+        OP_PUSH_T2      = 0xE92D,
         OP_AND_reg_T2   = 0xEA00,
         OP_TST_reg_T2   = 0xEA10,
         OP_ORR_reg_T2   = 0xEA40,
@@ -714,7 +681,7 @@ private:
         OP_MOVT         = 0xF2C0,
         OP_UBFX_T1      = 0xF3C0,
         OP_NOP_T2a      = 0xF3AF,
-        OP_DMB_SY_T2a   = 0xF3BF,
+        OP_DMB_T1a      = 0xF3BF,
         OP_STRB_imm_T3  = 0xF800,
         OP_STRB_reg_T2  = 0xF800,
         OP_LDRB_imm_T3  = 0xF810,
@@ -741,39 +708,40 @@ private:
         OP_ROR_reg_T2   = 0xFA60,
         OP_CLZ          = 0xFAB0,
         OP_SMULL_T1     = 0xFB80,
-#if CPU(APPLE_ARMV7S)
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
         OP_SDIV_T1      = 0xFB90,
         OP_UDIV_T1      = 0xFBB0,
 #endif
     } OpcodeID1;
 
     typedef enum {
-        OP_VADD_T2b     = 0x0A00,
-        OP_VDIVb        = 0x0A00,
-        OP_FLDSb        = 0x0A00,
-        OP_VLDRb        = 0x0A00,
-        OP_VMOV_IMM_T2b = 0x0A00,
-        OP_VMOV_T2b     = 0x0A40,
-        OP_VMUL_T2b     = 0x0A00,
-        OP_FSTSb        = 0x0A00,
-        OP_VSTRb        = 0x0A00,
-        OP_VMOV_StoCb   = 0x0A10,
-        OP_VMOV_CtoSb   = 0x0A10,
-        OP_VMOV_DtoCb   = 0x0A10,
-        OP_VMOV_CtoDb   = 0x0A10,
-        OP_VMRSb        = 0x0A10,
-        OP_VABS_T2b     = 0x0A40,
-        OP_VCMPb        = 0x0A40,
-        OP_VCVT_FPIVFPb = 0x0A40,
-        OP_VNEG_T2b     = 0x0A40,
-        OP_VSUB_T2b     = 0x0A40,
-        OP_VSQRT_T1b    = 0x0A40,
-        OP_VCVTSD_T1b   = 0x0A40,
-        OP_VCVTDS_T1b   = 0x0A40,
-        OP_NOP_T2b      = 0x8000,
-        OP_DMB_SY_T2b   = 0x8F5F,
-        OP_B_T3b        = 0x8000,
-        OP_B_T4b        = 0x9000,
+        OP_VADD_T2b      = 0x0A00,
+        OP_VDIVb         = 0x0A00,
+        OP_FLDSb         = 0x0A00,
+        OP_VLDRb         = 0x0A00,
+        OP_VMOV_IMM_T2b  = 0x0A00,
+        OP_VMOV_T2b      = 0x0A40,
+        OP_VMUL_T2b      = 0x0A00,
+        OP_FSTSb         = 0x0A00,
+        OP_VSTRb         = 0x0A00,
+        OP_VMOV_StoCb    = 0x0A10,
+        OP_VMOV_CtoSb    = 0x0A10,
+        OP_VMOV_DtoCb    = 0x0A10,
+        OP_VMOV_CtoDb    = 0x0A10,
+        OP_VMRSb         = 0x0A10,
+        OP_VABS_T2b      = 0x0A40,
+        OP_VCMPb         = 0x0A40,
+        OP_VCVT_FPIVFPb  = 0x0A40,
+        OP_VNEG_T2b      = 0x0A40,
+        OP_VSUB_T2b      = 0x0A40,
+        OP_VSQRT_T1b     = 0x0A40,
+        OP_VCVTSD_T1b    = 0x0A40,
+        OP_VCVTDS_T1b    = 0x0A40,
+        OP_NOP_T2b       = 0x8000,
+        OP_DMB_SY_T1b    = 0x8F5F,
+        OP_DMB_ISHST_T1b = 0x8F5A,
+        OP_B_T3b         = 0x8000,
+        OP_B_T4b         = 0x9000,
     } OpcodeID2;
 
     struct FourFours {
@@ -799,11 +767,11 @@ private:
     class ARMInstructionFormatter;
 
     // false means else!
-    bool ifThenElseConditionBit(Condition condition, bool isIf)
+    static bool ifThenElseConditionBit(Condition condition, bool isIf)
     {
         return isIf ? (condition & 1) : !(condition & 1);
     }
-    uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+    static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
     {
         int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
             | (ifThenElseConditionBit(condition, inst3if) << 2)
@@ -812,7 +780,7 @@ private:
         ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
         return (condition << 4) | mask;
     }
-    uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+    static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
     {
         int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
             | (ifThenElseConditionBit(condition, inst3if) << 2)
@@ -820,7 +788,7 @@ private:
         ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
         return (condition << 4) | mask;
     }
-    uint8_t ifThenElse(Condition condition, bool inst2if)
+    static uint8_t ifThenElse(Condition condition, bool inst2if)
     {
         int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
             | 4;
@@ -828,7 +796,7 @@ private:
         return (condition << 4) | mask;
     }
 
-    uint8_t ifThenElse(Condition condition)
+    static uint8_t ifThenElse(Condition condition)
     {
         int mask = 8;
         return (condition << 4) | mask;
@@ -855,7 +823,7 @@ public:
         ASSERT(rn != ARMRegisters::pc);
         ASSERT(imm.isValid());
 
-        if (rn == ARMRegisters::sp) {
+        if (rn == ARMRegisters::sp && imm.isUInt16()) {
             ASSERT(!(imm.getUInt16() & 3));
             if (!(rd & 8) && imm.isUInt10()) {
                 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
@@ -894,6 +862,11 @@ public:
     // NOTE: In an IT block, add doesn't modify the flags register.
     ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
     {
+        if (rd == ARMRegisters::sp) {
+            mov(rd, rn);
+            rn = rd;
+        }
+
         if (rd == rn)
             m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
         else if (rd == rm)
@@ -1183,9 +1156,10 @@ public:
     {
         ASSERT(rn != ARMRegisters::pc); // LDR (literal)
         ASSERT(imm.isUInt12());
+        ASSERT(!(imm.getUInt12() & 1));
 
         if (!((rt | rn) & 8) && imm.isUInt6())
-            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
         else
             m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
     }
@@ -1353,11 +1327,14 @@ public:
         uint16_t* address = static_cast<uint16_t*>(instructionStart);
         ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
         ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
-        address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
-        address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
-        address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
-        address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
-        address[4] = OP_CMP_reg_T2 | left;
+        uint16_t instruction[] = {
+            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16),
+            twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16),
+            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16),
+            twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16),
+            static_cast<uint16_t>(OP_CMP_reg_T2 | left)
+        };
+        performJITMemcpy(address, instruction, sizeof(uint16_t) * 5);
         cacheFlush(address, sizeof(uint16_t) * 5);
     }
 #else
@@ -1368,8 +1345,11 @@ public:
         ASSERT(!BadReg(rd));
         
         uint16_t* address = static_cast<uint16_t*>(instructionStart);
-        address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
-        address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
+        uint16_t instruction[] = {
+            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm),
+            twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm)
+        };
+        performJITMemcpy(address, instruction, sizeof(uint16_t) * 2);
         cacheFlush(address, sizeof(uint16_t) * 2);
     }
 #endif
@@ -1488,9 +1468,49 @@ public:
         m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
     }
 
-#if CPU(APPLE_ARMV7S)
+    ALWAYS_INLINE void pop(RegisterID dest)
+    {
+        if (dest < ARMRegisters::r8)
+            m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
+        else {
+            // Load postindexed with writeback.
+            ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+        }
+    }
+
+    ALWAYS_INLINE void pop(uint32_t registerList)
+    {
+        ASSERT(WTF::bitCount(registerList) > 1);
+        ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
+        ASSERT(!((1 << ARMRegisters::sp) & registerList));
+        m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
+    }
+
+    ALWAYS_INLINE void push(RegisterID src)
+    {
+        if (src < ARMRegisters::r8)
+            m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
+        else if (src == ARMRegisters::lr)
+            m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
+        else {
+            // Store preindexed with writeback.
+            str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+        }
+    }
+
+    ALWAYS_INLINE void push(uint32_t registerList)
+    {
+        ASSERT(WTF::bitCount(registerList) > 1);
+        ASSERT(!((1 << ARMRegisters::pc) & registerList));
+        ASSERT(!((1 << ARMRegisters::sp) & registerList));
+        m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
+    }
+
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+    template<int datasize>
     ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
     {
+        static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");        
         ASSERT(!BadReg(rd));
         ASSERT(!BadReg(rn));
         ASSERT(!BadReg(rm));
@@ -1635,8 +1655,8 @@ public:
         ASSERT(rn != ARMRegisters::pc);
         ASSERT(imm.isUInt12());
         
-        if (!((rt | rn) & 8) && imm.isUInt7())
-            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
+        if (!((rt | rn) & 8) && imm.isUInt6())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
         else
             m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
     }
@@ -1834,7 +1854,7 @@ public:
         m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
     }
 
-#if CPU(APPLE_ARMV7S)
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
     ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
     {
         ASSERT(!BadReg(rd));
@@ -1984,9 +2004,51 @@ public:
         m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
     }
     
+    static constexpr int16_t nopPseudo16()
+    {
+        return OP_NOP_T1;
+    }
+
+    static constexpr int32_t nopPseudo32()
+    {
+        return OP_NOP_T2a | (OP_NOP_T2b << 16);
+    }
+
+    static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
+    {
+        RELEASE_ASSERT(!(size % sizeof(int16_t)));
+
+        char* ptr = static_cast<char*>(base);
+        const size_t num32s = size / sizeof(int32_t);
+        for (size_t i = 0; i < num32s; i++) {
+            const int32_t insn = nopPseudo32();
+            if (isCopyingToExecutableMemory)
+                performJITMemcpy(ptr, &insn, sizeof(int32_t));
+            else
+                memcpy(ptr, &insn, sizeof(int32_t));
+            ptr += sizeof(int32_t);
+        }
+
+        const size_t num16s = (size % sizeof(int32_t)) / sizeof(int16_t);
+        ASSERT(num16s == 0 || num16s == 1);
+        ASSERT(num16s * sizeof(int16_t) + num32s * sizeof(int32_t) == size);
+        if (num16s) {
+            const int16_t insn = nopPseudo16();
+            if (isCopyingToExecutableMemory)
+                performJITMemcpy(ptr, &insn, sizeof(int16_t));
+            else
+                memcpy(ptr, &insn, sizeof(int16_t));
+        }
+    }
+
     void dmbSY()
     {
-        m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b);
+        m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_SY_T1b);
+    }
+
+    void dmbISHST()
+    {
+        m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_ISHST_T1b);
     }
 
     AssemblerLabel labelIgnoringWatchpoints()
@@ -2036,14 +2098,7 @@ public:
         return b.m_offset - a.m_offset;
     }
 
-    int executableOffsetFor(int location)
-    {
-        if (!location)
-            return 0;
-        return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
-    }
-    
-    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
     
     // Assembler admin methods:
 
@@ -2052,7 +2107,7 @@ public:
         return a.from() < b.from();
     }
 
-    bool canCompact(JumpType jumpType)
+    static bool canCompact(JumpType jumpType)
     {
         // The following cannot be compacted:
         //   JumpFixed: represents custom jump sequence
@@ -2061,7 +2116,7 @@ public:
         return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
     }
     
-    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
     {
         if (jumpType == JumpFixed)
             return LinkInvalid;
@@ -2105,51 +2160,43 @@ public:
         return LinkConditionalBX;
     }
     
-    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
     {
         JumpLinkType linkType = computeJumpType(record.type(), from, to);
         record.setLinkType(linkType);
         return linkType;
     }
     
-    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
-    {
-        int32_t ptr = regionStart / sizeof(int32_t);
-        const int32_t end = regionEnd / sizeof(int32_t);
-        int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
-        while (ptr < end)
-            offsets[ptr++] = offset;
-    }
-    
     Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
     {
         std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
         return m_jumpsToLink;
     }
 
-    void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+    static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
     {
+        const uint16_t* fromInstruction = reinterpret_cast_ptr<const uint16_t*>(fromInstruction8);
         switch (record.linkType()) {
         case LinkJumpT1:
-            linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+            linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
             break;
         case LinkJumpT2:
-            linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
+            linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
             break;
         case LinkJumpT3:
-            linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+            linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
             break;
         case LinkJumpT4:
-            linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
+            linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
             break;
         case LinkConditionalJumpT4:
-            linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+            linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
             break;
         case LinkConditionalBX:
-            linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+            linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
             break;
         case LinkBX:
-            linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
+            linkBX(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
             break;
         default:
             RELEASE_ASSERT_NOT_REACHED();
@@ -2186,7 +2233,7 @@ public:
         ASSERT(from.isSet());
         
         uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
-        linkJumpAbsolute(location, to);
+        linkJumpAbsolute(location, location, to);
     }
 
     static void linkCall(void* code, AssemblerLabel from, void* to)
@@ -2202,15 +2249,24 @@ public:
         setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
     }
 
+    // The static relink and replace methods can use |from| for both
+    // the write and executable address for call and jump patching
+    // as they're modifying existing (linked) code, so the address being
+    // provided is correct for relative address computation.
     static void relinkJump(void* from, void* to)
     {
         ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
         ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
 
-        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
+        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), reinterpret_cast<uint16_t*>(from), to);
 
         cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
     }
+
+    static void relinkJumpToNop(void* from)
+    {
+        relinkJump(from, from);
+    }
     
     static void relinkCall(void* from, void* to)
     {
@@ -2246,8 +2302,9 @@ public:
         offset |= (1 << 11);
 
         uint16_t* location = reinterpret_cast<uint16_t*>(where);
-        location[1] &= ~((1 << 12) - 1);
-        location[1] |= offset;
+        uint16_t instruction = location[1] & ~((1 << 12) - 1);
+        instruction |= offset;
+        performJITMemcpy(location + 1, &instruction, sizeof(uint16_t));
         cacheFlush(location, sizeof(uint16_t) * 2);
     }
 
@@ -2271,16 +2328,16 @@ public:
 #if OS(LINUX)
         if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
             uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
-            linkJumpT4(ptr, to);
+            linkJumpT4(ptr, ptr, to);
             cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
         } else {
             uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
-            linkBX(ptr, to);
+            linkBX(ptr, ptr, to);
             cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
         }
 #else
         uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
-        linkJumpT4(ptr, to);
+        linkJumpT4(ptr, ptr, to);
         cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
 #endif
     }
@@ -2293,6 +2350,11 @@ public:
         return 4;
 #endif
     }
+
+    static constexpr ptrdiff_t patchableJumpSize()
+    {
+        return 10;
+    }
     
     static void replaceWithLoad(void* instructionStart)
     {
@@ -2301,14 +2363,17 @@ public:
         switch (ptr[0] & 0xFFF0) {
         case OP_LDR_imm_T3:
             break;
-        case OP_ADD_imm_T3:
+        case OP_ADD_imm_T3: {
             ASSERT(!(ptr[1] & 0xF000));
-            ptr[0] &= 0x000F;
-            ptr[0] |= OP_LDR_imm_T3;
-            ptr[1] |= (ptr[1] & 0x0F00) << 4;
-            ptr[1] &= 0xF0FF;
+            uint16_t instructions[2];
+            instructions[0] = ptr[0] & 0x000F;
+            instructions[0] |= OP_LDR_imm_T3;
+            instructions[1] = ptr[1] | (ptr[1] & 0x0F00) << 4;
+            instructions[1] &= 0xF0FF;
+            performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
             cacheFlush(ptr, sizeof(uint16_t) * 2);
             break;
+        }
         default:
             RELEASE_ASSERT_NOT_REACHED();
         }
@@ -2319,14 +2384,17 @@ public:
         ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
         uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
         switch (ptr[0] & 0xFFF0) {
-        case OP_LDR_imm_T3:
+        case OP_LDR_imm_T3: {
             ASSERT(!(ptr[1] & 0x0F00));
-            ptr[0] &= 0x000F;
-            ptr[0] |= OP_ADD_imm_T3;
-            ptr[1] |= (ptr[1] & 0xF000) >> 4;
-            ptr[1] &= 0x0FFF;
+            uint16_t instructions[2];
+            instructions[0] = ptr[0] & 0x000F;
+            instructions[0] |= OP_ADD_imm_T3;
+            instructions[1] = ptr[1] | (ptr[1] & 0xF000) >> 4;
+            instructions[1] &= 0x0FFF;
+            performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
             cacheFlush(ptr, sizeof(uint16_t) * 2);
             break;
+        }
         case OP_ADD_imm_T3:
             break;
         default:
@@ -2375,8 +2443,6 @@ public:
             linuxPageFlush(current, current + page);
 
         linuxPageFlush(current, end);
-#elif OS(WINCE)
-        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
 #else
 #error "The cacheFlush support is missing on this platform."
 #endif
@@ -2456,11 +2522,13 @@ private:
 
         ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
         ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
-        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
-        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
-        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
-        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+        uint16_t instructions[4];
+        instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+        instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
 
+        performJITMemcpy(location - 4, instructions, 4 * sizeof(uint16_t));
         if (flush)
             cacheFlush(location - 4, 4 * sizeof(uint16_t));
     }
@@ -2488,8 +2556,10 @@ private:
         ASSERT(imm.isValid());
         ASSERT(imm.isUInt7());
         uint16_t* location = reinterpret_cast<uint16_t*>(code);
-        location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
-        location[0] |= (imm.getUInt7() >> 2) << 6;
+        uint16_t instruction;
+        instruction = location[0] & ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
+        instruction |= (imm.getUInt7() >> 2) << 6;
+        performJITMemcpy(location, &instruction, sizeof(uint16_t));
         cacheFlush(location, sizeof(uint16_t));
     }
 
@@ -2498,39 +2568,39 @@ private:
         setInt32(code, reinterpret_cast<uint32_t>(value), flush);
     }
 
-    static bool isB(void* address)
+    static bool isB(const void* address)
     {
-        uint16_t* instruction = static_cast<uint16_t*>(address);
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
         return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
     }
 
-    static bool isBX(void* address)
+    static bool isBX(const void* address)
     {
-        uint16_t* instruction = static_cast<uint16_t*>(address);
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
         return (instruction[0] & 0xff87) == OP_BX;
     }
 
-    static bool isMOV_imm_T3(void* address)
+    static bool isMOV_imm_T3(const void* address)
     {
-        uint16_t* instruction = static_cast<uint16_t*>(address);
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
         return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
     }
 
-    static bool isMOVT(void* address)
+    static bool isMOVT(const void* address)
     {
-        uint16_t* instruction = static_cast<uint16_t*>(address);
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
         return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
     }
 
-    static bool isNOP_T1(void* address)
+    static bool isNOP_T1(const void* address)
     {
-        uint16_t* instruction = static_cast<uint16_t*>(address);
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
         return instruction[0] == OP_NOP_T1;
     }
 
-    static bool isNOP_T2(void* address)
+    static bool isNOP_T2(const void* address)
     {
-        uint16_t* instruction = static_cast<uint16_t*>(address);
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
         return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
     }
 
@@ -2578,7 +2648,7 @@ private:
         return ((relative << 7) >> 7) == relative;
     }
     
-    void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+    static void linkJumpT1(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
     {
         // FIMXE: this should be up in the MacroAssembler layer. :-(        
         ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2593,10 +2663,11 @@ private:
         
         // All branch offsets should be an even distance.
         ASSERT(!(relative & 1));
-        instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+        uint16_t newInstruction = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+        performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
     }
     
-    static void linkJumpT2(uint16_t* instruction, void* target)
+    static void linkJumpT2(uint16_t* writeTarget, const uint16_t* instruction, void* target)
     {
         // FIMXE: this should be up in the MacroAssembler layer. :-(        
         ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2611,10 +2682,11 @@ private:
         
         // All branch offsets should be an even distance.
         ASSERT(!(relative & 1));
-        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+        uint16_t newInstruction = OP_B_T2 | ((relative & 0xffe) >> 1);
+        performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
     }
     
-    void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+    static void linkJumpT3(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
     {
         // FIMXE: this should be up in the MacroAssembler layer. :-(
         ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2625,11 +2697,13 @@ private:
         
         // All branch offsets should be an even distance.
         ASSERT(!(relative & 1));
-        instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
-        instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+        uint16_t instructions[2];
+        instructions[0] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+        instructions[1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+        performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
     }
     
-    static void linkJumpT4(uint16_t* instruction, void* target)
+    static void linkJumpT4(uint16_t* writeTarget, const uint16_t* instruction, void* target)
     {
         // FIMXE: this should be up in the MacroAssembler layer. :-(        
         ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2643,47 +2717,55 @@ private:
         
         // All branch offsets should be an even distance.
         ASSERT(!(relative & 1));
-        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
-        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+        uint16_t instructions[2];
+        instructions[0] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+        instructions[1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+        performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
     }
     
-    void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+    static void linkConditionalJumpT4(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
     {
         // FIMXE: this should be up in the MacroAssembler layer. :-(        
         ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
         ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
         
-        instruction[-3] = ifThenElse(cond) | OP_IT;
-        linkJumpT4(instruction, target);
+        uint16_t newInstruction = ifThenElse(cond) | OP_IT;
+        performJITMemcpy(writeTarget - 3, &newInstruction, sizeof(uint16_t));
+        linkJumpT4(writeTarget, instruction, target);
     }
     
-    static void linkBX(uint16_t* instruction, void* target)
+    static void linkBX(uint16_t* writeTarget, const uint16_t* instruction, void* target)
     {
         // FIMXE: this should be up in the MacroAssembler layer. :-(
-        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT_UNUSED(instruction, !(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(writeTarget) & 1));
         ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
         
         const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
         ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
         ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
-        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
-        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
-        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
-        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
-        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+        uint16_t instructions[5];
+        instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+        instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+        instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+
+        performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
     }
     
-    void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+    static void linkConditionalBX(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
     {
         // FIMXE: this should be up in the MacroAssembler layer. :-(        
         ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
         ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
         
-        linkBX(instruction, target);
-        instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+        linkBX(writeTarget, instruction, target);
+        uint16_t newInstruction = ifThenElse(cond, true, true) | OP_IT;
+        performJITMemcpy(writeTarget - 6, &newInstruction, sizeof(uint16_t));
     }
     
-    static void linkJumpAbsolute(uint16_t* instruction, void* target)
+    static void linkJumpAbsolute(uint16_t* writeTarget, const uint16_t* instruction, void* target)
     {
         // FIMXE: this should be up in the MacroAssembler layer. :-(
         ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2691,26 +2773,31 @@ private:
         
         ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
                || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
-        
+
         if (canBeJumpT4(instruction, target)) {
             // There may be a better way to fix this, but right now put the NOPs first, since in the
             // case of a conditional branch this will be coming after an ITTT predicating *three*
             // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
             // variable width encoding - the previous instruction might *look* like an ITTT but
             // actually be the second half of a 2-word op.
-            instruction[-5] = OP_NOP_T1;
-            instruction[-4] = OP_NOP_T2a;
-            instruction[-3] = OP_NOP_T2b;
-            linkJumpT4(instruction, target);
+            uint16_t instructions[3];
+            instructions[0] = OP_NOP_T1;
+            instructions[1] = OP_NOP_T2a;
+            instructions[2] = OP_NOP_T2b;
+            performJITMemcpy(writeTarget - 5, instructions, 3 * sizeof(uint16_t));
+            linkJumpT4(writeTarget, instruction, target);
         } else {
             const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
             ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
             ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
-            instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
-            instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
-            instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
-            instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
-            instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+
+            uint16_t instructions[5];
+            instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+            instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+            instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+            instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+            instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+            performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
         }
     }
     
@@ -2753,6 +2840,11 @@ private:
             m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
         }
 
+        ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
+        {
+            m_buffer.putShort(op | imm);
+        }
+
         ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
         {
             m_buffer.putShort(op | imm);
@@ -2791,6 +2883,12 @@ private:
             m_buffer.putShort(op2);
         }
 
+        ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
+        {
+            m_buffer.putShort(op1);
+            m_buffer.putShort(imm);
+        }
+        
         ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
         {
             ARMThumbImmediate newImm = imm;
@@ -2851,7 +2949,6 @@ private:
 
         unsigned debugOffset() { return m_buffer.debugOffset(); }
 
-    private:
         AssemblerBuffer m_buffer;
     } m_formatter;
 
@@ -2863,5 +2960,3 @@ private:
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
-
-#endif // ARMAssembler_h
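
The change repeated throughout this file is worth calling out: instead of storing directly into executable memory, the new code stages the encoded halfwords in a local buffer and writes them with performJITMemcpy() before flushing the icache. A minimal sketch of the pattern, with stub stand-ins for the WebKit internals (performJITMemcpy and cacheFlush are assumptions here, reduced to plain memcpy for illustration):

    #include <cstdint>
    #include <cstring>

    // Stand-ins for the WebKit internals used by this patch.
    static void performJITMemcpy(void* dst, const void* src, size_t n) { memcpy(dst, src, n); }
    static void cacheFlush(void* begin, size_t bytes) { (void)begin; (void)bytes; }

    // Rewrite the low 12 bits of the second halfword of a two-halfword
    // Thumb-2 instruction, mirroring the repatching code above.
    static void patchSecondHalfword(uint16_t* location, uint16_t offset)
    {
        // Stage the new encoding locally rather than mutating JIT memory in place...
        uint16_t instruction = location[1] & ~((1u << 12) - 1);
        instruction |= offset & ((1u << 12) - 1);
        // ...then write through the JIT-aware memcpy and flush the affected range.
        performJITMemcpy(location + 1, &instruction, sizeof(uint16_t));
        cacheFlush(location, sizeof(uint16_t) * 2);
    }
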
diff --git a/Source/JavaScriptCore/assembler/AbortReason.h b/Source/JavaScriptCore/assembler/AbortReason.h
new file mode 100644
index 000000000..32ae0867a
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/AbortReason.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+// It's important to not change the values of existing abort reasons unless we really
+// have to. For this reason there is a BASIC-style numbering that should allow us to
+// sneak new reasons in without changing the numbering of existing reasons - at least
+// for a while.
+enum AbortReason {
+    AHCallFrameMisaligned                             =  10,
+    AHIndexingTypeIsValid                             =  20,
+    AHInsaneArgumentCount                             =  30,
+    AHIsNotCell                                       =  40,
+    AHIsNotInt32                                      =  50,
+    AHIsNotJSDouble                                   =  60,
+    AHIsNotJSInt32                                    =  70,
+    AHIsNotJSNumber                                   =  80,
+    AHIsNotNull                                       =  90,
+    AHStackPointerMisaligned                          = 100,
+    AHStructureIDIsValid                              = 110,
+    AHTagMaskNotInPlace                               = 120,
+    AHTagTypeNumberNotInPlace                         = 130,
+    AHTypeInfoInlineTypeFlagsAreValid                 = 140,
+    AHTypeInfoIsValid                                 = 150,
+    B3Oops                                            = 155,
+    DFGBailedAtTopOfBlock                             = 161,
+    DFGBailedAtEndOfNode                              = 162,
+    DFGBasicStorageAllocatorZeroSize                  = 170,
+    DFGIsNotCell                                      = 180,
+    DFGIneffectiveWatchpoint                          = 190,
+    DFGNegativeStringLength                           = 200,
+    DFGSlowPathGeneratorFellThrough                   = 210,
+    DFGUnreachableBasicBlock                          = 220,
+    DFGUnreachableNode                                = 225,
+    DFGUnreasonableOSREntryJumpDestination            = 230,
+    DFGVarargsThrowingPathDidNotThrow                 = 235,
+    FTLCrash                                          = 236,
+    JITDidReturnFromTailCall                          = 237,
+    JITDivOperandsAreNotNumbers                       = 240,
+    JITGetByValResultIsNotEmpty                       = 250,
+    JITNotSupported                                   = 260,
+    JITOffsetIsNotOutOfLine                           = 270,
+    JITUncoughtExceptionAfterCall                     = 275,
+    JITUnexpectedCallFrameSize                        = 277,
+    JITUnreasonableLoopHintJumpTarget                 = 280,
+    RPWUnreasonableJumpTarget                         = 290,
+    RepatchIneffectiveWatchpoint                      = 300,
+    RepatchInsaneArgumentCount                        = 310,
+    TGInvalidPointer                                  = 320,
+    TGNotSupported                                    = 330,
+    YARRNoInputConsumed                               = 340,
+};
+
+} // namespace JSC
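
A hedged illustration of the numbering scheme the comment above describes (the middle enumerator is hypothetical): the gaps let a new reason slot in without renumbering its neighbors, so existing values stay stable across revisions:

    enum AbortReasonSketch {
        DFGBailedAtEndOfNode             = 162,
        DFGHypotheticalNewCheckFailed    = 165, // hypothetical: fits in the gap
        DFGBasicStorageAllocatorZeroSize = 170,
    };
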
diff --git a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 28537201b..b791e5cb1 100644
--- a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,74 +23,37 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef AbstractMacroAssembler_h
-#define AbstractMacroAssembler_h
+#pragma once
 
+#include "AbortReason.h"
 #include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
+#include "CPU.h"
 #include "CodeLocation.h"
 #include "MacroAssemblerCodeRef.h"
+#include "MacroAssemblerHelpers.h"
 #include "Options.h"
-#include "WeakRandom.h"
 #include <wtf/CryptographicallyRandomNumber.h>
 #include <wtf/Noncopyable.h>
-
-#if ENABLE(ASSEMBLER)
+#include <wtf/SharedTask.h>
+#include <wtf/WeakRandom.h>
 
 namespace JSC {
 
-inline bool isARMv7s()
-{
-#if CPU(APPLE_ARMV7S)
-    return true;
-#else
-    return false;
-#endif
-}
-
-inline bool isARM64()
-{
-#if CPU(ARM64)
-    return true;
-#else
-    return false;
-#endif
-}
-
-inline bool isX86()
-{
-#if CPU(X86_64) || CPU(X86)
-    return true;
-#else
-    return false;
-#endif
-}
-
-inline bool optimizeForARMv7s()
-{
-    return isARMv7s() && Options::enableArchitectureSpecificOptimizations();
-}
-
-inline bool optimizeForARM64()
-{
-    return isARM64() && Options::enableArchitectureSpecificOptimizations();
-}
-
-inline bool optimizeForX86()
-{
-    return isX86() && Options::enableArchitectureSpecificOptimizations();
-}
+#if ENABLE(ASSEMBLER)
 
+class AllowMacroScratchRegisterUsage;
+class DisallowMacroScratchRegisterUsage;
 class LinkBuffer;
-class RepatchBuffer;
 class Watchpoint;
 namespace DFG {
 struct OSRExit;
 }
 
-template <class AssemblerType>
+template <class AssemblerType, class MacroAssemblerType>
 class AbstractMacroAssembler {
 public:
-    friend class JITWriteBarrierBase;
+    typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType;
     typedef AssemblerType AssemblerType_T;
 
     typedef MacroAssemblerCodePtr CodePtr;
@@ -101,11 +64,11 @@ public:
     typedef typename AssemblerType::RegisterID RegisterID;
     typedef typename AssemblerType::FPRegisterID FPRegisterID;
     
-    static RegisterID firstRegister() { return AssemblerType::firstRegister(); }
-    static RegisterID lastRegister() { return AssemblerType::lastRegister(); }
+    static constexpr RegisterID firstRegister() { return AssemblerType::firstRegister(); }
+    static constexpr RegisterID lastRegister() { return AssemblerType::lastRegister(); }
 
-    static FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
-    static FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
+    static constexpr FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
+    static constexpr FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
 
     // Section 1: MacroAssembler operand types
     //
@@ -125,7 +88,9 @@ public:
             return TimesFour;
         return TimesEight;
     }
-
+    
+    struct BaseIndex;
+    
     // Address:
     //
     // Describes a simple base-offset address.
@@ -140,7 +105,9 @@ public:
         {
             return Address(base, offset + additionalOffset);
         }
-
+        
+        BaseIndex indexedBy(RegisterID index, Scale) const;
+        
         RegisterID base;
         int32_t offset;
     };
@@ -198,11 +165,16 @@ public:
             , offset(offset)
         {
         }
-
+        
         RegisterID base;
         RegisterID index;
         Scale scale;
         int32_t offset;
+        
+        BaseIndex withOffset(int32_t additionalOffset)
+        {
+            return BaseIndex(base, index, scale, offset + additionalOffset);
+        }
     };
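
A usage sketch of how these operand types compose (not standalone; MacroAssembler stands in for a concrete type such as MacroAssemblerARMv7, and load32 is the usual MacroAssembler load):

    // Loads a 4-byte element at base + 16 + index * 4 into dest.
    template<typename MacroAssembler>
    void emitLoadElement(MacroAssembler& jit,
        typename MacroAssembler::RegisterID base,
        typename MacroAssembler::RegisterID index,
        typename MacroAssembler::RegisterID dest)
    {
        typename MacroAssembler::Address slot(base, 16);
        // indexedBy() upgrades the base-offset Address to a scaled BaseIndex.
        jit.load32(slot.indexedBy(index, MacroAssembler::TimesFour), dest);
    }
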
 
     // AbsoluteAddress:
@@ -354,7 +326,7 @@ public:
     // A Label records a point in the generated instruction stream, typically such that
     // it may be used as a destination for a jump.
     class Label {
-        template<class TemplateAssemblerType>
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
         friend class AbstractMacroAssembler;
         friend struct DFG::OSRExit;
         friend class Jump;
@@ -367,12 +339,14 @@ public:
         {
         }
 
-        Label(AbstractMacroAssembler<AssemblerType>* masm)
+        Label(AbstractMacroAssemblerType* masm)
             : m_label(masm->m_assembler.label())
         {
             masm->invalidateAllTempRegisters();
         }
 
+        bool operator==(const Label& other) const { return m_label == other.m_label; }
+
         bool isSet() const { return m_label.isSet(); }
     private:
         AssemblerLabel m_label;
@@ -389,7 +363,7 @@ public:
     //
     // addPtr(TrustedImmPtr(i), a, b)
     class ConvertibleLoadLabel {
-        template<class TemplateAssemblerType>
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
         friend class AbstractMacroAssembler;
         friend class LinkBuffer;
         
@@ -398,7 +372,7 @@ public:
         {
         }
         
-        ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
+        ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
             : m_label(masm->m_assembler.labelIgnoringWatchpoints())
         {
         }
@@ -413,7 +387,7 @@ public:
     // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
     // patched after the code has been generated.
     class DataLabelPtr {
-        template<class TemplateAssemblerType>
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
         friend class AbstractMacroAssembler;
         friend class LinkBuffer;
     public:
@@ -421,7 +395,7 @@ public:
         {
         }
 
-        DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
+        DataLabelPtr(AbstractMacroAssemblerType* masm)
             : m_label(masm->m_assembler.label())
         {
         }
@@ -434,10 +408,10 @@ public:
 
     // DataLabel32:
     //
-    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
     // patched after the code has been generated.
     class DataLabel32 {
-        template<class TemplateAssemblerType>
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
         friend class AbstractMacroAssembler;
         friend class LinkBuffer;
     public:
@@ -445,7 +419,7 @@ public:
         {
         }
 
-        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
+        DataLabel32(AbstractMacroAssemblerType* masm)
             : m_label(masm->m_assembler.label())
         {
         }
@@ -461,7 +435,7 @@ public:
     // A DataLabelCompact is used to refer to a location in the code containing a
     // compact immediate to be patched after the code has been generated.
     class DataLabelCompact {
-        template<class TemplateAssemblerType>
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
         friend class AbstractMacroAssembler;
         friend class LinkBuffer;
     public:
@@ -469,7 +443,7 @@ public:
         {
         }
         
-        DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
+        DataLabelCompact(AbstractMacroAssemblerType* masm)
             : m_label(masm->m_assembler.label())
         {
         }
@@ -492,7 +466,7 @@ public:
     // relative offset such that when executed it will call to the desired
     // destination.
     class Call {
-        template<class TemplateAssemblerType>
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
         friend class AbstractMacroAssembler;
 
     public:
@@ -500,7 +474,9 @@ public:
             None = 0x0,
             Linkable = 0x1,
             Near = 0x2,
+            Tail = 0x4,
             LinkableNear = 0x3,
+            LinkableNearTail = 0x7,
         };
 
         Call()
@@ -536,7 +512,7 @@ public:
     // relative offset such that when executed it will jump to the desired
     // destination.
     class Jump {
-        template<class TemplateAssemblerType>
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
         friend class AbstractMacroAssembler;
         friend class Call;
         friend struct DFG::OSRExit;
@@ -581,12 +557,6 @@ public:
         {
             ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
         }
-#elif CPU(SH4)
-        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
-            : m_label(jmp)
-            , m_type(type)
-        {
-        }
 #else
         Jump(AssemblerLabel jmp)    
             : m_label(jmp)
@@ -601,7 +571,7 @@ public:
             return result;
         }
 
-        void link(AbstractMacroAssembler<AssemblerType>* masm) const
+        void link(AbstractMacroAssemblerType* masm) const
         {
             masm->invalidateAllTempRegisters();
 
@@ -618,14 +588,12 @@ public:
                 masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
             else
                 masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
-#elif CPU(SH4)
-            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
 #else
             masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
 #endif
         }
         
-        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
+        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
         {
 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
             masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
@@ -658,9 +626,6 @@ public:
         bool m_is64Bit;
         unsigned m_bitNumber;
         ARM64Assembler::RegisterID m_compareRegister;
-#endif
-#if CPU(SH4)
-        SH4Assembler::JumpType m_type;
 #endif
     };
 
@@ -684,8 +649,6 @@ public:
     // A JumpList is a set of Jump objects.
     // All jumps in the set will be linked to the same destination.
     class JumpList {
-        friend class LinkBuffer;
-
     public:
         typedef Vector<Jump, 2> JumpVector;
         
@@ -693,23 +656,22 @@ public:
         
         JumpList(Jump jump)
         {
-            append(jump);
+            if (jump.isSet())
+                append(jump);
         }
 
-        void link(AbstractMacroAssembler<AssemblerType>* masm)
+        void link(AbstractMacroAssemblerType* masm) const
         {
             size_t size = m_jumps.size();
             for (size_t i = 0; i < size; ++i)
                 m_jumps[i].link(masm);
-            m_jumps.clear();
         }
         
-        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
         {
             size_t size = m_jumps.size();
             for (size_t i = 0; i < size; ++i)
                 m_jumps[i].linkTo(label, masm);
-            m_jumps.clear();
         }
         
         void append(Jump jump)
@@ -834,19 +796,215 @@ public:
         AssemblerType::cacheFlush(code, size);
     }
 
+#if ENABLE(MASM_PROBE)
+
+    struct CPUState {
+        #define DECLARE_REGISTER(_type, _regName) \
+            _type _regName;
+        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+        #undef DECLARE_REGISTER
+
+        static const char* gprName(RegisterID regID)
+        {
+            switch (regID) {
+                #define DECLARE_REGISTER(_type, _regName) \
+                case RegisterID::_regName: \
+                    return #_regName;
+                FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+                #undef DECLARE_REGISTER
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+
+        static const char* fprName(FPRegisterID regID)
+        {
+            switch (regID) {
+                #define DECLARE_REGISTER(_type, _regName) \
+                case FPRegisterID::_regName: \
+                    return #_regName;
+                FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+                #undef DECLARE_REGISTER
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+
+        void*& gpr(RegisterID regID)
+        {
+            switch (regID) {
+                #define DECLARE_REGISTER(_type, _regName) \
+                case RegisterID::_regName: \
+                    return _regName;
+                FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+                #undef DECLARE_REGISTER
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+
+        double& fpr(FPRegisterID regID)
+        {
+            switch (regID) {
+                #define DECLARE_REGISTER(_type, _regName) \
+                case FPRegisterID::_regName: \
+                    return _regName;
+                FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+                #undef DECLARE_REGISTER
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+    };
+
+    struct ProbeContext;
+    typedef void (*ProbeFunction)(struct ProbeContext*);
+
+    struct ProbeContext {
+        ProbeFunction probeFunction;
+        void* arg1;
+        void* arg2;
+        CPUState cpu;
+
+        // Convenience methods:
+        void*& gpr(RegisterID regID) { return cpu.gpr(regID); }
+        double& fpr(FPRegisterID regID) { return cpu.fpr(regID); }
+        const char* gprName(RegisterID regID) { return cpu.gprName(regID); }
+        const char* fprName(FPRegisterID regID) { return cpu.fprName(regID); }
+    };
+
+    // This function emits code to preserve the CPUState (e.g. registers),
+    // call a user supplied probe function, and restore the CPUState before
+    // continuing with other JIT generated code.
+    //
+    // The user supplied probe function will be called with a single pointer to
+    // a ProbeContext struct (defined above) which contains, among other things,
+    // the preserved CPUState. This allows the user probe function to inspect
+    // the CPUState at that point in the JIT generated code.
+    //
+    // If the user probe function alters the register values in the ProbeContext,
+    // the altered values will be loaded into the CPU registers when the probe
+    // returns.
+    //
+    // The ProbeContext is stack allocated and is only valid for the duration
+    // of the call to the user probe function.
+    //
+    // Note: probe() should be implemented by the target specific MacroAssembler.
+    // This prototype is only provided here to document the interface.
+
+    void probe(ProbeFunction, void* arg1, void* arg2);
+
+#endif // ENABLE(MASM_PROBE)
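
To make the probe() interface above concrete, here is a minimal sketch of a user-supplied probe function. It is illustrative only, not part of this patch; it assumes the JSC MacroAssembler typedef for the concrete target and WTF's dataLogF are in scope, and that register 0 is a valid GPR on the target.

    static void printFirstGPR(MacroAssembler::ProbeContext* context)
    {
        // ProbeContext carries the preserved CPUState; reads observe the register
        // values at the probe site, and any writes are reloaded into the CPU
        // registers when the probe returns.
        MacroAssembler::RegisterID reg = static_cast<MacroAssembler::RegisterID>(0);
        dataLogF("%s = %p\n", context->gprName(reg), context->gpr(reg));
    }

    // Emitted during code generation (only on targets that implement probe()):
    //     jit.probe(printFirstGPR, nullptr, nullptr);
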
+
     AssemblerType m_assembler;
     
+    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+    {
+        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
+    }
+
+    static void linkPointer(void* code, AssemblerLabel label, void* value)
+    {
+        AssemblerType::linkPointer(code, label, value);
+    }
+
+    static void* getLinkerAddress(void* code, AssemblerLabel label)
+    {
+        return AssemblerType::getRelocatedAddress(code, label);
+    }
+
+    static unsigned getLinkerCallReturnOffset(Call call)
+    {
+        return AssemblerType::getCallReturnOffset(call.m_label);
+    }
+
+    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+    {
+        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+    }
+    
+    static void repatchJumpToNop(CodeLocationJump jump)
+    {
+        AssemblerType::relinkJumpToNop(jump.dataLocation());
+    }
+
+    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+    {
+        switch (nearCall.callMode()) {
+        case NearCallMode::Tail:
+            AssemblerType::relinkJump(nearCall.dataLocation(), destination.dataLocation());
+            return;
+        case NearCallMode::Regular:
+            AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+            return;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+    {
+        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
+    }
+    
+    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+    {
+        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+    }
+
+    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+    {
+        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+    }
+    
+    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
+    {
+        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
+    }
+    
+    static void replaceWithLoad(CodeLocationConvertibleLoad label)
+    {
+        AssemblerType::replaceWithLoad(label.dataLocation());
+    }
+    
+    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+    {
+        AssemblerType::replaceWithAddressComputation(label.dataLocation());
+    }
+
+    template<typename Functor>
+    void addLinkTask(const Functor& functor)
+    {
+        m_linkTasks.append(createSharedTask<void(LinkBuffer&)>(functor));
+    }
+
+    void emitNops(size_t memoryToFillWithNopsInBytes)
+    {
+        AssemblerBuffer& buffer = m_assembler.buffer();
+        size_t startCodeSize = buffer.codeSize();
+        size_t targetCodeSize = startCodeSize + memoryToFillWithNopsInBytes;
+        buffer.ensureSpace(memoryToFillWithNopsInBytes);
+        bool isCopyingToExecutableMemory = false;
+        AssemblerType::fillNops(static_cast<char*>(buffer.data()) + startCodeSize, memoryToFillWithNopsInBytes, isCopyingToExecutableMemory);
+        buffer.setCodeSize(targetCodeSize);
+    }
+
 protected:
     AbstractMacroAssembler()
-        : m_randomSource(cryptographicallyRandomNumber())
+        : m_randomSource(0)
     {
+        invalidateAllTempRegisters();
     }
 
     uint32_t random()
     {
+        if (!m_randomSourceIsInitialized) {
+            m_randomSourceIsInitialized = true;
+            m_randomSource.setSeed(cryptographicallyRandomNumber());
+        }
         return m_randomSource.getUint32();
     }
 
+    bool m_randomSourceIsInitialized { false };
     WeakRandom m_randomSource;
 
 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
@@ -874,7 +1032,7 @@ protected:
         friend class Label;
 
     public:
-        CachedTempRegister(AbstractMacroAssembler* masm, RegisterID registerID)
+        CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
             : m_masm(masm)
             , m_registerID(registerID)
             , m_value(0)
@@ -902,7 +1060,7 @@ protected:
         ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
 
     private:
-        AbstractMacroAssembler* m_masm;
+        AbstractMacroAssemblerType* m_masm;
         RegisterID m_registerID;
         intptr_t m_value;
         unsigned m_validBit;
@@ -928,74 +1086,25 @@ protected:
         m_tempRegistersValidBits |= registerMask;
     }
 
+    friend class AllowMacroScratchRegisterUsage;
+    friend class DisallowMacroScratchRegisterUsage;
     unsigned m_tempRegistersValidBits;
+    bool m_allowScratchRegister { true };
 
-    friend class LinkBuffer;
-    friend class RepatchBuffer;
-
-    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
-    {
-        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
-    }
-
-    static void linkPointer(void* code, AssemblerLabel label, void* value)
-    {
-        AssemblerType::linkPointer(code, label, value);
-    }
-
-    static void* getLinkerAddress(void* code, AssemblerLabel label)
-    {
-        return AssemblerType::getRelocatedAddress(code, label);
-    }
-
-    static unsigned getLinkerCallReturnOffset(Call call)
-    {
-        return AssemblerType::getCallReturnOffset(call.m_label);
-    }
-
-    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
-    {
-        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
-    }
-
-    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
-    {
-        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
-    }
-
-    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
-    {
-        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
-    }
-    
-    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
-    {
-        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
-    }
+    Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;
 
-    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
-    {
-        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
-    }
-    
-    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
-    {
-        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
-    }
-    
-    static void replaceWithLoad(CodeLocationConvertibleLoad label)
-    {
-        AssemblerType::replaceWithLoad(label.dataLocation());
-    }
-    
-    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
-    {
-        AssemblerType::replaceWithAddressComputation(label.dataLocation());
-    }
-};
+    friend class LinkBuffer;
+}; // class AbstractMacroAssembler
 
-} // namespace JSC
+template <class AssemblerType, class MacroAssemblerType>
+inline typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::BaseIndex
+AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::Address::indexedBy(
+    typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::RegisterID index,
+    typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::Scale scale) const
+{
+    return BaseIndex(base, index, scale, offset);
+}
 
 #endif // ENABLE(ASSEMBLER)
 
-#endif // AbstractMacroAssembler_h
+} // namespace JSC
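
The new addLinkTask()/m_linkTasks plumbing above defers linking work until a LinkBuffer with real code addresses exists; LinkBuffer::performFinalization() (later in this patch) runs each queued task. A minimal sketch of the intended pattern, assuming `someTarget` is a hypothetical C function used as a call target:

    MacroAssembler jit;
    MacroAssembler::Call call = jit.nearCall();
    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
        // Runs inside performFinalization(), once final locations are known.
        linkBuffer.link(call, FunctionPtr(someTarget)); // someTarget: hypothetical
    });
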
diff --git a/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h b/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h
new file mode 100644
index 000000000..ed7806ced
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+class AllowMacroScratchRegisterUsage {
+public:
+    AllowMacroScratchRegisterUsage(MacroAssembler& masm)
+        : m_masm(masm)
+        , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister)
+    {
+        masm.m_allowScratchRegister = true;
+    }
+
+    ~AllowMacroScratchRegisterUsage()
+    {
+        m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister;
+    }
+
+private:
+    MacroAssembler& m_masm;
+    bool m_oldValueOfAllowScratchRegister;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
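
A sketch of how this RAII guard (and its Disallow counterpart below) is meant to be used; `jit` and the 64-bit target with GPRInfo::regT0 are assumptions for illustration:

    {
        AllowMacroScratchRegisterUsage allowScratch(jit);
        // Within this scope the assembler may clobber its scratch register,
        // e.g. to materialize a 64-bit immediate.
        jit.move(MacroAssembler::TrustedImm64(0x1234), GPRInfo::regT0);
    } // destructor restores the previous m_allowScratchRegister value
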
diff --git a/Source/JavaScriptCore/assembler/AssemblerBuffer.h b/Source/JavaScriptCore/assembler/AssemblerBuffer.h
index 120868d63..7340952d5 100644
--- a/Source/JavaScriptCore/assembler/AssemblerBuffer.h
+++ b/Source/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef AssemblerBuffer_h
-#define AssemblerBuffer_h
+#pragma once
 
 #if ENABLE(ASSEMBLER)
 
@@ -56,53 +55,113 @@ namespace JSC {
             return AssemblerLabel(m_offset + offset);
         }
 
+        bool operator==(const AssemblerLabel& other) const { return m_offset == other.m_offset; }
+
         uint32_t m_offset;
     };
 
-    class AssemblerBuffer {
-        static const int inlineCapacity = 128;
+    class AssemblerData {
+        WTF_MAKE_NONCOPYABLE(AssemblerData);
+        static const size_t InlineCapacity = 128;
     public:
-        AssemblerBuffer()
-            : m_storage(inlineCapacity)
-            , m_buffer(m_storage.begin())
-            , m_capacity(inlineCapacity)
-            , m_index(0)
+        AssemblerData()
+            : m_buffer(m_inlineBuffer)
+            , m_capacity(InlineCapacity)
         {
         }
 
-        ~AssemblerBuffer()
+        AssemblerData(size_t initialCapacity)
         {
+            if (initialCapacity <= InlineCapacity) {
+                m_capacity = InlineCapacity;
+                m_buffer = m_inlineBuffer;
+            } else {
+                m_capacity = initialCapacity;
+                m_buffer = static_cast<char*>(fastMalloc(m_capacity));
+            }
         }
 
-        bool isAvailable(int space)
+        AssemblerData(AssemblerData&& other)
         {
-            return m_index + space <= m_capacity;
+            if (other.isInlineBuffer()) {
+                ASSERT(other.m_capacity == InlineCapacity);
+                memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity);
+                m_buffer = m_inlineBuffer;
+            } else
+                m_buffer = other.m_buffer;
+            m_capacity = other.m_capacity;
+
+            other.m_buffer = nullptr;
+            other.m_capacity = 0;
         }
 
-        void ensureSpace(int space)
+        AssemblerData& operator=(AssemblerData&& other)
         {
-            if (!isAvailable(space))
-                grow();
+            if (m_buffer && !isInlineBuffer())
+                fastFree(m_buffer);
+
+            if (other.isInlineBuffer()) {
+                ASSERT(other.m_capacity == InlineCapacity);
+                memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity);
+                m_buffer = m_inlineBuffer;
+            } else
+                m_buffer = other.m_buffer;
+            m_capacity = other.m_capacity;
+
+            other.m_buffer = nullptr;
+            other.m_capacity = 0;
+            return *this;
         }
 
-        bool isAligned(int alignment) const
+        ~AssemblerData()
         {
-            return !(m_index & (alignment - 1));
+            if (m_buffer && !isInlineBuffer())
+                fastFree(m_buffer);
         }
 
-        template<typename IntegralType>
-        void putIntegral(IntegralType value)
+        char* buffer() const { return m_buffer; }
+
+        unsigned capacity() const { return m_capacity; }
+
+        void grow(unsigned extraCapacity = 0)
         {
-            ensureSpace(sizeof(IntegralType));
-            putIntegralUnchecked(value);
+            m_capacity = m_capacity + m_capacity / 2 + extraCapacity;
+            if (isInlineBuffer()) {
+                m_buffer = static_cast<char*>(fastMalloc(m_capacity));
+                memcpy(m_buffer, m_inlineBuffer, InlineCapacity);
+            } else
+                m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity));
         }
 
-        template<typename IntegralType>
-        void putIntegralUnchecked(IntegralType value)
+    private:
+        bool isInlineBuffer() const { return m_buffer == m_inlineBuffer; }
+        char* m_buffer;
+        char m_inlineBuffer[InlineCapacity];
+        unsigned m_capacity;
+    };
+
+    class AssemblerBuffer {
+    public:
+        AssemblerBuffer()
+            : m_storage()
+            , m_index(0)
         {
-            ASSERT(isAvailable(sizeof(IntegralType)));
-            *reinterpret_cast_ptr<IntegralType*>(m_buffer + m_index) = value;
-            m_index += sizeof(IntegralType);
+        }
+
+        bool isAvailable(unsigned space)
+        {
+            return m_index + space <= m_storage.capacity();
+        }
+
+        void ensureSpace(unsigned space)
+        {
+            while (!isAvailable(space))
+                outOfLineGrow();
+        }
+
+        bool isAligned(int alignment) const
+        {
+            return !(m_index & (alignment - 1));
         }
 
         void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
@@ -116,7 +175,7 @@ namespace JSC {
 
         void* data() const
         {
-            return m_buffer;
+            return m_storage.buffer();
         }
 
         size_t codeSize() const
@@ -124,6 +183,15 @@ namespace JSC {
             return m_index;
         }
 
+        void setCodeSize(size_t index)
+        {
+            // Warning: Only use this if you know exactly what you are doing.
+            // For example, say you want 40 bytes of nops, it's ok to grow
+            // and then fill 40 bytes of nops using bigger instructions.
+            m_index = index;
+            ASSERT(m_index <= m_storage.capacity());
+        }
+
         AssemblerLabel label() const
         {
             return AssemblerLabel(m_index);
@@ -131,33 +199,104 @@ namespace JSC {
 
         unsigned debugOffset() { return m_index; }
 
+        AssemblerData&& releaseAssemblerData() { return WTFMove(m_storage); }
+
+        // LocalWriter is a trick to keep the storage buffer and the index
+        // in registers while issuing multiple stores.
+        // It is created in a block scope and its attributes can stay in
+        // registers between writes.
+        //
+        // LocalWriter *CANNOT* be mixed with other types of access to AssemblerBuffer.
+        // AssemblerBuffer cannot be used until its LocalWriter goes out of scope.
+        class LocalWriter {
+        public:
+            LocalWriter(AssemblerBuffer& buffer, unsigned requiredSpace)
+                : m_buffer(buffer)
+            {
+                buffer.ensureSpace(requiredSpace);
+                m_storageBuffer = buffer.m_storage.buffer();
+                m_index = buffer.m_index;
+#if !defined(NDEBUG)
+                m_initialIndex = m_index;
+                m_requiredSpace = requiredSpace;
+#endif
+            }
+
+            ~LocalWriter()
+            {
+                ASSERT(m_index - m_initialIndex <= m_requiredSpace);
+                ASSERT(m_buffer.m_index == m_initialIndex);
+                ASSERT(m_storageBuffer == m_buffer.m_storage.buffer());
+                m_buffer.m_index = m_index;
+            }
+
+            void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
+            void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
+            void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
+            void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
+        private:
+            template<typename IntegralType>
+            void putIntegralUnchecked(IntegralType value)
+            {
+                ASSERT(m_index + sizeof(IntegralType) <= m_buffer.m_storage.capacity());
+                *reinterpret_cast_ptr<IntegralType*>(m_storageBuffer + m_index) = value;
+                m_index += sizeof(IntegralType);
+            }
+            AssemblerBuffer& m_buffer;
+            char* m_storageBuffer;
+            unsigned m_index;
+#if !defined(NDEBUG)
+            unsigned m_initialIndex;
+            unsigned m_requiredSpace;
+#endif
+        };
+
     protected:
+        template<typename IntegralType>
+        void putIntegral(IntegralType value)
+        {
+            unsigned nextIndex = m_index + sizeof(IntegralType);
+            if (UNLIKELY(nextIndex > m_storage.capacity()))
+                outOfLineGrow();
+            ASSERT(isAvailable(sizeof(IntegralType)));
+            *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value;
+            m_index = nextIndex;
+        }
+
+        template<typename IntegralType>
+        void putIntegralUnchecked(IntegralType value)
+        {
+            ASSERT(isAvailable(sizeof(IntegralType)));
+            *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value;
+            m_index += sizeof(IntegralType);
+        }
+
         void append(const char* data, int size)
         {
             if (!isAvailable(size))
                 grow(size);
 
-            memcpy(m_buffer + m_index, data, size);
+            memcpy(m_storage.buffer() + m_index, data, size);
             m_index += size;
         }
 
         void grow(int extraCapacity = 0)
         {
-            m_capacity += m_capacity / 2 + extraCapacity;
-
-            m_storage.grow(m_capacity);
-            m_buffer = m_storage.begin();
+            m_storage.grow(extraCapacity);
         }
 
     private:
-        Vector<char, inlineCapacity> m_storage;
-        char* m_buffer;
-        int m_capacity;
-        int m_index;
+        NEVER_INLINE void outOfLineGrow()
+        {
+            m_storage.grow();
+        }
+
+        friend LocalWriter;
+
+        AssemblerData m_storage;
+        unsigned m_index;
     };
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // AssemblerBuffer_h
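
To make the new LocalWriter contract concrete, a minimal sketch (illustrative only; the ARM64 nop encoding 0xd503201f is just a convenient 32-bit payload):

    void emitTwoWords(AssemblerBuffer& buffer)
    {
        // One capacity check up front; the cached buffer pointer and index
        // make the unchecked writes cheap. The destructor publishes the
        // final index back to the AssemblerBuffer.
        AssemblerBuffer::LocalWriter writer(buffer, 2 * sizeof(int32_t));
        writer.putIntUnchecked(static_cast<int32_t>(0xd503201f)); // ARM64 nop
        writer.putIntUnchecked(static_cast<int32_t>(0xd503201f));
    }
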
diff --git a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
index 053884b01..3b6328864 100644
--- a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
+++ b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
@@ -24,8 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef AssemblerBufferWithConstantPool_h
-#define AssemblerBufferWithConstantPool_h
+#pragma once
 
 #if ENABLE(ASSEMBLER)
 
@@ -332,5 +331,3 @@ private:
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // AssemblerBufferWithConstantPool_h
diff --git a/Source/JavaScriptCore/assembler/AssemblerCommon.h b/Source/JavaScriptCore/assembler/AssemblerCommon.h
new file mode 100644
index 000000000..2c6cb35f3
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/AssemblerCommon.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+ALWAYS_INLINE bool isIOS()
+{
+#if PLATFORM(IOS)
+    return true;
+#else
+    return false;
+#endif
+}
+
+ALWAYS_INLINE bool isInt9(int32_t value)
+{
+    return value == ((value << 23) >> 23);
+}
+
+template<typename Type>
+ALWAYS_INLINE bool isUInt12(Type value)
+{
+    return !(value & ~static_cast<Type>(0xfff));
+}
+
+template<int datasize>
+ALWAYS_INLINE bool isValidScaledUImm12(int32_t offset)
+{
+    int32_t maxPImm = 4095 * (datasize / 8);
+    if (offset < 0)
+        return false;
+    if (offset > maxPImm)
+        return false;
+    if (offset & ((datasize / 8) - 1))
+        return false;
+    return true;
+}
+
+ALWAYS_INLINE bool isValidSignedImm9(int32_t value)
+{
+    return isInt9(value);
+}
+
+class ARM64LogicalImmediate {
+public:
+    static ARM64LogicalImmediate create32(uint32_t value)
+    {
+        // Check for 0, -1 - these cannot be encoded.
+        if (!value || !~value)
+            return InvalidLogicalImmediate;
+
+        // First look for a 32-bit pattern, then for repeating 16-bit
+        // patterns, 8-bit, 4-bit, and finally 2-bit.
+
+        unsigned hsb, lsb;
+        bool inverted;
+        if (findBitRange<32>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<32>(hsb, lsb, inverted);
+
+        if ((value & 0xffff) != (value >> 16))
+            return InvalidLogicalImmediate;
+        value &= 0xffff;
+
+        if (findBitRange<16>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<16>(hsb, lsb, inverted);
+
+        if ((value & 0xff) != (value >> 8))
+            return InvalidLogicalImmediate;
+        value &= 0xff;
+
+        if (findBitRange<8>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<8>(hsb, lsb, inverted);
+
+        if ((value & 0xf) != (value >> 4))
+            return InvalidLogicalImmediate;
+        value &= 0xf;
+
+        if (findBitRange<4>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<4>(hsb, lsb, inverted);
+
+        if ((value & 0x3) != (value >> 2))
+            return InvalidLogicalImmediate;
+        value &= 0x3;
+
+        if (findBitRange<2>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<2>(hsb, lsb, inverted);
+
+        return InvalidLogicalImmediate;
+    }
+
+    static ARM64LogicalImmediate create64(uint64_t value)
+    {
+        // Check for 0, -1 - these cannot be encoded.
+        if (!value || !~value)
+            return InvalidLogicalImmediate;
+
+        // Look for a contiguous bit range.
+        unsigned hsb, lsb;
+        bool inverted;
+        if (findBitRange<64>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<64>(hsb, lsb, inverted);
+
+        // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
+        if (static_cast(value) == static_cast(value >> 32))
+            return create32(static_cast(value));
+        return InvalidLogicalImmediate;
+    }
+
+    int value() const
+    {
+        ASSERT(isValid());
+        return m_value;
+    }
+
+    bool isValid() const
+    {
+        return m_value != InvalidLogicalImmediate;
+    }
+
+    bool is64bit() const
+    {
+        return m_value & (1 << 12);
+    }
+
+private:
+    ARM64LogicalImmediate(int value)
+        : m_value(value)
+    {
+    }
+
+    // Generate a mask with bits in the range hsb..0 set, for example:
+    //   hsb:63 = 0xffffffffffffffff
+    //   hsb:42 = 0x000007ffffffffff
+    //   hsb: 0 = 0x0000000000000001
+    static uint64_t mask(unsigned hsb)
+    {
+        ASSERT(hsb < 64);
+        return 0xffffffffffffffffull >> (63 - hsb);
+    }
+
+    template<unsigned N>
+    static void partialHSB(uint64_t& value, unsigned& result)
+    {
+        if (value & (0xffffffffffffffffull << N)) {
+            result += N;
+            value >>= N;
+        }
+    }
+
+    // Find the bit number of the highest bit set in a non-zero value, for example:
+    //   0x8080808080808080 = hsb:63
+    //   0x0000000000000001 = hsb: 0
+    //   0x000007ffffe00000 = hsb:42
+    static unsigned highestSetBit(uint64_t value)
+    {
+        ASSERT(value);
+        unsigned hsb = 0;
+        partialHSB<32>(value, hsb);
+        partialHSB<16>(value, hsb);
+        partialHSB<8>(value, hsb);
+        partialHSB<4>(value, hsb);
+        partialHSB<2>(value, hsb);
+        partialHSB<1>(value, hsb);
+        return hsb;
+    }
+
+    // This function takes a value and a bit width, where value obeys the following constraints:
+    //   * bits outside of the width of the value must be zero.
+    //   * bits within the width of value must neither be all clear or all set.
+    // The input is inspected to detect values that consist of either two or three contiguous
+    // ranges of bits. The output range hsb..lsb will describe the second range of the value.
+    // if the range is set, inverted will be false, and if the range is clear, inverted will
+    // be true. For example (with width 8):
+    //   00001111 = hsb:3, lsb:0, inverted:false
+    //   11110000 = hsb:3, lsb:0, inverted:true
+    //   00111100 = hsb:5, lsb:2, inverted:false
+    //   11000011 = hsb:5, lsb:2, inverted:true
+    template<unsigned width>
+    static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
+    {
+        ASSERT(value & mask(width - 1));
+        ASSERT(value != mask(width - 1));
+        ASSERT(!(value & ~mask(width - 1)));
+
+        // Detect cases where the top bit is set; if so, flip all the bits & set invert.
+        // This halves the number of patterns we need to look for.
+        const uint64_t msb = 1ull << (width - 1);
+        if ((inverted = (value & msb)))
+            value ^= mask(width - 1);
+
+        // Find the highest set bit in value, generate a corresponding mask & flip all
+        // bits under it.
+        hsb = highestSetBit(value);
+        value ^= mask(hsb);
+        if (!value) {
+            // If this cleared the value, then the range hsb..0 was all set.
+            lsb = 0;
+            return true;
+        }
+
+        // Try making one more mask, and flipping the bits!
+        lsb = highestSetBit(value);
+        value ^= mask(lsb);
+        if (!value) {
+            // Success - but lsb actually points to the hsb of a third range - add one
+            // to get to the lsb of the mid range.
+            ++lsb;
+            return true;
+        }
+
+        return false;
+    }
+
+    // Encodes the set of immN:immr:imms fields found in a logical immediate.
+    template<unsigned width>
+    static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
+    {
+        // Check width is a power of 2!
+        ASSERT(!(width & (width -1)));
+        ASSERT(width <= 64 && width >= 2);
+        ASSERT(hsb >= lsb);
+        ASSERT(hsb < width);
+
+        int immN = 0;
+        int imms = 0;
+        int immr = 0;
+
+        // For 64-bit values this is easy - just set immN to true, and imms just
+        // contains the bit number of the highest set bit of the set range. For
+        // values with narrower widths, these are encoded by a leading set of
+        // one bits, followed by a zero bit, followed by the remaining set of bits
+        // being the high bit of the range. For a 32-bit immediate there are no
+        // leading one bits, just a zero followed by a five bit number. For a
+        // 16-bit immediate there is one one bit, a zero bit, and then a four bit
+        // bit-position, etc.
+        if (width == 64)
+            immN = 1;
+        else
+            imms = 63 & ~(width + width - 1);
+
+        if (inverted) {
+            // if width is 64 & hsb is 62, then we have a value something like:
+            //   0x80000000ffffffff (in this case with lsb 32).
+            // The ror should be by 1, imms (effectively set width minus 1) is
+            // 32. Set width is full width minus cleared width.
+            immr = (width - 1) - hsb;
+            imms |= (width - ((hsb - lsb) + 1)) - 1;
+        } else {
+            // if width is 64 & hsb is 62, then we have a value something like:
+            //   0x7fffffff00000000 (in this case with lsb 32).
+            // The value is effectively rol'ed by lsb, which is equivalent to
+            // a ror by width - lsb (or 0, in the case where lsb is 0). imms
+            // is hsb - lsb.
+            immr = (width - lsb) & (width - 1);
+            imms |= hsb - lsb;
+        }
+
+        return immN << 12 | immr << 6 | imms;
+    }
+
+    static const int InvalidLogicalImmediate = -1;
+
+    int m_value;
+};
+
+} // namespace JSC.
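
A worked example of the encoder above; the values can be checked against the rules in findBitRange() and encodeLogicalImmediate():

    // 0x00ff00ff fails the 32-bit pattern test but repeats every 16 bits;
    // within 16 bits it is 0x00ff: one set range, hsb:7, lsb:0, not inverted.
    ARM64LogicalImmediate imm = ARM64LogicalImmediate::create32(0x00ff00ff);
    ASSERT(imm.isValid() && !imm.is64bit());
    int encoded = imm.value(); // packed immN:immr:imms, here 0x027

    // 0 and ~0 are rejected by the early-out in create32()/create64():
    ASSERT(!ARM64LogicalImmediate::create32(0).isValid());
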
diff --git a/Source/JavaScriptCore/assembler/CPU.h b/Source/JavaScriptCore/assembler/CPU.h
new file mode 100644
index 000000000..8e8c82f9b
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/CPU.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "Options.h"
+
+namespace JSC {
+
+inline bool isARMv7IDIVSupported()
+{
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isARM64()
+{
+#if CPU(ARM64)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isX86()
+{
+#if CPU(X86_64) || CPU(X86)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isX86_64()
+{
+#if CPU(X86_64)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool optimizeForARMv7IDIVSupported()
+{
+    return isARMv7IDIVSupported() && Options::useArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForARM64()
+{
+    return isARM64() && Options::useArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForX86()
+{
+    return isX86() && Options::useArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForX86_64()
+{
+    return isX86_64() && Options::useArchitectureSpecificOptimizations();
+}
+
+inline bool hasSensibleDoubleToInt()
+{
+    return optimizeForX86();
+}
+
+} // namespace JSC
+
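
These predicates fold to compile-time constants, while the optimizeFor*() variants add a runtime opt-out through Options. An illustrative guard (both emit helpers are hypothetical names, not WebKit API):

    if (hasSensibleDoubleToInt())
        emitTruncatingDoubleToInt(jit); // hypothetical x86 fast path
    else
        emitDoubleToIntFallback(jit);   // hypothetical generic path
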
diff --git a/Source/JavaScriptCore/assembler/CodeLocation.h b/Source/JavaScriptCore/assembler/CodeLocation.h
index 86d1f2b75..a115ec3d6 100644
--- a/Source/JavaScriptCore/assembler/CodeLocation.h
+++ b/Source/JavaScriptCore/assembler/CodeLocation.h
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef CodeLocation_h
-#define CodeLocation_h
+#pragma once
 
 #include "MacroAssemblerCodeRef.h"
 
@@ -32,6 +31,8 @@
 
 namespace JSC {
 
+enum NearCallMode { Regular, Tail };
+
 class CodeLocationInstruction;
 class CodeLocationLabel;
 class CodeLocationJump;
@@ -59,7 +60,7 @@ public:
     CodeLocationLabel labelAtOffset(int offset);
     CodeLocationJump jumpAtOffset(int offset);
     CodeLocationCall callAtOffset(int offset);
-    CodeLocationNearCall nearCallAtOffset(int offset);
+    CodeLocationNearCall nearCallAtOffset(int offset, NearCallMode);
     CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
     CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
     CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset);
@@ -115,10 +116,13 @@ public:
 class CodeLocationNearCall : public CodeLocationCommon {
 public:
     CodeLocationNearCall() {}
-    explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
-        : CodeLocationCommon(location) {}
-    explicit CodeLocationNearCall(void* location)
-        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+    explicit CodeLocationNearCall(MacroAssemblerCodePtr location, NearCallMode callMode)
+        : CodeLocationCommon(location), m_callMode(callMode) { }
+    explicit CodeLocationNearCall(void* location, NearCallMode callMode)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)), m_callMode(callMode) { }
+    NearCallMode callMode() { return m_callMode; }
+private:
+    NearCallMode m_callMode = NearCallMode::Regular;
 };
 
 class CodeLocationDataLabel32 : public CodeLocationCommon {
@@ -181,10 +185,10 @@ inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
     return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
 }
 
-inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset, NearCallMode callMode)
 {
     ASSERT_VALID_CODE_OFFSET(offset);
-    return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+    return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset, callMode);
 }
 
 inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
@@ -214,5 +218,3 @@ inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(i
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // CodeLocation_h
diff --git a/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h b/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h
new file mode 100644
index 000000000..91f038942
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+class DisallowMacroScratchRegisterUsage {
+public:
+    DisallowMacroScratchRegisterUsage(MacroAssembler& masm)
+        : m_masm(masm)
+        , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister)
+    {
+        masm.m_allowScratchRegister = false;
+    }
+
+    ~DisallowMacroScratchRegisterUsage()
+    {
+        m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister;
+    }
+
+private:
+    MacroAssembler& m_masm;
+    bool m_oldValueOfAllowScratchRegister;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
index a7f469da8..0309d585d 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.cpp
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,111 +28,164 @@
 
 #if ENABLE(ASSEMBLER)
 
+#include "CodeBlock.h"
+#include "JITCode.h"
+#include "JSCInlines.h"
 #include "Options.h"
 #include "VM.h"
 #include <wtf/CompilationThread.h>
 
 namespace JSC {
 
+bool shouldDumpDisassemblyFor(CodeBlock* codeBlock)
+{
+    if (codeBlock && JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::dumpDFGDisassembly())
+        return true;
+    return Options::dumpDisassembly();
+}
+
 LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
 {
     performFinalization();
     
     ASSERT(m_didAllocate);
     if (m_executableMemory)
-        return CodeRef(m_executableMemory);
+        return CodeRef(*m_executableMemory);
     
     return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code));
 }
 
 LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
 {
-    ASSERT(Options::showDisassembly() || Options::showDFGDisassembly());
-    
     CodeRef result = finalizeCodeWithoutDisassembly();
 
-#if ENABLE(DISASSEMBLER)
-    dataLogF("Generated JIT code for ");
+    if (m_alreadyDisassembled)
+        return result;
+    
+    StringPrintStream out;
+    out.printf("Generated JIT code for ");
     va_list argList;
     va_start(argList, format);
-    WTF::dataLogFV(format, argList);
+    out.vprintf(format, argList);
     va_end(argList);
-    dataLogF(":\n");
+    out.printf(":\n");
+
+    out.printf("    Code at [%p, %p):\n", result.code().executableAddress(), static_cast(result.code().executableAddress()) + result.size());
+    
+    CString header = out.toCString();
+    
+    if (Options::asyncDisassembly()) {
+        disassembleAsynchronously(header, result, m_size, "    ");
+        return result;
+    }
     
-    dataLogF("    Code at [%p, %p):\n", result.code().executableAddress(), static_cast(result.code().executableAddress()) + result.size());
+    dataLog(header);
     disassemble(result.code(), m_size, "    ", WTF::dataFile());
-#else
-    UNUSED_PARAM(format);
-#endif // ENABLE(DISASSEMBLER)
     
     return result;
 }
 
 #if ENABLE(BRANCH_COMPACTION)
+static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset)
+{
+    int32_t ptr = regionStart / sizeof(int32_t);
+    const int32_t end = regionEnd / sizeof(int32_t);
+    int32_t* offsets = reinterpret_cast_ptr<int32_t*>(assemblerData.buffer());
+    while (ptr < end)
+        offsets[ptr++] = offset;
+}
+
 template <typename InstructionType>
-void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
 {
-    m_initialSize = m_assembler->m_assembler.codeSize();
-    allocate(m_initialSize, ownerUID, effort);
-    uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
-    uint8_t* outData = reinterpret_cast(m_code);
+    allocate(macroAssembler, ownerUID, effort);
+    const size_t initialSize = macroAssembler.m_assembler.codeSize();
+    if (didFailToAllocate())
+        return;
+
+    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink();
+    m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData();
+    uint8_t* inData = reinterpret_cast(m_assemblerStorage.buffer());
+
+    AssemblerData outBuffer(m_size);
+
+    uint8_t* outData = reinterpret_cast(outBuffer.buffer());
+    uint8_t* codeOutData = reinterpret_cast(m_code);
+
     int readPtr = 0;
     int writePtr = 0;
-    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
     unsigned jumpCount = jumpsToLink.size();
-    for (unsigned i = 0; i < jumpCount; ++i) {
-        int offset = readPtr - writePtr;
-        ASSERT(!(offset & 1));
-            
-        // Copy the instructions from the last jump to the current one.
-        size_t regionSize = jumpsToLink[i].from() - readPtr;
-        InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr);
-        InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize);
-        InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr);
-        ASSERT(!(regionSize % 2));
-        ASSERT(!(readPtr % 2));
-        ASSERT(!(writePtr % 2));
-        while (copySource != copyEnd)
-            *copyDst++ = *copySource++;
-        m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
-        readPtr += regionSize;
-        writePtr += regionSize;
-            
-        // Calculate absolute address of the jump target, in the case of backwards
-        // branches we need to be precise, forward branches we are pessimistic
-        const uint8_t* target;
-        if (jumpsToLink[i].to() >= jumpsToLink[i].from())
-            target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
-        else
-            target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
-            
-        JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
-        // Compact branch if we can...
-        if (m_assembler->canCompact(jumpsToLink[i].type())) {
-            // Step back in the write stream
-            int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
-            if (delta) {
-                writePtr -= delta;
-                m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+    if (m_shouldPerformBranchCompaction) {
+        for (unsigned i = 0; i < jumpCount; ++i) {
+            int offset = readPtr - writePtr;
+            ASSERT(!(offset & 1));
+                
+            // Copy the instructions from the last jump to the current one.
+            size_t regionSize = jumpsToLink[i].from() - readPtr;
+            InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr);
+            InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize);
+            InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr);
+            ASSERT(!(regionSize % 2));
+            ASSERT(!(readPtr % 2));
+            ASSERT(!(writePtr % 2));
+            while (copySource != copyEnd)
+                *copyDst++ = *copySource++;
+            recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset);
+            readPtr += regionSize;
+            writePtr += regionSize;
+                
+            // Calculate absolute address of the jump target, in the case of backwards
+            // branches we need to be precise, forward branches we are pessimistic
+            const uint8_t* target;
+            if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+                target = codeOutData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+            else
+                target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+                
+            JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], codeOutData + writePtr, target);
+            // Compact branch if we can...
+            if (MacroAssembler::canCompact(jumpsToLink[i].type())) {
+                // Step back in the write stream
+                int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+                if (delta) {
+                    writePtr -= delta;
+                    recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+                }
             }
+            jumpsToLink[i].setFrom(writePtr);
+        }
+    } else {
+        if (!ASSERT_DISABLED) {
+            for (unsigned i = 0; i < jumpCount; ++i)
+                ASSERT(!MacroAssembler::canCompact(jumpsToLink[i].type()));
         }
-        jumpsToLink[i].setFrom(writePtr);
     }
     // Copy everything after the last jump
-    memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
-    m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
+    memcpy(outData + writePtr, inData + readPtr, initialSize - readPtr);
+    recordLinkOffsets(m_assemblerStorage, readPtr, initialSize, readPtr - writePtr);
         
     for (unsigned i = 0; i < jumpCount; ++i) {
-        uint8_t* location = outData + jumpsToLink[i].from();
-        uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
-        m_assembler->link(jumpsToLink[i], location, target);
+        uint8_t* location = codeOutData + jumpsToLink[i].from();
+        uint8_t* target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+        MacroAssembler::link(jumpsToLink[i], outData + jumpsToLink[i].from(), location, target);
     }
 
     jumpsToLink.clear();
-    shrink(writePtr + m_initialSize - readPtr);
+
+    size_t compactSize = writePtr + initialSize - readPtr;
+    if (m_executableMemory) {
+        m_size = compactSize;
+        m_executableMemory->shrink(m_size);
+    } else {
+        size_t nopSizeInBytes = initialSize - compactSize;
+        bool isCopyingToExecutableMemory = false;
+        MacroAssembler::AssemblerType_T::fillNops(outData + compactSize, nopSizeInBytes, isCopyingToExecutableMemory);
+    }
+
+    performJITMemcpy(m_code, outData, m_size);
 
 #if DUMP_LINK_STATISTICS
-    dumpLinkStatistics(m_code, m_initialSize, m_size);
+    dumpLinkStatistics(m_code, initialSize, m_size);
 #endif
 #if DUMP_CODE
     dumpCode(m_code, m_size);
@@ -141,59 +194,63 @@ void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort eff
 #endif
 
 
-void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
 {
+    // Ensure that the end of the last invalidation point does not extend beyond the end of the buffer.
+    macroAssembler.label();
+
 #if !ENABLE(BRANCH_COMPACTION)
 #if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
-    m_assembler->m_assembler.buffer().flushConstantPool(false);
+    macroAssembler.m_assembler.buffer().flushConstantPool(false);
 #endif
-    AssemblerBuffer& buffer = m_assembler->m_assembler.buffer();
-    allocate(buffer.codeSize(), ownerUID, effort);
+    allocate(macroAssembler, ownerUID, effort);
     if (!m_didAllocate)
         return;
     ASSERT(m_code);
+    AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer();
 #if CPU(ARM_TRADITIONAL)
-    m_assembler->m_assembler.prepareExecutableCopy(m_code);
+    macroAssembler.m_assembler.prepareExecutableCopy(m_code);
 #endif
-    memcpy(m_code, buffer.data(), buffer.codeSize());
+    performJITMemcpy(m_code, buffer.data(), buffer.codeSize());
 #if CPU(MIPS)
-    m_assembler->m_assembler.relocateJumps(buffer.data(), m_code);
+    macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code);
 #endif
 #elif CPU(ARM_THUMB2)
-    copyCompactAndLinkCode<uint16_t>(ownerUID, effort);
+    copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort);
 #elif CPU(ARM64)
-    copyCompactAndLinkCode<uint32_t>(ownerUID, effort);
-#endif
+    copyCompactAndLinkCode<uint32_t>(macroAssembler, ownerUID, effort);
+#endif // !ENABLE(BRANCH_COMPACTION)
+
+    m_linkTasks = WTFMove(macroAssembler.m_linkTasks);
 }
 
-void LinkBuffer::allocate(size_t initialSize, void* ownerUID, JITCompilationEffort effort)
+void LinkBuffer::allocate(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
 {
+    size_t initialSize = macroAssembler.m_assembler.codeSize();
     if (m_code) {
         if (initialSize > m_size)
             return;
         
+        size_t nopsToFillInBytes = m_size - initialSize;
+        macroAssembler.emitNops(nopsToFillInBytes);
         m_didAllocate = true;
-        m_size = initialSize;
         return;
     }
     
+    ASSERT(m_vm != nullptr);
     m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, initialSize, ownerUID, effort);
     if (!m_executableMemory)
         return;
-    ExecutableAllocator::makeWritable(m_executableMemory->start(), m_executableMemory->sizeInBytes());
     m_code = m_executableMemory->start();
     m_size = initialSize;
     m_didAllocate = true;
 }
 
-void LinkBuffer::shrink(size_t newSize)
-{
-    m_size = newSize;
-    m_executableMemory->shrink(m_size);
-}
-
 void LinkBuffer::performFinalization()
 {
+    for (auto& task : m_linkTasks)
+        task->run(*this);
+
 #ifndef NDEBUG
     ASSERT(!isCompilationThread());
     ASSERT(!m_completed);
@@ -201,11 +258,6 @@ void LinkBuffer::performFinalization()
     m_completed = true;
 #endif
     
-#if ENABLE(BRANCH_COMPACTION)
-    ExecutableAllocator::makeExecutable(code(), m_initialSize);
-#else
-    ExecutableAllocator::makeExecutable(code(), m_size);
-#endif
     MacroAssembler::cacheFlush(code(), m_size);
 }
 
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h
index 8d4ce521f..efb26f9ce 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.h
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010, 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef LinkBuffer_h
-#define LinkBuffer_h
+#pragma once
 
 #if ENABLE(ASSEMBLER)
 
@@ -43,6 +42,7 @@
 
 namespace JSC {
 
+class CodeBlock;
 class VM;
 
 // LinkBuffer:
@@ -79,36 +79,33 @@ class LinkBuffer {
 #endif
 
 public:
-    LinkBuffer(VM& vm, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+    LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
         : m_size(0)
-#if ENABLE(BRANCH_COMPACTION)
-        , m_initialSize(0)
-#endif
         , m_didAllocate(false)
         , m_code(0)
-        , m_assembler(masm)
         , m_vm(&vm)
 #ifndef NDEBUG
         , m_completed(false)
 #endif
     {
-        linkCode(ownerUID, effort);
+        linkCode(macroAssembler, ownerUID, effort);
     }
 
-    LinkBuffer(VM& vm, MacroAssembler* masm, void* code, size_t size)
+    LinkBuffer(MacroAssembler& macroAssembler, void* code, size_t size, JITCompilationEffort effort = JITCompilationMustSucceed, bool shouldPerformBranchCompaction = true)
         : m_size(size)
-#if ENABLE(BRANCH_COMPACTION)
-        , m_initialSize(0)
-#endif
         , m_didAllocate(false)
         , m_code(code)
-        , m_assembler(masm)
-        , m_vm(&vm)
+        , m_vm(0)
 #ifndef NDEBUG
         , m_completed(false)
 #endif
     {
-        linkCode(0, JITCompilationCanFail);
+#if ENABLE(BRANCH_COMPACTION)
+        m_shouldPerformBranchCompaction = shouldPerformBranchCompaction;
+#else
+        UNUSED_PARAM(shouldPerformBranchCompaction);
+#endif
+        linkCode(macroAssembler, 0, effort);
     }
 
     ~LinkBuffer()
@@ -145,10 +142,10 @@ public:
         MacroAssembler::linkJump(code(), jump, label);
     }
 
-    void link(JumpList list, CodeLocationLabel label)
+    void link(const JumpList& list, CodeLocationLabel label)
     {
-        for (unsigned i = 0; i < list.m_jumps.size(); ++i)
-            link(list.m_jumps[i], label);
+        for (const Jump& jump : list.jumps())
+            link(jump, label);
     }
 
     void patch(DataLabelPtr label, void* value)
@@ -164,6 +161,11 @@ public:
     }
 
     // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+    
+    CodeLocationLabel entrypoint()
+    {
+        return CodeLocationLabel(code());
+    }
 
     CodeLocationCall locationOf(Call call)
     {
@@ -176,7 +178,8 @@ public:
     {
         ASSERT(call.isFlagSet(Call::Linkable));
         ASSERT(call.isFlagSet(Call::Near));
-        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)),
+            call.isFlagSet(Call::Tail) ? NearCallMode::Tail : NearCallMode::Regular);
     }
 
     CodeLocationLabel locationOf(PatchableJump jump)
@@ -244,34 +247,44 @@ public:
     {
         return m_code;
     }
+
+    size_t size() const { return m_size; }
     
-    size_t size()
-    {
-        return m_size;
-    }
+    bool wasAlreadyDisassembled() const { return m_alreadyDisassembled; }
+    void didAlreadyDisassemble() { m_alreadyDisassembled = true; }
+
+    VM& vm() { return *m_vm; }
 
 private:
+#if ENABLE(BRANCH_COMPACTION)
+    int executableOffsetFor(int location)
+    {
+        if (!location)
+            return 0;
+        return bitwise_cast<int32_t*>(m_assemblerStorage.buffer())[location / sizeof(int32_t) - 1];
+    }
+#endif
+    
     template <typename T> T applyOffset(T src)
     {
 #if ENABLE(BRANCH_COMPACTION)
-        src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
+        src.m_offset -= executableOffsetFor(src.m_offset);
 #endif
         return src;
     }
-    
+
     // Keep this private! - the underlying code should only be obtained externally via finalizeCode().
     void* code()
     {
         return m_code;
     }
     
-    void allocate(size_t initialSize, void* ownerUID, JITCompilationEffort);
-    void shrink(size_t newSize);
+    void allocate(MacroAssembler&, void* ownerUID, JITCompilationEffort);
 
-    JS_EXPORT_PRIVATE void linkCode(void* ownerUID, JITCompilationEffort);
+    JS_EXPORT_PRIVATE void linkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
 #if ENABLE(BRANCH_COMPACTION)
     template <typename InstructionType>
-    void copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort);
+    void copyCompactAndLinkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
 #endif
 
     void performFinalization();
@@ -287,15 +300,17 @@ private:
     RefPtr<ExecutableMemoryHandle> m_executableMemory;
     size_t m_size;
 #if ENABLE(BRANCH_COMPACTION)
-    size_t m_initialSize;
+    AssemblerData m_assemblerStorage;
+    bool m_shouldPerformBranchCompaction { true };
 #endif
     bool m_didAllocate;
     void* m_code;
-    MacroAssembler* m_assembler;
     VM* m_vm;
 #ifndef NDEBUG
     bool m_completed;
 #endif
+    bool m_alreadyDisassembled { false };
+    Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;
 };
 
 #define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading)  \
@@ -303,6 +318,11 @@ private:
      ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \
      : (linkBufferReference).finalizeCodeWithoutDisassembly())
 
+bool shouldDumpDisassemblyFor(CodeBlock*);
+
+#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, dataLogFArgumentsForHeading)  \
+    FINALIZE_CODE_IF(shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
 // Use this to finalize code, like so:
 //
 // CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number));
@@ -316,17 +336,15 @@ private:
 //
 // ... and so on.
 //
-// Note that the dataLogFArgumentsForHeading are only evaluated when showDisassembly
+// Note that the dataLogFArgumentsForHeading are only evaluated when dumpDisassembly
 // is true, so you can hide expensive disassembly-only computations inside there.
 
 #define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading)  \
-    FINALIZE_CODE_IF(JSC::Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+    FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
 
 #define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading)  \
-    FINALIZE_CODE_IF((JSC::Options::showDisassembly() || Options::showDFGDisassembly()), linkBufferReference, dataLogFArgumentsForHeading)
+    FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // LinkBuffer_h
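
[Editor's note] The LinkBuffer changes above drop the stored MacroAssembler* in favor of a reference handed to linkCode() at construction time, keep the branch-compaction offsets in an AssemblerData member, and let callers opt out of compaction. A minimal usage sketch, assuming a VM& and ownerUID supplied by the surrounding JIT; the emitted instructions and names are illustrative, not from the patch:

    #include "LinkBuffer.h"
    #include "MacroAssembler.h"

    static void buildExampleThunk(JSC::VM& vm, void* ownerUID)
    {
        using namespace JSC;

        MacroAssembler jit;
        MacroAssembler::JumpList failures;
        // Register choice is arbitrary; this just gives the JumpList something to hold.
        failures.append(jit.branchTest32(MacroAssembler::Zero, MacroAssembler::firstRegister()));
        jit.ret();
        MacroAssembler::Label slowPath = jit.label();
        jit.breakpoint();

        // The assembler is now passed by reference; linkCode() copies (and on
        // ARM/MIPS branch-compacts) the finished code inside the constructor.
        LinkBuffer linkBuffer(vm, jit, ownerUID, JITCompilationCanFail);
        if (linkBuffer.didFailToAllocate())
            return; // Allowed because we requested JITCompilationCanFail.
        linkBuffer.link(failures, linkBuffer.locationOf(slowPath));
        MacroAssembler::CodeRef code = FINALIZE_CODE(linkBuffer, ("example thunk"));
        UNUSED_PARAM(code);
    }
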
diff --git a/Source/JavaScriptCore/assembler/MIPSAssembler.h b/Source/JavaScriptCore/assembler/MIPSAssembler.h
index b75b4d0af..b1c42326c 100644
--- a/Source/JavaScriptCore/assembler/MIPSAssembler.h
+++ b/Source/JavaScriptCore/assembler/MIPSAssembler.h
@@ -26,8 +26,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef MIPSAssembler_h
-#define MIPSAssembler_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && CPU(MIPS)
 
@@ -151,11 +150,11 @@ public:
     typedef MIPSRegisters::FPRegisterID FPRegisterID;
     typedef SegmentedVector<AssemblerLabel, 256> Jumps;
 
-    static RegisterID firstRegister() { return MIPSRegisters::r0; }
-    static RegisterID lastRegister() { return MIPSRegisters::r31; }
+    static constexpr RegisterID firstRegister() { return MIPSRegisters::r0; }
+    static constexpr RegisterID lastRegister() { return MIPSRegisters::r31; }
 
-    static FPRegisterID firstFPRegister() { return MIPSRegisters::f0; }
-    static FPRegisterID lastFPRegister() { return MIPSRegisters::f31; }
+    static constexpr FPRegisterID firstFPRegister() { return MIPSRegisters::f0; }
+    static constexpr FPRegisterID lastFPRegister() { return MIPSRegisters::f31; }
 
     MIPSAssembler()
         : m_indexOfLastWatchpoint(INT_MIN)
@@ -240,6 +239,11 @@ public:
         emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff));
     }
 
+    void clz(RegisterID rd, RegisterID rs)
+    {
+        emitInst(0x70000020 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rd << OP_SH_RT));
+    }
+
     void addiu(RegisterID rt, RegisterID rs, int imm)
     {
         emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
@@ -542,6 +546,11 @@ public:
         emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
     }
 
+    void absd(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46200005 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
     void movd(FPRegisterID fd, FPRegisterID fs)
     {
         emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
@@ -870,28 +879,8 @@ public:
 
     static void cacheFlush(void* code, size_t size)
     {
-#if GCC_VERSION_AT_LEAST(4, 3, 0)
-#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
-        int lineSize;
-        asm("rdhwr %0, $1" : "=r" (lineSize));
-        //
-        // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
-        // mips_expand_synci_loop that may execute synci one more time.
-        // "start" points to the fisrt byte of the cache line.
-        // "end" points to the last byte of the line before the last cache line.
-        // Because size is always a multiple of 4, this is safe to set
-        // "end" to the last byte.
-        //
-        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
-        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
-        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
-#else
         intptr_t end = reinterpret_cast<intptr_t>(code) + size;
         __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
-#endif
-#else
-        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
-#endif
     }
 
     static ptrdiff_t maxJumpReplacementSize()
@@ -1106,5 +1095,3 @@ private:
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(MIPS)
-
-#endif // MIPSAssembler_h
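
[Editor's note] The MIPSAssembler hunks above add two instruction emitters (clz, abs.d) and simplify cacheFlush() to call __builtin___clear_cache unconditionally, dropping the old GCC 4.3/4.4 workarounds. A standalone sketch of the clz encoding, assuming the field shifts the file itself defines; the register numbers in the check are illustrative:

    #include <cstdint>

    // MIPS32 CLZ is a SPECIAL2 instruction (0x70000020 with the function
    // field folded in), and the architecture requires rt == rd, which is
    // why the emitter above writes rd into both the RT and RD fields.
    static constexpr uint32_t OP_SH_RS = 21;
    static constexpr uint32_t OP_SH_RT = 16;
    static constexpr uint32_t OP_SH_RD = 11;

    constexpr uint32_t encodeClz(uint32_t rd, uint32_t rs)
    {
        return 0x70000020 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rd << OP_SH_RT);
    }

    // clz $v0, $v1  ($v0 = register 2, $v1 = register 3)
    static_assert(encodeClz(2, 3) == 0x70621020, "matches the reference encoding");
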
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.cpp b/Source/JavaScriptCore/assembler/MacroAssembler.cpp
index 2cff056d2..0cd5bcfb0 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.cpp
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.cpp
@@ -28,11 +28,135 @@
 
 #if ENABLE(ASSEMBLER)
 
+#include <wtf/PrintStream.h>
+
 namespace JSC {
 
 const double MacroAssembler::twoToThe32 = (double)0x100000000ull;
 
+#if ENABLE(MASM_PROBE)
+static void stdFunctionCallback(MacroAssembler::ProbeContext* context)
+{
+    auto func = static_cast<std::function<void (MacroAssembler::ProbeContext*)>*>(context->arg1);
+    (*func)(context);
+}
+    
+void MacroAssembler::probe(std::function<void (MacroAssembler::ProbeContext*)> func)
+{
+    probe(stdFunctionCallback, new std::function<void (MacroAssembler::ProbeContext*)>(func), 0);
+}
+#endif // ENABLE(MASM_PROBE)
+
 } // namespace JSC
 
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, MacroAssembler::RelationalCondition cond)
+{
+    switch (cond) {
+    case MacroAssembler::Equal:
+        out.print("Equal");
+        return;
+    case MacroAssembler::NotEqual:
+        out.print("NotEqual");
+        return;
+    case MacroAssembler::Above:
+        out.print("Above");
+        return;
+    case MacroAssembler::AboveOrEqual:
+        out.print("AboveOrEqual");
+        return;
+    case MacroAssembler::Below:
+        out.print("Below");
+        return;
+    case MacroAssembler::BelowOrEqual:
+        out.print("BelowOrEqual");
+        return;
+    case MacroAssembler::GreaterThan:
+        out.print("GreaterThan");
+        return;
+    case MacroAssembler::GreaterThanOrEqual:
+        out.print("GreaterThanOrEqual");
+        return;
+    case MacroAssembler::LessThan:
+        out.print("LessThan");
+        return;
+    case MacroAssembler::LessThanOrEqual:
+        out.print("LessThanOrEqual");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, MacroAssembler::ResultCondition cond)
+{
+    switch (cond) {
+    case MacroAssembler::Overflow:
+        out.print("Overflow");
+        return;
+    case MacroAssembler::Signed:
+        out.print("Signed");
+        return;
+    case MacroAssembler::PositiveOrZero:
+        out.print("PositiveOrZero");
+        return;
+    case MacroAssembler::Zero:
+        out.print("Zero");
+        return;
+    case MacroAssembler::NonZero:
+        out.print("NonZero");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, MacroAssembler::DoubleCondition cond)
+{
+    switch (cond) {
+    case MacroAssembler::DoubleEqual:
+        out.print("DoubleEqual");
+        return;
+    case MacroAssembler::DoubleNotEqual:
+        out.print("DoubleNotEqual");
+        return;
+    case MacroAssembler::DoubleGreaterThan:
+        out.print("DoubleGreaterThan");
+        return;
+    case MacroAssembler::DoubleGreaterThanOrEqual:
+        out.print("DoubleGreaterThanOrEqual");
+        return;
+    case MacroAssembler::DoubleLessThan:
+        out.print("DoubleLessThan");
+        return;
+    case MacroAssembler::DoubleLessThanOrEqual:
+        out.print("DoubleLessThanOrEqual");
+        return;
+    case MacroAssembler::DoubleEqualOrUnordered:
+        out.print("DoubleEqualOrUnordered");
+        return;
+    case MacroAssembler::DoubleNotEqualOrUnordered:
+        out.print("DoubleNotEqualOrUnordered");
+        return;
+    case MacroAssembler::DoubleGreaterThanOrUnordered:
+        out.print("DoubleGreaterThanOrUnordered");
+        return;
+    case MacroAssembler::DoubleGreaterThanOrEqualOrUnordered:
+        out.print("DoubleGreaterThanOrEqualOrUnordered");
+        return;
+    case MacroAssembler::DoubleLessThanOrUnordered:
+        out.print("DoubleLessThanOrUnordered");
+        return;
+    case MacroAssembler::DoubleLessThanOrEqualOrUnordered:
+        out.print("DoubleLessThanOrEqualOrUnordered");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
 #endif // ENABLE(ASSEMBLER)
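
[Editor's note] The printInternal() overloads added above hook the MacroAssembler condition enums into WTF's PrintStream machinery, so a condition can be handed straight to the variadic logging helpers instead of being mapped to a string by hand. A minimal sketch, assuming a hypothetical diagnostic helper:

    #include "MacroAssembler.h"
    #include <wtf/DataLog.h>

    static void logCondition(JSC::MacroAssembler::RelationalCondition cond)
    {
        // dataLog() routes each argument through WTF::printInternal(), so
        // this prints "selected condition: Equal" for MacroAssembler::Equal.
        WTF::dataLog("selected condition: ", cond, "\n");
    }
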
 
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
index 4a43eb625..b6aba874d 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,13 +23,12 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef MacroAssembler_h
-#define MacroAssembler_h
-
-#include <wtf/Platform.h>
+#pragma once
 
 #if ENABLE(ASSEMBLER)
 
+#include "JSCJSValue.h"
+
 #if CPU(ARM_THUMB2)
 #include "MacroAssemblerARMv7.h"
 namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
@@ -56,78 +55,53 @@ namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
 #include "MacroAssemblerX86_64.h"
 namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
 
-#elif CPU(SH4)
-#include "MacroAssemblerSH4.h"
-namespace JSC {
-typedef MacroAssemblerSH4 MacroAssemblerBase;
-};
-
 #else
 #error "The MacroAssembler is not supported on this platform."
 #endif
 
+#include "MacroAssemblerHelpers.h"
+
 namespace JSC {
 
 class MacroAssembler : public MacroAssemblerBase {
 public:
 
-    static bool isStackRelated(RegisterID reg)
-    {
-        return reg == stackPointerRegister || reg == framePointerRegister;
-    }
-    
-    static RegisterID firstRealRegister()
-    {
-        RegisterID firstRegister = MacroAssembler::firstRegister();
-        while (MacroAssembler::isStackRelated(firstRegister))
-            firstRegister = static_cast<RegisterID>(firstRegister + 1);
-        return firstRegister;
-    }
-    
-    static RegisterID nextRegister(RegisterID reg)
-    {
-        RegisterID result = static_cast<RegisterID>(reg + 1);
-        while (MacroAssembler::isStackRelated(result))
-            result = static_cast<RegisterID>(result + 1);
-        return result;
-    }
-    
-    static RegisterID secondRealRegister()
+    static constexpr RegisterID nextRegister(RegisterID reg)
     {
-        return nextRegister(firstRealRegister());
+        return static_cast<RegisterID>(reg + 1);
     }
     
-    static FPRegisterID nextFPRegister(FPRegisterID reg)
+    static constexpr FPRegisterID nextFPRegister(FPRegisterID reg)
     {
         return static_cast<FPRegisterID>(reg + 1);
     }
     
-    static unsigned numberOfRegisters()
+    static constexpr unsigned numberOfRegisters()
     {
         return lastRegister() - firstRegister() + 1;
     }
     
-    static unsigned registerIndex(RegisterID reg)
+    static constexpr unsigned registerIndex(RegisterID reg)
     {
         return reg - firstRegister();
     }
     
-    static unsigned numberOfFPRegisters()
+    static constexpr unsigned numberOfFPRegisters()
     {
         return lastFPRegister() - firstFPRegister() + 1;
     }
     
-    static unsigned fpRegisterIndex(FPRegisterID reg)
+    static constexpr unsigned fpRegisterIndex(FPRegisterID reg)
     {
         return reg - firstFPRegister();
     }
     
-    static unsigned registerIndex(FPRegisterID reg)
+    static constexpr unsigned registerIndex(FPRegisterID reg)
     {
         return fpRegisterIndex(reg) + numberOfRegisters();
     }
     
-    static unsigned totalNumberOfRegisters()
+    static constexpr unsigned totalNumberOfRegisters()
     {
         return numberOfRegisters() + numberOfFPRegisters();
     }
@@ -135,14 +109,16 @@ public:
     using MacroAssemblerBase::pop;
     using MacroAssemblerBase::jump;
     using MacroAssemblerBase::branch32;
+    using MacroAssemblerBase::compare32;
     using MacroAssemblerBase::move;
     using MacroAssemblerBase::add32;
+    using MacroAssemblerBase::mul32;
     using MacroAssemblerBase::and32;
     using MacroAssemblerBase::branchAdd32;
     using MacroAssemblerBase::branchMul32;
-#if CPU(X86_64)
+#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL) || CPU(X86_64)
     using MacroAssemblerBase::branchPtr;
-#endif // CPU(X86_64)
+#endif
     using MacroAssemblerBase::branchSub32;
     using MacroAssemblerBase::lshift32;
     using MacroAssemblerBase::or32;
@@ -160,7 +136,6 @@ public:
     static const double twoToThe32; // This is super useful for some double code.
 
     // Utilities used by the DFG JIT.
-#if ENABLE(DFG_JIT)
     using MacroAssemblerBase::invert;
     
     static DoubleCondition invert(DoubleCondition cond)
@@ -190,10 +165,9 @@ public:
             return DoubleGreaterThanOrEqual;
         case DoubleLessThanOrEqualOrUnordered:
             return DoubleGreaterThan;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-            return DoubleEqual; // make compiler happy
         }
+        RELEASE_ASSERT_NOT_REACHED();
+        return DoubleEqual; // make compiler happy
     }
     
     static bool isInvertible(ResultCondition cond)
@@ -201,6 +175,8 @@ public:
         switch (cond) {
         case Zero:
         case NonZero:
+        case Signed:
+        case PositiveOrZero:
             return true;
         default:
             return false;
@@ -214,14 +190,65 @@ public:
             return NonZero;
         case NonZero:
             return Zero;
+        case Signed:
+            return PositiveOrZero;
+        case PositiveOrZero:
+            return Signed;
         default:
             RELEASE_ASSERT_NOT_REACHED();
             return Zero; // Make compiler happy for release builds.
         }
     }
-#endif
 
-    // Platform agnostic onvenience functions,
+    static RelationalCondition flip(RelationalCondition cond)
+    {
+        switch (cond) {
+        case Equal:
+        case NotEqual:
+            return cond;
+        case Above:
+            return Below;
+        case AboveOrEqual:
+            return BelowOrEqual;
+        case Below:
+            return Above;
+        case BelowOrEqual:
+            return AboveOrEqual;
+        case GreaterThan:
+            return LessThan;
+        case GreaterThanOrEqual:
+            return LessThanOrEqual;
+        case LessThan:
+            return GreaterThan;
+        case LessThanOrEqual:
+            return GreaterThanOrEqual;
+        }
+
+        RELEASE_ASSERT_NOT_REACHED();
+        return Equal;
+    }
+
+    static bool isSigned(RelationalCondition cond)
+    {
+        return MacroAssemblerHelpers::isSigned(cond);
+    }
+
+    static bool isUnsigned(RelationalCondition cond)
+    {
+        return MacroAssemblerHelpers::isUnsigned(cond);
+    }
+
+    static bool isSigned(ResultCondition cond)
+    {
+        return MacroAssemblerHelpers::isSigned(cond);
+    }
+
+    static bool isUnsigned(ResultCondition cond)
+    {
+        return MacroAssemblerHelpers::isUnsigned(cond);
+    }
+
+    // Platform agnostic convenience functions,
     // described in terms of other macro assembly methods.
     void pop()
     {
@@ -258,6 +285,10 @@ public:
     {
         push(src);
     }
+    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+    {
+        push(imm);
+    }
     void popToRestore(RegisterID dest)
     {
         pop(dest);
@@ -272,6 +303,8 @@ public:
         loadDouble(stackPointerRegister, dest);
         addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
     }
+    
+    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
 #endif // !CPU(ARM64)
 
 #if CPU(X86_64) || CPU(ARM64)
@@ -316,6 +349,11 @@ public:
         branchPtr(cond, op1, imm).linkTo(target, this);
     }
 
+    Jump branch32(RelationalCondition cond, RegisterID left, AbsoluteAddress right)
+    {
+        return branch32(flip(cond), right, left);
+    }
+
     void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
     {
         branch32(cond, op1, op2).linkTo(target, this);
@@ -346,6 +384,11 @@ public:
         return branch32(commute(cond), right, left);
     }
 
+    void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest)
+    {
+        compare32(commute(cond), right, left, dest);
+    }
+
     void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
     {
         branchTestPtr(cond, reg).linkTo(target, this);
@@ -362,6 +405,11 @@ public:
         return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
     }
 
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
+    }
+
 #if !CPU(ARM_TRADITIONAL)
     PatchableJump patchableJump()
     {
@@ -377,6 +425,11 @@ public:
     {
         return PatchableJump(branch32(cond, reg, imm));
     }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+    {
+        return PatchableJump(branch32(cond, address, imm));
+    }
 #endif
 #endif
 
@@ -414,6 +467,19 @@ public:
         return condition;
     }
 
+    void oops()
+    {
+        abortWithReason(B3Oops);
+    }
+
+    // B3 has additional pseudo-opcodes for returning, when it wants to signal that the return
+    // consumes some register in some way.
+    void retVoid() { ret(); }
+    void ret32(RegisterID) { ret(); }
+    void ret64(RegisterID) { ret(); }
+    void retFloat(FPRegisterID) { ret(); }
+    void retDouble(FPRegisterID) { ret(); }
+
     static const unsigned BlindingModulus = 64;
     bool shouldConsiderBlinding()
     {
@@ -439,6 +505,11 @@ public:
         add32(src, dest);
     }
 
+    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        add32(left, right, dest);
+    }
+
     void addPtr(TrustedImm32 imm, RegisterID srcDest)
     {
         add32(imm, srcDest);
@@ -474,6 +545,21 @@ public:
         and32(TrustedImm32(imm), srcDest);
     }
 
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        lshift32(trustedImm32ForShift(imm), srcDest);
+    }
+    
+    void rshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        rshift32(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void urshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        urshift32(trustedImm32ForShift(imm), srcDest);
+    }
+
     void negPtr(RegisterID dest)
     {
         neg32(dest);
@@ -595,6 +681,11 @@ public:
         store32(TrustedImm32(imm), address);
     }
 
+    void storePtr(TrustedImm32 imm, ImplicitAddress address)
+    {
+        store32(imm, address);
+    }
+
     void storePtr(TrustedImmPtr imm, BaseIndex address)
     {
         store32(TrustedImm32(imm), address);
@@ -691,6 +782,11 @@ public:
     {
         add64(src, dest);
     }
+
+    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        add64(left, right, dest);
+    }
     
     void addPtr(Address src, RegisterID dest)
     {
@@ -747,6 +843,16 @@ public:
         lshift64(trustedImm32ForShift(imm), srcDest);
     }
 
+    void rshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        rshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void urshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        urshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
     void negPtr(RegisterID dest)
     {
         neg64(dest);
@@ -857,6 +963,11 @@ public:
         store64(TrustedImm64(imm), address);
     }
 
+    void storePtr(TrustedImm32 imm, ImplicitAddress address)
+    {
+        store64(imm, address);
+    }
+
     void storePtr(TrustedImmPtr imm, BaseIndex address)
     {
         store64(TrustedImm64(imm), address);
@@ -986,7 +1097,7 @@ public:
         if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
             return shouldConsiderBlinding();
 
-        value = abs(value);
+        value = fabs(value);
         // Only allow a limited set of fractional components
         double scaledValue = value * 8;
         if (scaledValue / 8 != value)
@@ -1137,7 +1248,7 @@ public:
 
     void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
             RegisterID scratchRegister = scratchRegisterForBlinding();
             loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
             convertInt32ToDouble(scratchRegister, dest);
@@ -1173,7 +1284,7 @@ public:
 
     Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
     {
-        if (shouldBlind(right)) {
+        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
             RegisterID scratchRegister = scratchRegisterForBlinding();
             loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
             return branchPtr(cond, left, scratchRegister);
@@ -1183,7 +1294,7 @@ public:
     
     void storePtr(ImmPtr imm, Address dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
             RegisterID scratchRegister = scratchRegisterForBlinding();
             loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
             storePtr(scratchRegister, dest);
@@ -1193,7 +1304,7 @@ public:
 
     void store64(Imm64 imm, Address dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
             RegisterID scratchRegister = scratchRegisterForBlinding();
             loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
             store64(scratchRegister, dest);
@@ -1203,6 +1314,37 @@ public:
 
 #endif // !CPU(X86_64)
 
+#if ENABLE(B3_JIT)
+    // We should implement this the right way eventually, but for now, it's fine because it arises so
+    // infrequently.
+    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
+    {
+        move(TrustedImm32(0), dest);
+        Jump falseCase = branchDouble(invert(cond), left, right);
+        move(TrustedImm32(1), dest);
+        falseCase.link(this);
+    }
+    void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
+    {
+        move(TrustedImm32(0), dest);
+        Jump falseCase = branchFloat(invert(cond), left, right);
+        move(TrustedImm32(1), dest);
+        falseCase.link(this);
+    }
+#endif
+
+    void lea32(Address address, RegisterID dest)
+    {
+        add32(TrustedImm32(address.offset), address.base, dest);
+    }
+
+#if CPU(X86_64) || CPU(ARM64)
+    void lea64(Address address, RegisterID dest)
+    {
+        add64(TrustedImm32(address.offset), address.base, dest);
+    }
+#endif // CPU(X86_64) || CPU(ARM64)
+
     bool shouldBlind(Imm32 imm)
     {
 #if ENABLE(FORCED_JIT_BLINDING)
@@ -1316,6 +1458,16 @@ public:
         } else
             add32(imm.asTrustedImm32(), dest);
     }
+
+    void add32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            add32(key.value1, src, dest);
+            add32(key.value2, dest);
+        } else
+            add32(imm.asTrustedImm32(), src, dest);
+    }
     
     void addPtr(Imm32 imm, RegisterID dest)
     {
@@ -1327,6 +1479,27 @@ public:
             addPtr(imm.asTrustedImm32(), dest);
     }
 
+    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            if (src != dest || haveScratchRegisterForBlinding()) {
+                if (src == dest) {
+                    move(src, scratchRegisterForBlinding());
+                    src = scratchRegisterForBlinding();
+                }
+                loadXorBlindedConstant(xorBlindConstant(imm), dest);
+                mul32(src, dest);
+                return;
+            }
+            // If we don't have a scratch register available for use, we'll just
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
+        }
+        mul32(imm.asTrustedImm32(), src, dest);
+    }
+
     void and32(Imm32 imm, RegisterID dest)
     {
         if (shouldBlind(imm)) {
@@ -1486,23 +1659,50 @@ public:
         return branch32(cond, left, right.asTrustedImm32());
     }
 
-    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+    void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest)
     {
-        if (src == dest)
-            ASSERT(haveScratchRegisterForBlinding());
+        if (shouldBlind(right)) {
+            if (left != dest || haveScratchRegisterForBlinding()) {
+                RegisterID blindedConstantReg = dest;
+                if (left == dest)
+                    blindedConstantReg = scratchRegisterForBlinding();
+                loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg);
+                compare32(cond, left, blindedConstantReg, dest);
+                return;
+            }
+            // If we don't have a scratch register available for use, we'll just
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
+            compare32(cond, left, right.asTrustedImm32(), dest);
+            return;
+        }
+
+        compare32(cond, left, right.asTrustedImm32(), dest);
+    }
 
+    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+    {
         if (shouldBlind(imm)) {
-            if (src == dest) {
-                move(src, scratchRegisterForBlinding());
-                src = scratchRegisterForBlinding();
+            if (src != dest || haveScratchRegisterForBlinding()) {
+                if (src == dest) {
+                    move(src, scratchRegisterForBlinding());
+                    src = scratchRegisterForBlinding();
+                }
+                loadXorBlindedConstant(xorBlindConstant(imm), dest);
+                return branchAdd32(cond, src, dest);
             }
-            loadXorBlindedConstant(xorBlindConstant(imm), dest);
-            return branchAdd32(cond, src, dest);  
+            // If we don't have a scratch register available for use, we'll just
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
         }
         return branchAdd32(cond, src, imm.asTrustedImm32(), dest);            
     }
     
-    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
+    Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
     {
         if (src == dest)
             ASSERT(haveScratchRegisterForBlinding());
@@ -1515,7 +1715,7 @@ public:
             loadXorBlindedConstant(xorBlindConstant(imm), dest);
             return branchMul32(cond, src, dest);  
         }
-        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
+        return branchMul32(cond, src, imm.asTrustedImm32(), dest);
     }
 
     // branchSub32 takes a scratch register as 32 bit platforms make use of this,
@@ -1560,12 +1760,35 @@ public:
     {
         urshift32(src, trustedImm32ForShift(amount), dest);
     }
+
+#if ENABLE(MASM_PROBE)
+    using MacroAssemblerBase::probe;
+
+    // Lets you print from your JIT-generated code.
+    // See comments in MacroAssemblerPrinter.h for examples of how to use this.
+    template<typename... Arguments>
+    void print(Arguments... args);
+
+    void probe(std::function<void (ProbeContext*)>);
+#endif
 };
 
 } // namespace JSC
 
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition);
+void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition);
+void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition);
+
+} // namespace WTF
+
 #else // ENABLE(ASSEMBLER)
 
+namespace JSC {
+
 // If there is no assembler for this platform, at least allow code to make references to
 // some of the things it would otherwise define, albeit without giving that code any way
 // of doing anything useful.
@@ -1579,6 +1802,6 @@ public:
     enum FPRegisterID { NoFPRegister };
 };
 
-#endif // ENABLE(ASSEMBLER)
+} // namespace JSC
 
-#endif // MacroAssembler_h
+#endif // ENABLE(ASSEMBLER)
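
[Editor's note] Among the MacroAssembler.h changes above, flip() deserves a remark: unlike invert(), which negates the outcome of a comparison, flip() exchanges its operands, which is what lets the new branch32(cond, RegisterID, AbsoluteAddress) overload be expressed in terms of the existing (AbsoluteAddress, RegisterID) form. A self-contained sketch of that identity, modeled on plain ints rather than JSC types:

    #include <cassert>
    #include <initializer_list>

    enum class Rel { LessThan, GreaterThan };

    constexpr Rel flip(Rel r) { return r == Rel::LessThan ? Rel::GreaterThan : Rel::LessThan; }

    constexpr bool compare(Rel r, int a, int b) { return r == Rel::LessThan ? a < b : a > b; }

    int main()
    {
        // branch32(cond, left, right) behaves like branch32(flip(cond), right, left).
        for (int a : { 1, 2, 3 })
            for (int b : { 1, 2, 3 })
                assert(compare(Rel::LessThan, a, b) == compare(flip(Rel::LessThan), b, a));
        return 0;
    }
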
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp
index a6f3e65c0..9b1440fed 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc.
+ * Copyright (C) 2013-2015 Apple Inc.
  * Copyright (C) 2009 University of Szeged
  * All rights reserved.
  *
@@ -31,9 +31,7 @@
 
 #include "MacroAssemblerARM.h"
 
-#if USE(MASM_PROBE)
-#include <wtf/StdLibExtras.h>
-#endif
+#include <wtf/InlineASM.h>
 
 #if OS(LINUX)
 #include 
@@ -50,7 +48,7 @@ static bool isVFPPresent()
 {
 #if OS(LINUX)
     int fd = open("/proc/self/auxv", O_RDONLY);
-    if (fd > 0) {
+    if (fd != -1) {
         Elf32_auxv_t aux;
         while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
             if (aux.a_type == AT_HWCAP) {
@@ -62,7 +60,7 @@ static bool isVFPPresent()
     }
 #endif // OS(LINUX)
 
-#if (COMPILER(GCC) && defined(__VFP_FP__))
+#if (COMPILER(GCC_OR_CLANG) && defined(__VFP_FP__))
     return true;
 #else
     return false;
@@ -99,51 +97,256 @@ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, Register
 }
 #endif // CPU(ARMV5_OR_LOWER)
 
-#if USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
 
-void MacroAssemblerARM::ProbeContext::dumpCPURegisters(const char* indentation)
-{
-    #define DUMP_GPREGISTER(_type, _regName) { \
-        int32_t value = reinterpret_cast<int32_t>(cpu._regName); \
-        dataLogF("%s    %5s: 0x%08x   %d\n", indentation, #_regName, value, value) ; \
-    }
-    FOR_EACH_CPU_GPREGISTER(DUMP_GPREGISTER)
-    FOR_EACH_CPU_SPECIAL_REGISTER(DUMP_GPREGISTER)
-    #undef DUMP_GPREGISTER
-
-    #define DUMP_FPREGISTER(_type, _regName) { \
-        uint32_t* u = reinterpret_cast<uint32_t*>(&cpu._regName); \
-        double* d = reinterpret_cast<double*>(&cpu._regName); \
-        dataLogF("%s    %5s: 0x %08x %08x   %12g\n", \
-            indentation, #_regName, u[1], u[0], d[0]); \
-    }
-    FOR_EACH_CPU_FPREGISTER(DUMP_FPREGISTER)
-    #undef DUMP_FPREGISTER
-}
+extern "C" void ctiMasmProbeTrampoline();
 
-void MacroAssemblerARM::ProbeContext::dump(const char* indentation)
-{
-    if (!indentation)
-        indentation = "";
+#if COMPILER(GCC_OR_CLANG)
+    
+// The following are offsets for MacroAssemblerARM::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
 
-    dataLogF("%sProbeContext %p {\n", indentation, this);
-    dataLogF("%s  probeFunction: %p\n", indentation, probeFunction);
-    dataLogF("%s  arg1: %p %llu\n", indentation, arg1, reinterpret_cast(arg1));
-    dataLogF("%s  arg2: %p %llu\n", indentation, arg2, reinterpret_cast(arg2));
-    dataLogF("%s  cpu: {\n", indentation);
+#define PTR_SIZE 4
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
 
-    dumpCPURegisters(indentation);
+#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE)
 
-    dataLogF("%s  }\n", indentation);
-    dataLogF("%s}\n", indentation);
-}
+#define GPREG_SIZE 4
+#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
+#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
+#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
+#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
+#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
+#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
+#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
+#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
+#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
+#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
+#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
+#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
+#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
+#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
+#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
+#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
 
+#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
+#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
 
-extern "C" void ctiMasmProbeTrampoline();
+#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
+
+#define FPREG_SIZE 8
+#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
+#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
+#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
+#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
+#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
+#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
+#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
+#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
+#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
+#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
+#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
+#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
+#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
+#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
+#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
+#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
+
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(sizeof(MacroAssemblerARM::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+#undef PROBE_OFFSETOF
+
+asm (
+    ".text" "\n"
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    INLINE_ARM_FUNCTION(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    // MacroAssemblerARM::probe() has already generated code to store some values.
+    // The top of stack now looks like this:
+    //     esp[0 * ptrSize]: probeFunction
+    //     esp[1 * ptrSize]: arg1
+    //     esp[2 * ptrSize]: arg2
+    //     esp[3 * ptrSize]: saved r3 / S0
+    //     esp[4 * ptrSize]: saved ip
+    //     esp[5 * ptrSize]: saved lr
+    //     esp[6 * ptrSize]: saved sp
+
+    "mov       ip, sp" "\n"
+    "mov       r3, sp" "\n"
+    "sub       r3, r3, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n"
+
+    // The ARM EABI specifies that the stack needs to be 16 byte aligned.
+    "bic       r3, r3, #0xf" "\n"
+    "mov       sp, r3" "\n"
+
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "add       lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "\n"
+    "stmia     lr, { r0-r11 }" "\n"
+    "mrs       lr, APSR" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "vmrs      lr, FPSCR" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+
+    "ldr       lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R3_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n"
+    "vstmia.64 ip, { d0-d15 }" "\n"
+
+    "mov       fp, sp" "\n" // Save the ProbeContext*.
+
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "mov       r0, sp" "\n" // the ProbeContext* arg.
+    "blx       ip" "\n"
+
+    "mov       sp, fp" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning.
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n"
+    "vldmdb.64 ip!, { d0-d15 }" "\n"
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n"
+    "ldmdb     ip, { r0-r11 }" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+    "vmsr      FPSCR, ip" "\n"
+
+    // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr.
+    // There are 2 issues that complicate the restoration of these last few
+    // registers:
+    //
+    // 1. Normal ARM calling convention relies on moving lr to pc to return to
+    //    the caller. In our case, the address to return to is specified by
+    //    ProbeContext.cpu.pc. And at that moment, we won't have any available
+    //    scratch registers to hold the return address (lr needs to hold
+    //    ProbeContext.cpu.lr, not the return address).
+    //
+    //    The solution is to store the return address on the stack and load the
+    //     pc from there.
+    //
+    // 2. Issue 1 means we will need to write to the stack location at
+    //    ProbeContext.cpu.sp - 4. But if the user probe function had modified
+    //    the value of ProbeContext.cpu.sp to point in the range between
+    //    &ProbeContext.cpu.ip through &ProbeContext.cpu.apsr, then the action for
+    //    Issue 1 may trash the values to be restored before we can restore
+    //    them.
+    //
+    //    The solution is to check if ProbeContext.cpu.sp contains a value in
+    //    the undesirable range. If so, we copy the remaining ProbeContext
+    //    register data to a safe range (at memory lower than where
+    //    ProbeContext.cpu.sp points) first, and restore the remaining register
+    //    from this new range.
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n"
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "cmp       lr, ip" "\n"
+    "bgt     " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+    // We get here because the new expected stack pointer location is lower
+    // than where it's supposed to be. This means the safe range of stack
+    // memory where we'll be copying the remaining register restore values to
+    // might be in a region of memory below the sp i.e. unallocated stack
+    // memory. This in turn makes it vulnerable to interrupts potentially
+    // trashing the copied values. To prevent that, we must first allocate the
+    // needed stack memory by adjusting the sp before the copying.
+
+    "sub       lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE)
+    " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n"
+
+    "mov       ip, sp" "\n"
+    "mov       sp, lr" "\n"
+    "mov       lr, ip" "\n"
+
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+
+    SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "sub       lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n"
+    "str       ip, [lr]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "msr       APSR, ip" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "mov       lr, ip" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
 
-// For details on "What code is emitted for the probe?" and "What values are in
-// the saved registers?", see comment for MacroAssemblerX86::probe() in
-// MacroAssemblerX86_64.h.
+    "pop       { pc }" "\n"
+);
+#endif // COMPILER(GCC_OR_CLANG)
 
 void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* arg1, void* arg2)
 {
@@ -160,7 +363,7 @@ void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* a
     m_assembler.blx(RegisterID::S0);
 
 }
-#endif // USE(MASM_PROBE)
+#endif // ENABLE(MASM_PROBE)
 
 } // namespace JSC
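
[Editor's note] With the trampoline above and the std::function plumbing added in MacroAssembler.cpp, a probe can be planted from any emitter without hand-writing a C callback. A usage sketch, assuming MASM_PROBE is enabled; "emitDebugProbe" is a hypothetical helper name, not from the patch:

    #include "MacroAssembler.h"
    #include <wtf/DataLog.h>

    void emitDebugProbe(JSC::MacroAssembler& jit)
    {
    #if ENABLE(MASM_PROBE)
        jit.probe([](JSC::MacroAssembler::ProbeContext* context) {
            // Runs whenever the generated code reaches the probe site; the
            // trampoline saves every register into *context and restores
            // them (including any modifications the probe makes) on return.
            WTF::dataLogF("probe hit, context = %p\n", context);
        });
    #else
        UNUSED_PARAM(jit);
    #endif
    }
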
 
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
index 7eae2ee01..7d36034a3 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2013 Apple Inc.
+ * Copyright (C) 2008, 2013-2016 Apple Inc.
  * Copyright (C) 2009, 2010 University of Szeged
  * All rights reserved.
  *
@@ -25,8 +25,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef MacroAssemblerARM_h
-#define MacroAssemblerARM_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
 
@@ -35,11 +34,14 @@
 
 namespace JSC {
 
-class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler, MacroAssemblerARM> {
     static const int DoubleConditionMask = 0x0f;
     static const int DoubleConditionBitSpecial = 0x10;
     COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
 public:
+    static const unsigned numGPRs = 16;
+    static const unsigned numFPRs = 16;
+    
     typedef ARMRegisters::FPRegisterID FPRegisterID;
 
     enum RelationalCondition {
@@ -228,13 +230,31 @@ public:
         store32(ARMRegisters::S1, ARMRegisters::S0);
     }
 
+    void or32(TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+        load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+        or32(imm, ARMRegisters::S1); // It uses S0 as temporary register, we need to reload the address.
+        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+        store32(ARMRegisters::S1, ARMRegisters::S0);
+    }
+
+    void or32(TrustedImm32 imm, Address address)
+    {
+        load32(address, ARMRegisters::S0);
+        or32(imm, ARMRegisters::S0, ARMRegisters::S0);
+        store32(ARMRegisters::S0, address);
+    }
+
     void or32(TrustedImm32 imm, RegisterID dest)
     {
+        ASSERT(dest != ARMRegisters::S0);
         m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
     }
 
     void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
+        ASSERT(src != ARMRegisters::S0);
         m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
     }
 
@@ -263,7 +283,10 @@ public:
 
     void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
     }
 
     void urshift32(RegisterID shiftAmount, RegisterID dest)
@@ -286,7 +309,10 @@ public:
     
     void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
     }
 
     void sub32(RegisterID src, RegisterID dest)
@@ -294,6 +320,11 @@ public:
         m_assembler.subs(dest, dest, src);
     }
 
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.subs(dest, left, right);
+    }
+
     void sub32(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
@@ -370,7 +401,12 @@ public:
         m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0);
     }
 
-    void load8Signed(BaseIndex address, RegisterID dest)
+    void load8SignedExtendTo32(Address address, RegisterID dest)
+    {
+        m_assembler.dataTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.offset);
+    }
+
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }
@@ -385,7 +421,7 @@ public:
         m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }
 
-    void load16Signed(BaseIndex address, RegisterID dest)
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }
@@ -414,6 +450,18 @@ public:
         load16(address, dest);
     }
 
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), ARMRegisters::S0);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), ARMRegisters::S1);
+        abortWithReason(reason);
+    }
+
     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
     {
         ConvertibleLoadLabel result(this);
@@ -459,16 +507,29 @@ public:
         m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }
 
+    void store8(RegisterID src, ImplicitAddress address)
+    {
+        m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset);
+    }
+
     void store8(RegisterID src, const void* address)
     {
         move(TrustedImmPtr(address), ARMRegisters::S0);
         m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0);
     }
 
+    void store8(TrustedImm32 imm, ImplicitAddress address)
+    {
+        TrustedImm32 imm8(static_cast<uint8_t>(imm.m_value));
+        move(imm8, ARMRegisters::S1);
+        store8(ARMRegisters::S1, address);
+    }
+
     void store8(TrustedImm32 imm, const void* address)
     {
+        TrustedImm32 imm8(static_cast<uint8_t>(imm.m_value));
         move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
-        move(imm, ARMRegisters::S1);
+        move(imm8, ARMRegisters::S1);
         m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
     }
 
@@ -517,6 +578,12 @@ public:
         m_assembler.pop(dest);
     }
 
+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.pop(dest1);
+        m_assembler.pop(dest2);
+    }
+
     void push(RegisterID src)
     {
         m_assembler.push(src);
@@ -534,6 +601,12 @@ public:
         push(ARMRegisters::S0);
     }
 
+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.push(src2);
+        m_assembler.push(src1);
+    }
+
     void move(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.moveImm(imm.m_value, dest);
@@ -571,21 +644,29 @@ public:
 
     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
     {
-        load8(left, ARMRegisters::S1);
-        return branch32(cond, ARMRegisters::S1, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
     }
 
     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
     {
-        ASSERT(!(right.m_value & 0xFFFFFF00));
-        load8(left, ARMRegisters::S1);
-        return branch32(cond, ARMRegisters::S1, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
     }
 
     Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
     {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
         move(TrustedImmPtr(left.m_ptr), ARMRegisters::S1);
-        load8(Address(ARMRegisters::S1), ARMRegisters::S1);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(ARMRegisters::S1), ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        load32(left, ARMRegisters::S1);
         return branch32(cond, ARMRegisters::S1, right);
     }
 
@@ -633,33 +714,36 @@ public:
 
     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address, ARMRegisters::S1);
-        return branchTest32(cond, ARMRegisters::S1, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address, ARMRegisters::S1);
-        return branchTest32(cond, ARMRegisters::S1, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
         move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
-        load8(Address(ARMRegisters::S1), ARMRegisters::S1);
-        return branchTest32(cond, ARMRegisters::S1, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(ARMRegisters::S1), ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
     }
 
     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
         m_assembler.tst(reg, mask);
         return Jump(m_assembler.jmp(ARMCondition(cond)));
     }
 
     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
         ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
         if (w & ARMAssembler::Op2InvertedImmediate)
             m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
@@ -790,7 +874,7 @@ public:
         return branchMul32(cond, src, dest, dest);
     }
 
-    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
         ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
         if (cond == Overflow) {
@@ -858,6 +942,14 @@ public:
         return PatchableJump(jump);
     }
 
+    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+    {
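+        // Note: the branch is emitted as a load of the target into S1 plus
+        // an indirect bx, so the target word can be repatched later.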
+        internalCompare32(address, imm);
+        Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), false));
+        m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
+        return PatchableJump(jump);
+    }
+
     void breakpoint()
     {
         m_assembler.bkpt(0);
@@ -869,6 +961,11 @@ public:
         return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
     }
 
+    Call nearTailCall()
+    {
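+        // Note: a tail call is emitted as a plain jump; linkCall() below
+        // links calls flagged Call::Tail as jumps rather than calls.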
+        return Call(m_assembler.jmp(), Call::LinkableNearTail);
+    }
+
     Call call(RegisterID target)
     {
         return Call(m_assembler.blx(target), Call::None);
@@ -900,14 +997,15 @@ public:
 
     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
     {
-        load8(left, ARMRegisters::S1);
-        compare32(cond, ARMRegisters::S1, right, dest);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        compare32(cond, ARMRegisters::S1, right8, dest);
     }
 
     void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
     {
         if (mask.m_value == -1)
-            m_assembler.cmp(0, reg);
+            m_assembler.tst(reg, reg);
         else
             m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
         m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
@@ -922,8 +1020,9 @@ public:
 
     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
-        load8(address, ARMRegisters::S1);
-        test32(cond, ARMRegisters::S1, mask, dest);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        test32(cond, ARMRegisters::S1, mask8, dest);
     }
 
     void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
@@ -1021,6 +1120,13 @@ public:
         return dataLabel;
     }
 
+    DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+    {
+        DataLabel32 dataLabel(this);
+        m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value));
+        return dataLabel;
+    }
+
     Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
         ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
@@ -1038,6 +1144,15 @@ public:
         return jump;
     }
 
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        load32(left, ARMRegisters::S1);
+        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+        return jump;
+    }
+
     DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
     {
         DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
@@ -1066,6 +1181,7 @@ public:
         return s_isVFPPresent;
     }
     static bool supportsFloatingPointAbs() { return false; }
+    static bool supportsFloatingPointRounding() { return false; }
 
     void loadFloat(BaseIndex address, FPRegisterID dest)
     {
@@ -1082,12 +1198,30 @@ public:
         m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }
 
-    void loadDouble(const void* address, FPRegisterID dest)
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
     {
-        move(TrustedImm32(reinterpret_cast(address)), ARMRegisters::S0);
+        move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
         m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
     }
 
+    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
     void storeFloat(FPRegisterID src, BaseIndex address)
     {
         m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
@@ -1103,9 +1237,9 @@ public:
         m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }
 
-    void storeDouble(FPRegisterID src, const void* address)
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
     {
-        move(TrustedImm32(reinterpret_cast(address)), ARMRegisters::S0);
+        move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
         m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
     }
 
@@ -1115,6 +1249,12 @@ public:
             m_assembler.vmov_f64(dest, src);
     }
 
+    void moveZeroToDouble(FPRegisterID reg)
+    {
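+        // Note: VFP has no immediate encoding for 0.0, so the zero is
+        // loaded from a static constant in memory instead.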
+        static double zeroConstant = 0.;
+        loadDouble(TrustedImmPtr(&zeroConstant), reg);
+    }
+
     void addDouble(FPRegisterID src, FPRegisterID dest)
     {
         m_assembler.vadd_f64(dest, dest, src);
@@ -1133,7 +1273,7 @@ public:
 
     void addDouble(AbsoluteAddress address, FPRegisterID dest)
     {
-        loadDouble(address.m_ptr, ARMRegisters::SD0);
+        loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0);
         addDouble(ARMRegisters::SD0, dest);
     }
 
@@ -1330,6 +1470,11 @@ public:
         m_assembler.dmbSY();
     }
 
+    void storeFence()
+    {
+        m_assembler.dmbISHST();
+    }
+
     static FunctionPtr readCallTarget(CodeLocationCall call)
     {
         return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
@@ -1342,11 +1487,22 @@ public:
     
     static ptrdiff_t maxJumpReplacementSize()
     {
-        ARMAssembler::maxJumpReplacementSize();
-        return 0;
+        return ARMAssembler::maxJumpReplacementSize();
+    }
+
+    static ptrdiff_t patchableJumpSize()
+    {
+        return ARMAssembler::patchableJumpSize();
     }
 
     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
 
     static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
     {
@@ -1364,36 +1520,29 @@ public:
         ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
     }
 
-    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
     {
         UNREACHABLE_FOR_PLATFORM();
     }
 
-#if USE(MASM_PROBE)
-    struct CPUState {
-        #define DECLARE_REGISTER(_type, _regName) \
-            _type _regName;
-        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
-        #undef DECLARE_REGISTER
-    };
-
-    struct ProbeContext;
-    typedef void (*ProbeFunction)(struct ProbeContext*);
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
 
-    struct ProbeContext {
-        ProbeFunction probeFunction;
-        void* arg1;
-        void* arg2;
-        CPUState cpu;
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
 
-        void dump(const char* indentation = 0);
-    private:
-        void dumpCPURegisters(const char* indentation);
-    };
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
 
-    // For details about probe(), see comment in MacroAssemblerX86_64.h.
-    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-#endif // USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
 
 protected:
     ARMAssembler::Condition ARMCondition(RelationalCondition cond)
@@ -1424,7 +1573,6 @@ protected:
 
 private:
     friend class LinkBuffer;
-    friend class RepatchBuffer;
 
     void internalCompare32(RegisterID left, TrustedImm32 right)
     {
@@ -1435,22 +1583,26 @@ private:
             m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
     }
 
-    static void linkCall(void* code, Call call, FunctionPtr function)
+    void internalCompare32(Address left, TrustedImm32 right)
     {
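+        // Note: a cmp immediate that ARM cannot encode can often be done as
+        // cmn with the negated value; 0x80000000 is excluded because its
+        // negation overflows, so that case falls back to cmp via S0.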
-        ARMAssembler::linkCall(code, call.m_label, function.value());
+        ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
+        load32(left, ARMRegisters::S1);
+        if (tmp != ARMAssembler::InvalidImmediate)
+            m_assembler.cmn(ARMRegisters::S1, tmp);
+        else
+            m_assembler.cmp(ARMRegisters::S1, m_assembler.getImm(right.m_value, ARMRegisters::S0));
     }
 
-    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+        if (call.isFlagSet(Call::Tail))
+            ARMAssembler::linkJump(code, call.m_label, function.value());
+        else
+            ARMAssembler::linkCall(code, call.m_label, function.value());
     }
 
-    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
-    {
-        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
-    }
 
-#if USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
     inline TrustedImm32 trustedImm32FromPtr(void* ptr)
     {
         return TrustedImm32(TrustedImmPtr(ptr));
@@ -1470,8 +1622,6 @@ private:
     static const bool s_isVFPPresent;
 };
 
-}
+} // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
-
-#endif // MacroAssemblerARM_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp
new file mode 100644
index 000000000..8e7b51b9f
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM64)
+#include "MacroAssemblerARM64.h"
+
+#include <wtf/InlineASM.h>
+
+namespace JSC {
+
+#if ENABLE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+using namespace ARM64Registers;
+
+#if COMPILER(GCC_OR_CLANG)
+
+// The following are offsets for MacroAssemblerARM64::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+#define PTR_SIZE 8
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE)
+
+#define GPREG_SIZE 8
+#define PROBE_CPU_X0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
+#define PROBE_CPU_X1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
+#define PROBE_CPU_X2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
+#define PROBE_CPU_X3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
+#define PROBE_CPU_X4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
+#define PROBE_CPU_X5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
+#define PROBE_CPU_X6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
+#define PROBE_CPU_X7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
+#define PROBE_CPU_X8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
+#define PROBE_CPU_X9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
+#define PROBE_CPU_X10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
+#define PROBE_CPU_X11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
+#define PROBE_CPU_X12_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
+#define PROBE_CPU_X13_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
+#define PROBE_CPU_X14_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
+#define PROBE_CPU_X15_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
+#define PROBE_CPU_X16_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
+#define PROBE_CPU_X17_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
+#define PROBE_CPU_X18_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
+#define PROBE_CPU_X19_OFFSET (PROBE_FIRST_GPREG_OFFSET + (19 * GPREG_SIZE))
+#define PROBE_CPU_X20_OFFSET (PROBE_FIRST_GPREG_OFFSET + (20 * GPREG_SIZE))
+#define PROBE_CPU_X21_OFFSET (PROBE_FIRST_GPREG_OFFSET + (21 * GPREG_SIZE))
+#define PROBE_CPU_X22_OFFSET (PROBE_FIRST_GPREG_OFFSET + (22 * GPREG_SIZE))
+#define PROBE_CPU_X23_OFFSET (PROBE_FIRST_GPREG_OFFSET + (23 * GPREG_SIZE))
+#define PROBE_CPU_X24_OFFSET (PROBE_FIRST_GPREG_OFFSET + (24 * GPREG_SIZE))
+#define PROBE_CPU_X25_OFFSET (PROBE_FIRST_GPREG_OFFSET + (25 * GPREG_SIZE))
+#define PROBE_CPU_X26_OFFSET (PROBE_FIRST_GPREG_OFFSET + (26 * GPREG_SIZE))
+#define PROBE_CPU_X27_OFFSET (PROBE_FIRST_GPREG_OFFSET + (27 * GPREG_SIZE))
+#define PROBE_CPU_X28_OFFSET (PROBE_FIRST_GPREG_OFFSET + (28 * GPREG_SIZE))
+#define PROBE_CPU_FP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (29 * GPREG_SIZE))
+#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (30 * GPREG_SIZE))
+#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (31 * GPREG_SIZE))
+
+#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (32 * GPREG_SIZE))
+#define PROBE_CPU_NZCV_OFFSET (PROBE_FIRST_GPREG_OFFSET + (33 * GPREG_SIZE))
+#define PROBE_CPU_FPSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (34 * GPREG_SIZE))
+
+#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (35 * GPREG_SIZE))
+
+#define FPREG_SIZE 8
+#define PROBE_CPU_Q0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
+#define PROBE_CPU_Q1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
+#define PROBE_CPU_Q2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
+#define PROBE_CPU_Q3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
+#define PROBE_CPU_Q4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
+#define PROBE_CPU_Q5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
+#define PROBE_CPU_Q6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
+#define PROBE_CPU_Q7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
+#define PROBE_CPU_Q8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
+#define PROBE_CPU_Q9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
+#define PROBE_CPU_Q10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
+#define PROBE_CPU_Q11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
+#define PROBE_CPU_Q12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
+#define PROBE_CPU_Q13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
+#define PROBE_CPU_Q14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
+#define PROBE_CPU_Q15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
+#define PROBE_CPU_Q16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+#define PROBE_CPU_Q17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE))
+#define PROBE_CPU_Q18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE))
+#define PROBE_CPU_Q19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE))
+#define PROBE_CPU_Q20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE))
+#define PROBE_CPU_Q21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE))
+#define PROBE_CPU_Q22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE))
+#define PROBE_CPU_Q23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE))
+#define PROBE_CPU_Q24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE))
+#define PROBE_CPU_Q25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE))
+#define PROBE_CPU_Q26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE))
+#define PROBE_CPU_Q27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE))
+#define PROBE_CPU_Q28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE))
+#define PROBE_CPU_Q29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE))
+#define PROBE_CPU_Q30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE))
+#define PROBE_CPU_Q31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE))
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE))
+#define SAVED_CALLER_SP PROBE_SIZE
+#define PROBE_SIZE_PLUS_SAVED_CALLER_SP (SAVED_CALLER_SP + PTR_SIZE)
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change the ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM64::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x0) == PROBE_CPU_X0_OFFSET, ProbeContext_cpu_x0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x1) == PROBE_CPU_X1_OFFSET, ProbeContext_cpu_x1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x2) == PROBE_CPU_X2_OFFSET, ProbeContext_cpu_x2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x3) == PROBE_CPU_X3_OFFSET, ProbeContext_cpu_x3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x4) == PROBE_CPU_X4_OFFSET, ProbeContext_cpu_x4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x5) == PROBE_CPU_X5_OFFSET, ProbeContext_cpu_x5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x6) == PROBE_CPU_X6_OFFSET, ProbeContext_cpu_x6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x7) == PROBE_CPU_X7_OFFSET, ProbeContext_cpu_x7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x8) == PROBE_CPU_X8_OFFSET, ProbeContext_cpu_x8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x9) == PROBE_CPU_X9_OFFSET, ProbeContext_cpu_x9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x10) == PROBE_CPU_X10_OFFSET, ProbeContext_cpu_x10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x11) == PROBE_CPU_X11_OFFSET, ProbeContext_cpu_x11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x12) == PROBE_CPU_X12_OFFSET, ProbeContext_cpu_x12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x13) == PROBE_CPU_X13_OFFSET, ProbeContext_cpu_x13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x14) == PROBE_CPU_X14_OFFSET, ProbeContext_cpu_x14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x15) == PROBE_CPU_X15_OFFSET, ProbeContext_cpu_x15_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x16) == PROBE_CPU_X16_OFFSET, ProbeContext_cpu_x16_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x17) == PROBE_CPU_X17_OFFSET, ProbeContext_cpu_x17_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x18) == PROBE_CPU_X18_OFFSET, ProbeContext_cpu_x18_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x19) == PROBE_CPU_X19_OFFSET, ProbeContext_cpu_x19_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x20) == PROBE_CPU_X20_OFFSET, ProbeContext_cpu_x20_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x21) == PROBE_CPU_X21_OFFSET, ProbeContext_cpu_x21_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x22) == PROBE_CPU_X22_OFFSET, ProbeContext_cpu_x22_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x23) == PROBE_CPU_X23_OFFSET, ProbeContext_cpu_x23_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x24) == PROBE_CPU_X24_OFFSET, ProbeContext_cpu_x24_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x25) == PROBE_CPU_X25_OFFSET, ProbeContext_cpu_x25_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x26) == PROBE_CPU_X26_OFFSET, ProbeContext_cpu_x26_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x27) == PROBE_CPU_X27_OFFSET, ProbeContext_cpu_x27_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x28) == PROBE_CPU_X28_OFFSET, ProbeContext_cpu_x28_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fp) == PROBE_CPU_FP_OFFSET, ProbeContext_cpu_fp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.nzcv) == PROBE_CPU_NZCV_OFFSET, ProbeContext_cpu_nzcv_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpsr) == PROBE_CPU_FPSR_OFFSET, ProbeContext_cpu_fpsr_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q0) == PROBE_CPU_Q0_OFFSET, ProbeContext_cpu_q0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q1) == PROBE_CPU_Q1_OFFSET, ProbeContext_cpu_q1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q2) == PROBE_CPU_Q2_OFFSET, ProbeContext_cpu_q2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q3) == PROBE_CPU_Q3_OFFSET, ProbeContext_cpu_q3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q4) == PROBE_CPU_Q4_OFFSET, ProbeContext_cpu_q4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q5) == PROBE_CPU_Q5_OFFSET, ProbeContext_cpu_q5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q6) == PROBE_CPU_Q6_OFFSET, ProbeContext_cpu_q6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q7) == PROBE_CPU_Q7_OFFSET, ProbeContext_cpu_q7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q8) == PROBE_CPU_Q8_OFFSET, ProbeContext_cpu_q8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q9) == PROBE_CPU_Q9_OFFSET, ProbeContext_cpu_q9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q10) == PROBE_CPU_Q10_OFFSET, ProbeContext_cpu_q10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q11) == PROBE_CPU_Q11_OFFSET, ProbeContext_cpu_q11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q12) == PROBE_CPU_Q12_OFFSET, ProbeContext_cpu_q12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q13) == PROBE_CPU_Q13_OFFSET, ProbeContext_cpu_q13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q14) == PROBE_CPU_Q14_OFFSET, ProbeContext_cpu_q14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q15) == PROBE_CPU_Q15_OFFSET, ProbeContext_cpu_q15_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q16) == PROBE_CPU_Q16_OFFSET, ProbeContext_cpu_q16_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q17) == PROBE_CPU_Q17_OFFSET, ProbeContext_cpu_q17_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q18) == PROBE_CPU_Q18_OFFSET, ProbeContext_cpu_q18_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q19) == PROBE_CPU_Q19_OFFSET, ProbeContext_cpu_q19_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q20) == PROBE_CPU_Q20_OFFSET, ProbeContext_cpu_q20_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q21) == PROBE_CPU_Q21_OFFSET, ProbeContext_cpu_q21_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q22) == PROBE_CPU_Q22_OFFSET, ProbeContext_cpu_q22_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q23) == PROBE_CPU_Q23_OFFSET, ProbeContext_cpu_q23_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q24) == PROBE_CPU_Q24_OFFSET, ProbeContext_cpu_q24_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q25) == PROBE_CPU_Q25_OFFSET, ProbeContext_cpu_q25_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q26) == PROBE_CPU_Q26_OFFSET, ProbeContext_cpu_q26_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q27) == PROBE_CPU_Q27_OFFSET, ProbeContext_cpu_q27_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q28) == PROBE_CPU_Q28_OFFSET, ProbeContext_cpu_q28_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q29) == PROBE_CPU_Q29_OFFSET, ProbeContext_cpu_q29_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q30) == PROBE_CPU_Q30_OFFSET, ProbeContext_cpu_q30_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q31) == PROBE_CPU_Q31_OFFSET, ProbeContext_cpu_q31_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(sizeof(MacroAssemblerARM64::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+
+#undef PROBE_OFFSETOF
+
+asm (
+    ".text" "\n"
+    ".align 2" "\n"
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    // MacroAssemblerARM64::probe() has already generated code to store some values.
+    // The top of stack (the caller save buffer) now looks like this:
+    //     sp[0 * ptrSize]: probeFunction
+    //     sp[1 * ptrSize]: arg1
+    //     sp[2 * ptrSize]: arg2
+    //     sp[3 * ptrSize]: address of arm64ProbeTrampoline()
+    //     sp[4 * ptrSize]: saved x27
+    //     sp[5 * ptrSize]: saved x28
+    //     sp[6 * ptrSize]: saved lr
+    //     sp[7 * ptrSize]: saved sp
+
+    "mov       x27, sp" "\n"
+    "mov       x28, sp" "\n"
+
+    "sub       x28, x28, #" STRINGIZE_VALUE_OF(PROBE_SIZE_PLUS_SAVED_CALLER_SP) "\n"
+
+    // The ARM64 ABI requires the stack to be 16-byte aligned.
+    "bic       x28, x28, #0xf" "\n"
+    "mov       sp, x28" "\n"
+
+    "str       x27, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n"
+
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X0_OFFSET) "]" "\n"
+    "str       x1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X1_OFFSET) "]" "\n"
+    "str       x2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X2_OFFSET) "]" "\n"
+    "str       x3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X3_OFFSET) "]" "\n"
+    "str       x4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X4_OFFSET) "]" "\n"
+    "str       x5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X5_OFFSET) "]" "\n"
+    "str       x6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X6_OFFSET) "]" "\n"
+    "str       x7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X7_OFFSET) "]" "\n"
+    "str       x8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X8_OFFSET) "]" "\n"
+    "str       x9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X9_OFFSET) "]" "\n"
+    "str       x10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X10_OFFSET) "]" "\n"
+    "str       x11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X11_OFFSET) "]" "\n"
+    "str       x12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X12_OFFSET) "]" "\n"
+    "str       x13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X13_OFFSET) "]" "\n"
+    "str       x14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X14_OFFSET) "]" "\n"
+    "str       x15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X15_OFFSET) "]" "\n"
+    "str       x16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X16_OFFSET) "]" "\n"
+    "str       x17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X17_OFFSET) "]" "\n"
+    "str       x18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X18_OFFSET) "]" "\n"
+    "str       x19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X19_OFFSET) "]" "\n"
+    "str       x20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X20_OFFSET) "]" "\n"
+    "str       x21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X21_OFFSET) "]" "\n"
+    "str       x22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X22_OFFSET) "]" "\n"
+    "str       x23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X23_OFFSET) "]" "\n"
+    "str       x24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X24_OFFSET) "]" "\n"
+    "str       x25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X25_OFFSET) "]" "\n"
+    "str       x26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X26_OFFSET) "]" "\n"
+
+    "ldr       x0, [x27, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n"
+    "ldr       x0, [x27, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n"
+
+    "str       fp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "]" "\n"
+
+    "ldr       x0, [x27, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       x0, [x27, #7 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+    "mrs       x0, nzcv" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_NZCV_OFFSET) "]" "\n"
+    "mrs       x0, fpsr" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSR_OFFSET) "]" "\n"
+
+    "ldr       x0, [x27, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "ldr       x0, [x27, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
+    "ldr       x0, [x27, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
+
+    "str       d0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q0_OFFSET) "]" "\n"
+    "str       d1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q1_OFFSET) "]" "\n"
+    "str       d2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q2_OFFSET) "]" "\n"
+    "str       d3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q3_OFFSET) "]" "\n"
+    "str       d4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q4_OFFSET) "]" "\n"
+    "str       d5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q5_OFFSET) "]" "\n"
+    "str       d6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q6_OFFSET) "]" "\n"
+    "str       d7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q7_OFFSET) "]" "\n"
+    "str       d8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q8_OFFSET) "]" "\n"
+    "str       d9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q9_OFFSET) "]" "\n"
+    "str       d10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q10_OFFSET) "]" "\n"
+    "str       d11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q11_OFFSET) "]" "\n"
+    "str       d12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q12_OFFSET) "]" "\n"
+    "str       d13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q13_OFFSET) "]" "\n"
+    "str       d14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q14_OFFSET) "]" "\n"
+    "str       d15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q15_OFFSET) "]" "\n"
+    "str       d16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q16_OFFSET) "]" "\n"
+    "str       d17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q17_OFFSET) "]" "\n"
+    "str       d18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q18_OFFSET) "]" "\n"
+    "str       d19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q19_OFFSET) "]" "\n"
+    "str       d20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q20_OFFSET) "]" "\n"
+    "str       d21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q21_OFFSET) "]" "\n"
+    "str       d22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q22_OFFSET) "]" "\n"
+    "str       d23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q23_OFFSET) "]" "\n"
+    "str       d24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q24_OFFSET) "]" "\n"
+    "str       d25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q25_OFFSET) "]" "\n"
+    "str       d26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q26_OFFSET) "]" "\n"
+    "str       d27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q27_OFFSET) "]" "\n"
+    "str       d28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q28_OFFSET) "]" "\n"
+    "str       d29, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q29_OFFSET) "]" "\n"
+    "str       d30, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q30_OFFSET) "]" "\n"
+    "str       d31, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q31_OFFSET) "]" "\n"
+
+    "mov       x28, sp" "\n" // Save the ProbeContext*.
+
+    "mov       x0, sp" "\n" // the ProbeContext* arg.
+    "ldr       x27, [x27, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "blr       x27" "\n"
+
+    "mov       sp, x28" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning, except for x18, pc, and sp.
+
+    // x18 is "reserved for the platform. Conforming software should not make use of it."
+    // Hence, the JITs would not be using it, and the probe should also not be modifying it.
+    // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html.
+
+    // We can't modify the pc, because the only way to set its value on ARM64 is via
+    // an indirect branch or a ret, which means we'll need a free register to do so.
+    // The probe mechanism is required to not perturb any registers that the caller
+    // may use. Hence, we don't have this free register available.
+
+    // In order to return to the caller, we need to ret via lr. The probe mechanism will
+    // restore lr's value after returning to the caller by loading the restore value
+    // from the caller save buffer. The caller expects to access the caller save buffer via
+    // sp. Hence, we cannot allow sp to be modified by the probe.
+
+    "ldr       d0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q0_OFFSET) "]" "\n"
+    "ldr       d1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q1_OFFSET) "]" "\n"
+    "ldr       d2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q2_OFFSET) "]" "\n"
+    "ldr       d3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q3_OFFSET) "]" "\n"
+    "ldr       d4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q4_OFFSET) "]" "\n"
+    "ldr       d5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q5_OFFSET) "]" "\n"
+    "ldr       d6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q6_OFFSET) "]" "\n"
+    "ldr       d7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q7_OFFSET) "]" "\n"
+    "ldr       d8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q8_OFFSET) "]" "\n"
+    "ldr       d9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q9_OFFSET) "]" "\n"
+    "ldr       d10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q10_OFFSET) "]" "\n"
+    "ldr       d11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q11_OFFSET) "]" "\n"
+    "ldr       d12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q12_OFFSET) "]" "\n"
+    "ldr       d13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q13_OFFSET) "]" "\n"
+    "ldr       d14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q14_OFFSET) "]" "\n"
+    "ldr       d15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q15_OFFSET) "]" "\n"
+    "ldr       d16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q16_OFFSET) "]" "\n"
+    "ldr       d17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q17_OFFSET) "]" "\n"
+    "ldr       d18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q18_OFFSET) "]" "\n"
+    "ldr       d19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q19_OFFSET) "]" "\n"
+    "ldr       d20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q20_OFFSET) "]" "\n"
+    "ldr       d21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q21_OFFSET) "]" "\n"
+    "ldr       d22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q22_OFFSET) "]" "\n"
+    "ldr       d23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q23_OFFSET) "]" "\n"
+    "ldr       d24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q24_OFFSET) "]" "\n"
+    "ldr       d25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q25_OFFSET) "]" "\n"
+    "ldr       d26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q26_OFFSET) "]" "\n"
+    "ldr       d27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q27_OFFSET) "]" "\n"
+    "ldr       d28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q28_OFFSET) "]" "\n"
+    "ldr       d29, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q29_OFFSET) "]" "\n"
+    "ldr       d30, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q30_OFFSET) "]" "\n"
+    "ldr       d31, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q31_OFFSET) "]" "\n"
+
+    "ldr       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X0_OFFSET) "]" "\n"
+    "ldr       x1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X1_OFFSET) "]" "\n"
+    "ldr       x2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X2_OFFSET) "]" "\n"
+    "ldr       x3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X3_OFFSET) "]" "\n"
+    "ldr       x4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X4_OFFSET) "]" "\n"
+    "ldr       x5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X5_OFFSET) "]" "\n"
+    "ldr       x6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X6_OFFSET) "]" "\n"
+    "ldr       x7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X7_OFFSET) "]" "\n"
+    "ldr       x8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X8_OFFSET) "]" "\n"
+    "ldr       x9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X9_OFFSET) "]" "\n"
+    "ldr       x10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X10_OFFSET) "]" "\n"
+    "ldr       x11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X11_OFFSET) "]" "\n"
+    "ldr       x12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X12_OFFSET) "]" "\n"
+    "ldr       x13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X13_OFFSET) "]" "\n"
+    "ldr       x14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X14_OFFSET) "]" "\n"
+    "ldr       x15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X15_OFFSET) "]" "\n"
+    "ldr       x16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X16_OFFSET) "]" "\n"
+    "ldr       x17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X17_OFFSET) "]" "\n"
+    // x18 should not be modified by the probe. See comment above for details.
+    "ldr       x19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X19_OFFSET) "]" "\n"
+    "ldr       x20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X20_OFFSET) "]" "\n"
+    "ldr       x21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X21_OFFSET) "]" "\n"
+    "ldr       x22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X22_OFFSET) "]" "\n"
+    "ldr       x23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X23_OFFSET) "]" "\n"
+    "ldr       x24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X24_OFFSET) "]" "\n"
+    "ldr       x25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X25_OFFSET) "]" "\n"
+    "ldr       x26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X26_OFFSET) "]" "\n"
+
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSR_OFFSET) "]" "\n"
+    "msr       fpsr, x27" "\n"
+
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_NZCV_OFFSET) "]" "\n"
+    "msr       nzcv, x27" "\n"
+    "ldr       fp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "]" "\n"
+
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n"
+    "ldr       x28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n"
+
+    // There are 5 more registers left to restore: x27, x28, lr, sp, and pc.
+    // The JIT code's lr and sp will be restored by the caller.
+
+    // Restore pc by loading it into lr. The ret below will put it in the pc.
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+    // We need x27 as a scratch register to help with popping the ProbeContext.
+    // Hence, before we pop the ProbeContext, we need to copy the restore value
+    // for x27 from the ProbeContext to the caller save buffer.
+    "ldr       x28, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n"
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n"
+    "str       x27, [x28, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+
+    // Since lr is also restored by the caller, we need to copy its restore
+    // value to the caller save buffer too.
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "str       x27, [x28, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+
+    // We're now done with x28, and can restore its value.
+    "ldr       x28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n"
+
+    // We're now done with the ProbeContext, and can pop it to restore sp so that
+    // it points to the caller save buffer.
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n"
+    "mov       sp, x27" "\n"
+
+    // We're now done with x27, and can restore it.
+    "ldr       x27, [sp, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+
+    "ret" "\n"
+);
+#endif // COMPILER(GCC_OR_CLANG)
+
+static void arm64ProbeTrampoline(MacroAssemblerARM64::ProbeContext* context)
+{
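+    // Note: this C shim wraps the user's probe so that sp/pc changes, which
+    // ARM64 cannot restore (see the trampoline comments above), are detected
+    // and reverted with a warning.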
+    void* origSP = context->cpu.sp;
+    void* origPC = context->cpu.pc;
+    
+    context->probeFunction(context);
+    
+    if (context->cpu.sp != origSP) {
+        dataLog("MacroAssembler probe ERROR: ARM64 does not support the probe changing the SP. The change will be ignored\n");
+        context->cpu.sp = origSP;
+    }
+    if (context->cpu.pc != origPC) {
+        dataLog("MacroAssembler probe ERROR: ARM64 does not support the probe changing the PC. The change will be ignored\n");
+        context->cpu.pc = origPC;
+    }
+}
+
+void MacroAssemblerARM64::probe(MacroAssemblerARM64::ProbeFunction function, void* arg1, void* arg2)
+{
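+    // Note: this builds the 8-slot caller save buffer whose layout the
+    // ctiMasmProbeTrampoline comment above documents: probeFunction, arg1,
+    // arg2, arm64ProbeTrampoline, saved x27/x28/lr, and the original sp.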
+    sub64(TrustedImm32(8 * 8), sp);
+
+    store64(x27, Address(sp, 4 * 8));
+    store64(x28, Address(sp, 5 * 8));
+    store64(lr, Address(sp, 6 * 8));
+
+    add64(TrustedImm32(8 * 8), sp, x28);
+    store64(x28, Address(sp, 7 * 8)); // Save original sp value.
+
+    move(TrustedImmPtr(reinterpret_cast<void*>(function)), x28);
+    store64(x28, Address(sp));
+    move(TrustedImmPtr(arg1), x28);
+    store64(x28, Address(sp, 1 * 8));
+    move(TrustedImmPtr(arg2), x28);
+    store64(x28, Address(sp, 2 * 8));
+    move(TrustedImmPtr(reinterpret_cast<void*>(arm64ProbeTrampoline)), x28);
+    store64(x28, Address(sp, 3 * 8));
+
+    move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), x28);
+    m_assembler.blr(x28);
+
+    // ctiMasmProbeTrampoline should have restored every register except for
+    // lr and the sp.
+    load64(Address(sp, 6 * 8), lr);
+    add64(TrustedImm32(8 * 8), sp);
+}
+#endif // ENABLE(MASM_PROBE)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM64)
+
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
index a128923fc..f4cdd36c0 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,20 +23,32 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef MacroAssemblerARM64_h
-#define MacroAssemblerARM64_h
+#pragma once
 
 #if ENABLE(ASSEMBLER)
 
 #include "ARM64Assembler.h"
 #include "AbstractMacroAssembler.h"
 #include <wtf/MathExtras.h>
+#include <wtf/Optional.h>
 
 namespace JSC {
 
-class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
+class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
+public:
+    static const unsigned numGPRs = 32;
+    static const unsigned numFPRs = 32;
+    
     static const RegisterID dataTempRegister = ARM64Registers::ip0;
     static const RegisterID memoryTempRegister = ARM64Registers::ip1;
+
+    RegisterID scratchRegister()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return getCachedDataTempRegisterIDAndInvalidate();
+    }
+
+private:
     static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
     static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
     static const intptr_t maskHalfWord0 = 0xffffl;
@@ -64,13 +76,11 @@ public:
 
     Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
     void* unlinkedCode() { return m_assembler.unlinkedCode(); }
-    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
-    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
-    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
-    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
-    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
-    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
-    int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }
+    static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
+    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
+    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
+    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+    static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARM64Assembler::link(record, from, fromInstruction, to); }
 
     static const Scale ScalePtr = TimesEight;
 
@@ -130,10 +140,15 @@ public:
     // FIXME: Get reasonable implementations for these
     static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
     static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
-    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
 
     // Integer operations:
 
+    void add32(RegisterID a, RegisterID b, RegisterID dest)
+    {
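+        // Note: register 31 encodes zr, not sp, in the shifted-register form
+        // of ADD, so passing sp here would silently add zero.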
+        ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp);
+        m_assembler.add<32>(dest, a, b);
+    }
+
     void add32(RegisterID src, RegisterID dest)
     {
         m_assembler.add<32>(dest, dest, src);
@@ -150,7 +165,10 @@ public:
             m_assembler.add<32>(dest, src, UInt12(imm.m_value));
         else if (isUInt12(-imm.m_value))
             m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
-        else {
+        else if (src != dest) {
+            move(imm, dest);
+            add32(src, dest);
+        } else {
             move(imm, getCachedDataTempRegisterIDAndInvalidate());
             m_assembler.add<32>(dest, src, dataTempRegister);
         }
@@ -199,9 +217,20 @@ public:
         add32(dataTempRegister, dest);
     }
 
+    void add64(RegisterID a, RegisterID b, RegisterID dest)
+    {
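+        // Note: only the first source operand of the extended-register ADD
+        // may be sp, so sp is swapped into that position when it arrives as b.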
+        ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp);
+        if (b == ARM64Registers::sp)
+            std::swap(a, b);
+        m_assembler.add<64>(dest, a, b);
+    }
+
     void add64(RegisterID src, RegisterID dest)
     {
-        m_assembler.add<64>(dest, dest, src);
+        if (src == ARM64Registers::sp)
+            m_assembler.add<64>(dest, src, dest);
+        else
+            m_assembler.add<64>(dest, dest, src);
     }
 
     void add64(TrustedImm32 imm, RegisterID dest)
@@ -288,6 +317,11 @@ public:
         store64(dataTempRegister, address.m_ptr);
     }
 
+    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+    {
+        add64(imm, srcDest);
+    }
+
     void add64(Address src, RegisterID dest)
     {
         load64(src, getCachedDataTempRegisterIDAndInvalidate());
@@ -334,6 +368,24 @@ public:
         and32(dataTempRegister, dest);
     }
 
+    void and64(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        m_assembler.and_<64>(dest, src1, src2);
+    }
+
+    void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+        if (logicalImm.isValid()) {
+            m_assembler.and_<64>(dest, src, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.and_<64>(dest, src, dataTempRegister);
+    }
+
     void and64(RegisterID src, RegisterID dest)
     {
         m_assembler.and_<64>(dest, dest, src);
@@ -370,6 +422,31 @@ public:
         m_assembler.clz<32>(dest, src);
     }
 
+    void countLeadingZeros64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.clz<64>(dest, src);
+    }
+
+    void countTrailingZeros32(RegisterID src, RegisterID dest)
+    {
+        // ARM64 has no count-trailing-zeros instruction, so reverse the
+        // bits and count leading zeros instead.
+        m_assembler.rbit<32>(dest, src);
+        m_assembler.clz<32>(dest, dest);
+    }
+
+    void countTrailingZeros64(RegisterID src, RegisterID dest)
+    {
+        // ARM64 has no count-trailing-zeros instruction, so reverse the
+        // bits and count leading zeros instead.
+        m_assembler.rbit<64>(dest, src);
+        m_assembler.clz<64>(dest, dest);
+    }
+
+    // Only used for testing purposes.
+    void illegalInstruction()
+    {
+        m_assembler.illegalInstruction();
+    }
+
     void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
     {
         m_assembler.lsl<32>(dest, src, shiftAmount);
@@ -409,21 +486,81 @@ public:
     {
         lshift64(dest, imm, dest);
     }
+
+    void mul32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.mul<32>(dest, left, right);
+    }
     
     void mul32(RegisterID src, RegisterID dest)
     {
         m_assembler.mul<32>(dest, dest, src);
     }
-    
+
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.mul<32>(dest, src, dataTempRegister);
+    }
+
     void mul64(RegisterID src, RegisterID dest)
     {
         m_assembler.mul<64>(dest, dest, src);
     }
 
-    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    void mul64(RegisterID left, RegisterID right, RegisterID dest)
     {
-        move(imm, getCachedDataTempRegisterIDAndInvalidate());
-        m_assembler.mul<32>(dest, src, dataTempRegister);
+        m_assembler.mul<64>(dest, left, right);
+    }
+
+    void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
+    {
+        m_assembler.madd<32>(dest, mulLeft, mulRight, summand);
+    }
+
+    void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
+    {
+        m_assembler.msub<32>(dest, mulLeft, mulRight, minuend);
+    }
+
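+    // multiplyNeg computes -(left * right): msub with the zero register as the
+    // minuend yields 0 - (left * right).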
+    void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
+    {
+        m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr);
+    }
+
+    void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
+    {
+        m_assembler.madd<64>(dest, mulLeft, mulRight, summand);
+    }
+
+    void multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
+    {
+        m_assembler.msub<64>(dest, mulLeft, mulRight, minuend);
+    }
+
+    void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
+    {
+        m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr);
+    }
+
+    void div32(RegisterID dividend, RegisterID divisor, RegisterID dest)
+    {
+        m_assembler.sdiv<32>(dest, dividend, divisor);
+    }
+
+    void div64(RegisterID dividend, RegisterID divisor, RegisterID dest)
+    {
+        m_assembler.sdiv<64>(dest, dividend, divisor);
+    }
+
+    void uDiv32(RegisterID dividend, RegisterID divisor, RegisterID dest)
+    {
+        m_assembler.udiv<32>(dest, dividend, divisor);
+    }
+
+    void uDiv64(RegisterID dividend, RegisterID divisor, RegisterID dest)
+    {
+        m_assembler.udiv<64>(dest, dividend, divisor);
     }
 
     void neg32(RegisterID dest)
@@ -460,6 +597,7 @@ public:
             return;
         }
 
+        ASSERT(src != dataTempRegister);
         move(imm, getCachedDataTempRegisterIDAndInvalidate());
         m_assembler.orr<32>(dest, src, dataTempRegister);
     }
@@ -471,6 +609,27 @@ public:
         store32(dataTempRegister, address.m_ptr);
     }
 
+    void or32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+        if (logicalImm.isValid()) {
+            load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm);
+            store32(dataTempRegister, address.m_ptr);
+        } else {
+            load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+            or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate());
+            store32(dataTempRegister, address.m_ptr);
+        }
+    }
+
+    void or32(TrustedImm32 imm, Address address)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+        or32(imm, dataTempRegister, dataTempRegister);
+        store32(dataTempRegister, address);
+    }
+
     void or64(RegisterID src, RegisterID dest)
     {
         or64(dest, src, dest);
@@ -491,14 +650,27 @@ public:
         LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int32_t>(imm.m_value)));
 
         if (logicalImm.isValid()) {
-            m_assembler.orr<64>(dest, dest, logicalImm);
+            m_assembler.orr<64>(dest, src, logicalImm);
             return;
         }
 
         signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
         m_assembler.orr<64>(dest, src, dataTempRegister);
     }
-    
+
+    void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+        if (logicalImm.isValid()) {
+            m_assembler.orr<64>(dest, src, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<64>(dest, src, dataTempRegister);
+    }
+
     void or64(TrustedImm64 imm, RegisterID dest)
     {
         LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int32_t>(imm.m_value)));
@@ -512,9 +684,34 @@ public:
         m_assembler.orr<64>(dest, dest, dataTempRegister);
     }
 
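+    // ARM64 provides only a rotate-right instruction; a rotate left by n can be
+    // expressed, when needed, as a rotate right by (width - n).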
+    void rotateRight32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.ror<32>(dest, src, imm.m_value & 31);
+    }
+
+    void rotateRight32(TrustedImm32 imm, RegisterID srcDst)
+    {
+        rotateRight32(srcDst, imm, srcDst);
+    }
+
+    void rotateRight32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.ror<32>(dest, src, shiftAmount);
+    }
+
+    void rotateRight64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.ror<64>(dest, src, imm.m_value & 63);
+    }
+
     void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
     {
-        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
+        rotateRight64(srcDst, imm, srcDst);
+    }
+
+    void rotateRight64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.ror<64>(dest, src, shiftAmount);
     }
 
     void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
@@ -539,12 +736,12 @@ public:
     
     void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
     {
-        m_assembler.lsr<64>(dest, src, shiftAmount);
+        m_assembler.asr<64>(dest, src, shiftAmount);
     }
     
     void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
+        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
     }
     
     void rshift64(RegisterID shiftAmount, RegisterID dest)
@@ -562,6 +759,11 @@ public:
         m_assembler.sub<32>(dest, dest, src);
     }
 
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.sub<32>(dest, left, right);
+    }
+
     void sub32(TrustedImm32 imm, RegisterID dest)
     {
         if (isUInt12(imm.m_value)) {
@@ -624,6 +826,11 @@ public:
     {
         m_assembler.sub<64>(dest, dest, src);
     }
+
+    void sub64(RegisterID a, RegisterID b, RegisterID dest)
+    {
+        m_assembler.sub<64>(dest, a, b);
+    }
     
     void sub64(TrustedImm32 imm, RegisterID dest)
     {
@@ -677,6 +884,26 @@ public:
         urshift32(dest, imm, dest);
     }
 
+    void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.lsr<64>(dest, src, shiftAmount);
+    }
+    
+    void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
+    }
+
+    void urshift64(RegisterID shiftAmount, RegisterID dest)
+    {
+        urshift64(dest, shiftAmount, dest);
+    }
+    
+    void urshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        urshift64(dest, imm, dest);
+    }
+
     void xor32(RegisterID src, RegisterID dest)
     {
         xor32(dest, src, dest);
@@ -700,7 +927,7 @@ public:
             LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
 
             if (logicalImm.isValid()) {
-                m_assembler.eor<32>(dest, dest, logicalImm);
+                m_assembler.eor<32>(dest, src, logicalImm);
                 return;
             }
 
@@ -731,6 +958,23 @@ public:
         xor64(imm, dest, dest);
     }
 
+    void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvn<64>(dest, src);
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+            if (logicalImm.isValid()) {
+                m_assembler.eor<64>(dest, src, logicalImm);
+                return;
+            }
+
+            move(imm, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.eor<64>(dest, src, dataTempRegister);
+        }
+    }
+
     void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
         if (imm.m_value == -1)
@@ -739,7 +983,7 @@ public:
             LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int32_t>(imm.m_value)));
 
             if (logicalImm.isValid()) {
-                m_assembler.eor<64>(dest, dest, logicalImm);
+                m_assembler.eor<64>(dest, src, logicalImm);
                 return;
             }
 
@@ -748,6 +992,20 @@ public:
         }
     }
 
+    void not32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mvn<32>(dest, src);
+    }
+
+    void not64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mvn<64>(dest, src);
+    }
+
+    void not64(RegisterID srcDst)
+    {
+        m_assembler.mvn<64>(srcDst, srcDst);
+    }
 
     // Memory access operations:
 
@@ -777,6 +1035,11 @@ public:
         load<64>(address, dest);
     }
 
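+    // Post-index addressing: load from [src], then write src + simm back into src.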
+    void load64(RegisterID src, PostIndex simm, RegisterID dest)
+    {
+        m_assembler.ldr<64>(dest, src, simm);
+    }
+
     DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
     {
         DataLabel32 label(this);
@@ -793,6 +1056,38 @@ public:
         return label;
     }
 
+    void loadPair64(RegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        loadPair64(src, TrustedImm32(0), dest1, dest2);
+    }
+
+    void loadPair64(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.ldp<64>(dest1, dest2, src, offset.m_value);
+    }
+
+    void loadPair64WithNonTemporalAccess(RegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        loadPair64WithNonTemporalAccess(src, TrustedImm32(0), dest1, dest2);
+    }
+
+    void loadPair64WithNonTemporalAccess(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.ldnp<64>(dest1, dest2, src, offset.m_value);
+    }
+
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), dataTempRegister);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm64(misc), memoryTempRegister);
+        abortWithReason(reason);
+    }
+
     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
     {
         ConvertibleLoadLabel result(this);
@@ -874,16 +1169,35 @@ public:
         load16(address, dest);
     }
 
-    void load16Signed(BaseIndex address, RegisterID dest)
+    void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         if (!address.offset && (!address.scale || address.scale == 1)) {
-            m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
             return;
         }
 
         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
         m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
-        m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
+        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void zeroExtend16To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.uxth<32>(dest, src);
+    }
+
+    void signExtend16To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sxth<32>(dest, src);
     }
 
     void load8(ImplicitAddress address, RegisterID dest)
@@ -909,22 +1223,54 @@ public:
     
     void load8(const void* address, RegisterID dest)
     {
-        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
         m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
         if (dest == memoryTempRegister)
-            m_cachedMemoryTempRegister.invalidate();
+            cachedMemoryTempRegister().invalidate();
+    }
+
+    void load8(RegisterID src, PostIndex simm, RegisterID dest)
+    {
+        m_assembler.ldrb(dest, src, simm);
+    }
+
+    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
     }
 
-    void load8Signed(BaseIndex address, RegisterID dest)
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         if (!address.offset && !address.scale) {
-            m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
             return;
         }
 
         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
         m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
-        m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
+        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load8SignedExtendTo32(const void* address, RegisterID dest)
+    {
+        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
+        m_assembler.ldrsb<32>(dest, memoryTempRegister, ARM64Registers::zr);
+        if (dest == memoryTempRegister)
+            cachedMemoryTempRegister().invalidate();
+    }
+
+    void zeroExtend8To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.uxtb<32>(dest, src);
+    }
+
+    void signExtend8To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sxtb<32>(dest, src);
     }
 
     void store64(RegisterID src, ImplicitAddress address)
@@ -953,6 +1299,11 @@ public:
         store<64>(src, address);
     }
 
+    void store64(TrustedImm32 imm, ImplicitAddress address)
+    {
+        store64(TrustedImm64(imm.m_value), address);
+    }
+
     void store64(TrustedImm64 imm, ImplicitAddress address)
     {
         if (!imm.m_value) {
@@ -960,7 +1311,7 @@ public:
             return;
         }
 
-        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        moveToCachedReg(imm, dataMemoryTempRegister());
         store64(dataTempRegister, address);
     }
 
@@ -971,9 +1322,14 @@ public:
             return;
         }
 
-        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        moveToCachedReg(imm, dataMemoryTempRegister());
         store64(dataTempRegister, address);
     }
+
+    void store64(RegisterID src, RegisterID dest, PostIndex simm)
+    {
+        m_assembler.str<64>(src, dest, simm);
+    }
     
     DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
     {
@@ -983,6 +1339,26 @@ public:
         return label;
     }
 
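+    // storePair64 emits a single stp; the WithNonTemporalAccess variants below use
+    // stnp, which hints to the memory system that the data is unlikely to be
+    // reused soon.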
+    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        storePair64(src1, src2, dest, TrustedImm32(0));
+    }
+
+    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
+    {
+        m_assembler.stp<64>(src1, src2, dest, offset.m_value);
+    }
+
+    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        storePair64WithNonTemporalAccess(src1, src2, dest, TrustedImm32(0));
+    }
+
+    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
+    {
+        m_assembler.stnp<64>(src1, src2, dest, offset.m_value);
+    }
+
     void store32(RegisterID src, ImplicitAddress address)
     {
         if (tryStoreWithOffset<32>(src, address.base, address.offset))
@@ -1016,7 +1392,7 @@ public:
             return;
         }
 
-        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        moveToCachedReg(imm, dataMemoryTempRegister());
         store32(dataTempRegister, address);
     }
 
@@ -1027,7 +1403,7 @@ public:
             return;
         }
 
-        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        moveToCachedReg(imm, dataMemoryTempRegister());
         store32(dataTempRegister, address);
     }
 
@@ -1038,10 +1414,20 @@ public:
             return;
         }
 
-        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        moveToCachedReg(imm, dataMemoryTempRegister());
         store32(dataTempRegister, address);
     }
 
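+    // The zero register always reads as 0, so storing zero needs neither a
+    // scratch register nor a materialized immediate.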
+    void storeZero32(ImplicitAddress address)
+    {
+        store32(ARM64Registers::zr, address);
+    }
+
+    void storeZero32(BaseIndex address)
+    {
+        store32(ARM64Registers::zr, address);
+    }
+
     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
     {
         DataLabel32 label(this);
@@ -1050,6 +1436,15 @@ public:
         return label;
     }
 
+    void store16(RegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<16>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.strh(src, address.base, memoryTempRegister);
+    }
+
     void store16(RegisterID src, BaseIndex address)
     {
         if (!address.offset && (!address.scale || address.scale == 1)) {
@@ -1080,17 +1475,43 @@ public:
         m_assembler.strb(src, memoryTempRegister, 0);
     }
 
+    void store8(RegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<8>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.strb(src, address.base, memoryTempRegister);
+    }
+
     void store8(TrustedImm32 imm, void* address)
     {
-        if (!imm.m_value) {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        if (!imm8.m_value) {
             store8(ARM64Registers::zr, address);
             return;
         }
 
-        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        move(imm8, getCachedDataTempRegisterIDAndInvalidate());
         store8(dataTempRegister, address);
     }
 
+    void store8(TrustedImm32 imm, ImplicitAddress address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        if (!imm8.m_value) {
+            store8(ARM64Registers::zr, address);
+            return;
+        }
+
+        move(imm8, getCachedDataTempRegisterIDAndInvalidate());
+        store8(dataTempRegister, address);
+    }
+
+    void store8(RegisterID src, RegisterID dest, PostIndex simm)
+    {
+        m_assembler.strb(src, dest, simm);
+    }
 
     // Floating-point operations:
 
@@ -1098,6 +1519,7 @@ public:
     static bool supportsFloatingPointTruncate() { return true; }
     static bool supportsFloatingPointSqrt() { return true; }
     static bool supportsFloatingPointAbs() { return true; }
+    static bool supportsFloatingPointRounding() { return true; }
 
     enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
 
@@ -1106,6 +1528,11 @@ public:
         m_assembler.fabs<64>(dest, src);
     }
 
+    void absFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fabs<32>(dest, src);
+    }
+
     void addDouble(FPRegisterID src, FPRegisterID dest)
     {
         addDouble(dest, src, dest);
@@ -1124,20 +1551,56 @@ public:
 
     void addDouble(AbsoluteAddress address, FPRegisterID dest)
     {
-        loadDouble(address.m_ptr, fpTempRegister);
+        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
         addDouble(fpTempRegister, dest);
     }
 
+    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fadd<32>(dest, op1, op2);
+    }
+
     void ceilDouble(FPRegisterID src, FPRegisterID dest)
     {
         m_assembler.frintp<64>(dest, src);
     }
 
+    void ceilFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintp<32>(dest, src);
+    }
+
     void floorDouble(FPRegisterID src, FPRegisterID dest)
     {
         m_assembler.frintm<64>(dest, src);
     }
 
+    void floorFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintm<32>(dest, src);
+    }
+
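+    // The frint family covers the IEEE rounding modes: frintp rounds toward
+    // +infinity (ceil), frintm toward -infinity (floor), frintn to nearest with
+    // ties to even, and frintz toward zero (truncate).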
+    void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintn<64>(dest, src);
+    }
+
+    void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintn<32>(dest, src);
+    }
+
+    void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintz<64>(dest, src);
+    }
+
+    void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintz<32>(dest, src);
+    }
+
+
     // Convert 'src' to an integer, and place the result in 'dest'.
     // If the result is not representable as a 32 bit value, branch.
     // May also branch for some values that are representable in 32 bits
@@ -1150,32 +1613,26 @@ public:
         m_assembler.scvtf<64, 32>(fpTempRegister, dest);
         failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
 
-        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
-        if (negZeroCheck)
-            failureCases.append(branchTest32(Zero, dest));
+        // Test for negative zero.
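+        // (-0.0 round-trips to integer 0, so the double comparison above cannot
+        // catch it; only the sign bit, bit 63 of the raw bits, distinguishes it.)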
+        if (negZeroCheck) {
+            Jump valueIsNonZero = branchTest32(NonZero, dest);
+            RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
+            m_assembler.fmov<64>(scratch, src);
+            failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
+            valueIsNonZero.link(this);
+        }
     }
 
     Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
     {
         m_assembler.fcmp<64>(left, right);
+        return jumpAfterFloatingPointCompare(cond);
+    }
 
-        if (cond == DoubleNotEqual) {
-            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
-            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
-            Jump result = makeBranch(ARM64Assembler::ConditionNE);
-            unordered.link(this);
-            return result;
-        }
-        if (cond == DoubleEqualOrUnordered) {
-            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
-            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
-            unordered.link(this);
-            // We get here if either unordered or equal.
-            Jump result = jump();
-            notEqual.link(this);
-            return result;
-        }
-        return makeBranch(cond);
+    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        m_assembler.fcmp<32>(left, right);
+        return jumpAfterFloatingPointCompare(cond);
     }
 
     Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
@@ -1204,7 +1661,7 @@ public:
         // Truncate to a 64-bit integer in dataTempRegister, copy the low 32 bits to dest.
         m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
         zeroExtend32ToPtr(dataTempRegister, dest);
-        // Check thlow 32-bits sign extend to be equal to the full value.
+        // Check that the low 32 bits sign-extend back to the full value.
         m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
         return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
     }
@@ -1241,12 +1698,32 @@ public:
         load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
         convertInt32ToDouble(dataTempRegister, dest);
     }
+
+    void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.scvtf<32, 32>(dest, src);
+    }
     
     void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
     {
         m_assembler.scvtf<64, 64>(dest, src);
     }
-    
+
+    void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.scvtf<32, 64>(dest, src);
+    }
+
+    void convertUInt64ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.ucvtf<64, 64>(dest, src);
+    }
+
+    void convertUInt64ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.ucvtf<32, 64>(dest, src);
+    }
+
     void divDouble(FPRegisterID src, FPRegisterID dest)
     {
         divDouble(dest, src, dest);
@@ -1257,6 +1734,11 @@ public:
         m_assembler.fdiv<64>(dest, op1, op2);
     }
 
+    void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fdiv<32>(dest, op1, op2);
+    }
+
     void loadDouble(ImplicitAddress address, FPRegisterID dest)
     {
         if (tryLoadWithOffset<64>(dest, address.base, address.offset))
@@ -1278,12 +1760,21 @@ public:
         m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
     }
     
-    void loadDouble(const void* address, FPRegisterID dest)
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
     {
-        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+        moveToCachedReg(address, cachedMemoryTempRegister());
         m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
     }
 
+    void loadFloat(ImplicitAddress address, FPRegisterID dest)
+    {
+        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+    }
+
     void loadFloat(BaseIndex address, FPRegisterID dest)
     {
         if (!address.offset && (!address.scale || address.scale == 2)) {
@@ -1301,16 +1792,130 @@ public:
         m_assembler.fmov<64>(dest, src);
     }
 
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        m_assembler.fmov<64>(reg, ARM64Registers::zr);
+    }
+
     void moveDoubleTo64(FPRegisterID src, RegisterID dest)
     {
         m_assembler.fmov<64>(dest, src);
     }
 
+    void moveFloatTo32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fmov<32>(dest, src);
+    }
+
     void move64ToDouble(RegisterID src, FPRegisterID dest)
     {
         m_assembler.fmov<64>(dest, src);
     }
 
+    void move32ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fmov<32>(dest, src);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.fcmp<64>(left, right);
+        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.fcmp<64>(left, right);
+        moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+    }
+
+    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.fcmp<32>(left, right);
+        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
+    }
+
+    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.fcmp<32>(left, right);
+        moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+    }
+
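+    // After an fcmp of unordered operands, Z is clear, so ConditionNE would also
+    // be taken; DoubleNotEqual must therefore skip the select when VS (unordered)
+    // is set.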
+    template<int datasize>
+    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest)
+    {
+        if (cond == DoubleNotEqual) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            // If the compare is unordered, src is copied to dest and the
+            // next csel has all arguments equal to src.
+            // If the compare is ordered, dest is unchanged and EQ decides
+            // what value to set.
+            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS);
+            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ);
+            return;
+        }
+        m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    template<int datasize>
+    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (cond == DoubleNotEqual) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            // If the compare is unordered, thenCase is copied to elseCase and the
+            // next csel has all arguments equal to thenCase.
+            // If the compare is ordered, dest is unchanged and EQ decides
+            // what value to set.
+            m_assembler.csel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
+            m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
+            return;
+        }
+        m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    template<int datasize>
+    void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (cond == DoubleNotEqual) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            // If the compare is unordered, thenCase is copied to elseCase and the
+            // next fcsel has all arguments equal to thenCase.
+            // If the compare is ordered, dest is unchanged and EQ decides
+            // what value to set.
+            m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
+            m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
+            return;
+        }
+        m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.fcmp<64>(left, right);
+        moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+    }
+
+    void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.fcmp<32>(left, right);
+        moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+    }
+
     void mulDouble(FPRegisterID src, FPRegisterID dest)
     {
         mulDouble(dest, src, dest);
@@ -1327,16 +1932,51 @@ public:
         mulDouble(fpTempRegister, dest);
     }
 
+    void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fmul<32>(dest, op1, op2);
+    }
+
+    void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vand<64>(dest, op1, op2);
+    }
+
+    void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        andDouble(op1, op2, dest);
+    }
+
+    void orDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vorr<64>(dest, op1, op2);
+    }
+
+    void orFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        orDouble(op1, op2, dest);
+    }
+
     void negateDouble(FPRegisterID src, FPRegisterID dest)
     {
         m_assembler.fneg<64>(dest, src);
     }
 
+    void negateFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fneg<32>(dest, src);
+    }
+
     void sqrtDouble(FPRegisterID src, FPRegisterID dest)
     {
         m_assembler.fsqrt<64>(dest, src);
     }
 
+    void sqrtFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fsqrt<32>(dest, src);
+    }
+
     void storeDouble(FPRegisterID src, ImplicitAddress address)
     {
         if (tryStoreWithOffset<64>(src, address.base, address.offset))
@@ -1346,9 +1986,9 @@ public:
         m_assembler.str<64>(src, address.base, memoryTempRegister);
     }
 
-    void storeDouble(FPRegisterID src, const void* address)
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
     {
-        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+        moveToCachedReg(address, cachedMemoryTempRegister());
         m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
     }
 
@@ -1363,6 +2003,15 @@ public:
         m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
         m_assembler.str<64>(src, address.base, memoryTempRegister);
     }
+
+    void storeFloat(FPRegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<32>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<32>(src, address.base, memoryTempRegister);
+    }
     
     void storeFloat(FPRegisterID src, BaseIndex address)
     {
@@ -1392,6 +2041,11 @@ public:
         subDouble(fpTempRegister, dest);
     }
 
+    void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fsub<32>(dest, op1, op2);
+    }
+
     // Result is undefined if the value is outside of the integer range.
     void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
     {
@@ -1403,6 +2057,45 @@ public:
         m_assembler.fcvtzu<32, 64>(dest, src);
     }
 
+    void truncateDoubleToInt64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzs<64, 64>(dest, src);
+    }
+
+    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID)
+    {
+        truncateDoubleToUint64(src, dest);
+    }
+
+    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzu<64, 64>(dest, src);
+    }
+
+    void truncateFloatToInt32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzs<32, 32>(dest, src);
+    }
+
+    void truncateFloatToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzu<32, 32>(dest, src);
+    }
+
+    void truncateFloatToInt64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzs<64, 32>(dest, src);
+    }
+
+    void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID)
+    {
+        truncateFloatToUint64(src, dest);
+    }
+
+    void truncateFloatToUint64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzu<64, 32>(dest, src);
+    }
 
     // Stack manipulation operations:
     //
@@ -1437,6 +2130,16 @@ public:
         CRASH();
     }
 
+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
+    }
+
+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
+    }
+
     void popToRestore(RegisterID dest)
     {
         m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
@@ -1446,6 +2149,15 @@ public:
     {
         m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
     }
+    
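+    // Reserve a 16-byte slot by pushing the temp register twice, overwrite the
+    // low half with the immediate, then reload the temp from the untouched high
+    // half; the immediate lands on the stack with no register left clobbered.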
+    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+    {
+        RegisterID reg = dataTempRegister;
+        pushPair(reg, reg);
+        move(imm, reg);
+        store64(reg, stackPointerRegister);
+        load64(Address(stackPointerRegister, 8), reg);
+    }
 
     void pushToSave(Address address)
     {
@@ -1471,6 +2183,7 @@ public:
         storeDouble(src, stackPointerRegister);
     }
 
+    static ptrdiff_t pushToSaveByteOffset() { return 16; }
 
     // Register move operations:
 
@@ -1501,6 +2214,11 @@ public:
         move(reg2, reg1);
         move(dataTempRegister, reg2);
     }
+
+    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
+    }
     
     void signExtend32ToPtr(RegisterID src, RegisterID dest)
     {
@@ -1512,6 +2230,169 @@ public:
         m_assembler.uxtw(dest, src);
     }
 
+    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmp<32>(left, right);
+        m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.cmp<32>(left, right);
+        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
+        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmp<64>(left, right);
+        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.cmp<64>(left, right);
+        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<64>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<64>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<64>(left, dataTempRegister);
+        }
+        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.tst<32>(testReg, mask);
+        m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.tst<32>(left, right);
+        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        test32(left, right);
+        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.tst<64>(testReg, mask);
+        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.tst<64>(left, right);
+        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.cmp<32>(left, right);
+        m_assembler.fcsel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.cmp<64>(left, right);
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<64>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<64>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<64>(left, dataTempRegister);
+        }
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.tst<32>(left, right);
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        test32(left, right);
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.tst<64>(left, right);
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
 
     // Forwards / external control flow operations:
     //
@@ -1539,12 +2420,17 @@ public:
 
     Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
     {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest32(*resultCondition, left, left);
+        }
+
         if (isUInt12(right.m_value))
             m_assembler.cmp<32>(left, UInt12(right.m_value));
         else if (isUInt12(-right.m_value))
             m_assembler.cmn<32>(left, UInt12(-right.m_value));
         else {
-            moveToCachedReg(right, m_dataMemoryTempRegister);
+            moveToCachedReg(right, dataMemoryTempRegister());
             m_assembler.cmp<32>(left, dataTempRegister);
         }
         return Jump(makeBranch(cond));
@@ -1588,19 +2474,52 @@ public:
 
     Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
     {
+        if (right == ARM64Registers::sp) {
+            if (cond == Equal && left != ARM64Registers::sp) {
+                // CMP can only use SP as the left argument; since we are testing
+                // for equality, the order does not matter here.
+                std::swap(left, right);
+            } else {
+                move(right, getCachedDataTempRegisterIDAndInvalidate());
+                right = dataTempRegister;
+            }
+        }
         m_assembler.cmp<64>(left, right);
         return Jump(makeBranch(cond));
     }
 
+    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest64(*resultCondition, left, left);
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<64>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<64>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<64>(left, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
     Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
     {
         intptr_t immediate = right.m_value;
+        if (!immediate) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest64(*resultCondition, left, left);
+        }
+
         if (isUInt12(immediate))
             m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
         else if (isUInt12(-immediate))
             m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
         else {
-            moveToCachedReg(right, m_dataMemoryTempRegister);
+            moveToCachedReg(right, dataMemoryTempRegister());
             m_assembler.cmp<64>(left, dataTempRegister);
         }
         return Jump(makeBranch(cond));
@@ -1630,33 +2549,62 @@ public:
         return branch64(cond, memoryTempRegister, right);
     }
 
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, memoryTempRegister, right);
+    }
+
     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
     {
-        ASSERT(!(0xffffff00 & right.m_value));
-        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
-        return branch32(cond, memoryTempRegister, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right8);
     }
 
     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
     {
-        ASSERT(!(0xffffff00 & right.m_value));
-        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
-        return branch32(cond, memoryTempRegister, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right8);
     }
     
     Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
     {
-        ASSERT(!(0xffffff00 & right.m_value));
-        load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
-        return branch32(cond, memoryTempRegister, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right8);
     }
     
     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
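+        // 'reg & reg' is just 'reg', so a self-test for (non-)zero can be folded
+        // into a single compare-and-branch (cbz/cbnz).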
+        if (reg == mask && (cond == Zero || cond == NonZero))
+            return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
         m_assembler.tst<32>(reg, mask);
         return Jump(makeBranch(cond));
     }
 
+    void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1)
+            m_assembler.tst<32>(reg, reg);
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+
+            if (logicalImm.isValid())
+                m_assembler.tst<32>(reg, logicalImm);
+            else {
+                move(mask, getCachedDataTempRegisterIDAndInvalidate());
+                m_assembler.tst<32>(reg, dataTempRegister);
+            }
+        }
+    }
+
+    Jump branch(ResultCondition cond)
+    {
+        return Jump(makeBranch(cond));
+    }
+
     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
         if (mask.m_value == -1) {
@@ -1666,13 +2614,10 @@ public:
         } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
             return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
         else {
-            if ((cond == Zero) || (cond == NonZero)) {
-                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
-
-                if (logicalImm.isValid()) {
-                    m_assembler.tst<32>(reg, logicalImm);
-                    return Jump(makeBranch(cond));
-                }
+            LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+            if (logicalImm.isValid()) {
+                m_assembler.tst<32>(reg, logicalImm);
+                return Jump(makeBranch(cond));
             }
 
             move(mask, getCachedDataTempRegisterIDAndInvalidate());
@@ -1695,6 +2640,8 @@ public:
 
     Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
+        if (reg == mask && (cond == Zero || cond == NonZero))
+            return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
         m_assembler.tst<64>(reg, mask);
         return Jump(makeBranch(cond));
     }
@@ -1708,13 +2655,11 @@ public:
         } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
             return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
         else {
-            if ((cond == Zero) || (cond == NonZero)) {
-                LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
+            LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
 
-                if (logicalImm.isValid()) {
-                    m_assembler.tst<64>(reg, logicalImm);
-                    return Jump(makeBranch(cond));
-                }
+            if (logicalImm.isValid()) {
+                m_assembler.tst<64>(reg, logicalImm);
+                return Jump(makeBranch(cond));
             }
 
             signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
@@ -1723,6 +2668,28 @@ public:
         return Jump(makeBranch(cond));
     }
 
+    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
+    {
+        if (mask.m_value == -1) {
+            if ((cond == Zero) || (cond == NonZero))
+                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
+            m_assembler.tst<64>(reg, reg);
+        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
+
+            if (logicalImm.isValid()) {
+                m_assembler.tst<64>(reg, logicalImm);
+                return Jump(makeBranch(cond));
+            }
+
+            move(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<64>(reg, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
     Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
     {
         load64(address, getCachedDataTempRegisterIDAndInvalidate());
@@ -1749,27 +2716,36 @@ public:
 
     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address, getCachedDataTempRegisterIDAndInvalidate());
-        return branchTest32(cond, dataTempRegister, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest32(cond, dataTempRegister, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
-        return branchTest32(cond, dataTempRegister, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest32(cond, dataTempRegister, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
         move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
-        m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
-        return branchTest32(cond, dataTempRegister, mask);
+
+        if (MacroAssemblerHelpers::isUnsigned(cond))
+            m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
+        else
+            m_assembler.ldrsb<32>(dataTempRegister, address.base, dataTempRegister);
+
+        return branchTest32(cond, dataTempRegister, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address, getCachedDataTempRegisterIDAndInvalidate());
-        return branchTest32(cond, dataTempRegister, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest32(cond, dataTempRegister, mask8);
     }
 
     Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
@@ -1875,7 +2851,14 @@ public:
         return branchAdd64(cond, dest, imm, dest);
     }
 
-    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    Jump branchAdd64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT(isUInt12(imm.m_value));
+        m_assembler.add<64, S>(dest, dest, UInt12(imm.m_value));
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
     {
         ASSERT(cond != Signed);
 
@@ -1886,14 +2869,19 @@ public:
 
         // This is a signed multiply of two 32-bit values, producing a 64-bit result.
         m_assembler.smull(dest, src1, src2);
-        // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
-        m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
-        // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
-        m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
+        // Copy bits 63..32 of the result to bits 31..0 of scratch1.
+        m_assembler.asr<64>(scratch1, dest, 32);
+        // Splat bit 31 of the result to bits 31..0 of scratch2.
+        m_assembler.asr<32>(scratch2, dest, 31);
         // After a mul32 the top 32 bits of the register should be clear.
         zeroExtend32ToPtr(dest, dest);
         // Check that bits 31..63 of the original result were all equal.
-        return branch32(NotEqual, memoryTempRegister, dataTempRegister);
+        return branch32(NotEqual, scratch2, scratch1);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
     }
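
The scratch-register refactoring above leaves the overflow check itself unchanged: smull produces the full 64-bit product, and overflow occurred exactly when bits 63..32 differ from the sign-extension of bit 31. A C++ sketch of the same check (assuming arithmetic right shift on signed types, as the two asr comparisons do):

    #include <cassert>
    #include <cstdint>

    // Equivalent of the smull/asr sequence: multiply in 64 bits, then compare
    // the high half against the splatted sign bit of the low half.
    inline bool mul32Overflows(int32_t a, int32_t b)
    {
        int64_t wide = static_cast<int64_t>(a) * static_cast<int64_t>(b);
        int32_t high = static_cast<int32_t>(wide >> 32); // bits 63..32 (asr<64>)
        int32_t sign = static_cast<int32_t>(wide) >> 31; // splat of bit 31 (asr<32>)
        return high != sign;
    }

    int main()
    {
        assert(!mul32Overflows(46340, 46340)); // 2147395600 fits in int32_t
        assert(mul32Overflows(46341, 46341));  // 2147488281 does not
    }
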
 
     Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
@@ -1901,13 +2889,13 @@ public:
         return branchMul32(cond, dest, src, dest);
     }
 
-    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
         move(imm, getCachedDataTempRegisterIDAndInvalidate());
         return branchMul32(cond, dataTempRegister, src, dest);
     }
 
-    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
     {
         ASSERT(cond != Signed);
 
@@ -1917,12 +2905,17 @@ public:
         if (cond != Overflow)
             return branchTest64(cond, dest);
 
-        // Compute bits 127..64 of the result into dataTempRegister.
-        m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2);
-        // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
-        m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63);
+        // Compute bits 127..64 of the result into scratch1.
+        m_assembler.smulh(scratch1, src1, src2);
+        // Splat bit 63 of the result to bits 63..0 of scratch2.
+        m_assembler.asr<64>(scratch2, dest, 63);
         // Check that bits 31..63 of the original result were all equal.
-        return branch64(NotEqual, memoryTempRegister, dataTempRegister);
+        return branch64(NotEqual, scratch2, scratch1);
+    }
+
+    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
     }
 
     Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
@@ -2010,6 +3003,13 @@ public:
         return branchSub64(cond, dest, imm, dest);
     }
 
+    Jump branchSub64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT(isUInt12(imm.m_value));
+        m_assembler.sub<64, S>(dest, dest, UInt12(imm.m_value));
+        return Jump(makeBranch(cond));
+    }
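
Both new branchAdd64/branchSub64 overloads assert isUInt12 because the ARM64 add/sub immediate field is 12 bits unsigned (there is also a left-shift-by-12 form, which these overloads do not use). A sketch of the predicate the ASSERTs rely on:

    #include <cassert>
    #include <cstdint>

    // Matches the shape of the assembler's isUInt12: only the low 12 bits may
    // be set, so the encodable range is 0..4095.
    inline bool isUInt12(int64_t value)
    {
        return !(value & ~0xfffll);
    }

    int main()
    {
        assert(isUInt12(0) && isUInt12(4095));
        assert(!isUInt12(4096) && !isUInt12(-1));
    }
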
+
 
     // Jumps, calls, returns
 
@@ -2054,6 +3054,12 @@ public:
         load64(address, getCachedDataTempRegisterIDAndInvalidate());
         m_assembler.br(dataTempRegister);
     }
+    
+    void jump(BaseIndex address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.br(dataTempRegister);
+    }
 
     void jump(AbsoluteAddress address)
     {
@@ -2074,6 +3080,13 @@ public:
         return Call(m_assembler.label(), Call::LinkableNear);
     }
 
+    ALWAYS_INLINE Call nearTailCall()
+    {
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.b();
+        return Call(label, Call::LinkableNearTail);
+    }
+
     ALWAYS_INLINE void ret()
     {
         m_assembler.ret();
@@ -2108,8 +3121,21 @@ public:
 
     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
-        move(right, getCachedDataTempRegisterIDAndInvalidate());
-        m_assembler.cmp<32>(left, dataTempRegister);
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                test32(*resultCondition, left, left, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            move(right, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
         m_assembler.cset<32>(dest, ARM64Condition(cond));
     }
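
The rewritten compare32 avoids the unconditional scratch move by trying three encodings in order: a test when the compare-to-zero commutes, cmp when the immediate fits UInt12, and cmn with the negated immediate otherwise (cmn left, #k sets the same flags as comparing left with -k). A sketch of that selection logic, reusing the isUInt12 predicate above:

    #include <cassert>
    #include <cstdint>

    inline bool isUInt12(int64_t v) { return !(v & ~0xfffll); }

    enum class Form { Cmp, Cmn, ScratchMove };

    // Mirrors the three-way choice in compare32 once the compare-to-zero
    // commutation has been ruled out.
    inline Form chooseForm(int32_t imm)
    {
        if (isUInt12(imm))
            return Form::Cmp;
        if (isUInt12(-static_cast<int64_t>(imm)))
            return Form::Cmn;
        return Form::ScratchMove;
    }

    int main()
    {
        assert(chooseForm(42) == Form::Cmp);
        assert(chooseForm(-42) == Form::Cmn);
        assert(chooseForm(0x12345) == Form::ScratchMove);
    }
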
 
@@ -2121,6 +3147,13 @@ public:
     
     void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                test64(*resultCondition, left, left, dest);
+                return;
+            }
+        }
+
         signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
         m_assembler.cmp<64>(left, dataTempRegister);
         m_assembler.cset<32>(dest, ARM64Condition(cond));
@@ -2128,32 +3161,35 @@ public:
 
     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
     {
-        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
-        move(right, getCachedDataTempRegisterIDAndInvalidate());
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+        move(right8, getCachedDataTempRegisterIDAndInvalidate());
         compare32(cond, memoryTempRegister, dataTempRegister, dest);
     }
-    
+
+    void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest)
+    {
+        m_assembler.tst<32>(src, mask);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
     void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
     {
-        if (mask.m_value == -1)
-            m_assembler.tst<32>(src, src);
-        else {
-            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
-            m_assembler.tst<32>(src, dataTempRegister);
-        }
+        test32(src, mask);
         m_assembler.cset<32>(dest, ARM64Condition(cond));
     }
 
     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
-        load32(address, getCachedDataTempRegisterIDAndInvalidate());
-        test32(cond, dataTempRegister, mask, dest);
+        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+        test32(cond, memoryTempRegister, mask, dest);
     }
 
     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
-        load8(address, getCachedDataTempRegisterIDAndInvalidate());
-        test32(cond, dataTempRegister, mask, dest);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedMemoryTempRegisterIDAndInvalidate());
+        test32(cond, memoryTempRegister, mask8, dest);
     }
 
     void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
@@ -2173,6 +3209,10 @@ public:
         m_assembler.cset<32>(dest, ARM64Condition(cond));
     }
 
+    void setCarry(RegisterID dest)
+    {
+        m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS);
+    }
 
     // Patchable operations
 
@@ -2204,10 +3244,17 @@ public:
         return branch64(cond, left, dataTempRegister);
     }
 
-    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        dataLabel = DataLabel32(this);
+        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+        return branch32(cond, left, dataTempRegister);
+    }
+
+    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
     {
         m_makeJumpPatchable = true;
-        Jump result = branch32(cond, left, TrustedImm32(right));
+        Jump result = branch64(cond, left, TrustedImm64(right));
         m_makeJumpPatchable = false;
         return PatchableJump(result);
     }
@@ -2228,6 +3275,30 @@ public:
         return PatchableJump(result);
     }
 
+    PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, left, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch64(cond, reg, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch64(cond, left, right);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
     PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
         m_makeJumpPatchable = true;
@@ -2236,6 +3307,14 @@ public:
         return PatchableJump(result);
     }
 
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
     PatchableJump patchableJump()
     {
         m_makeJumpPatchable = true;
@@ -2274,11 +3353,26 @@ public:
         m_assembler.nop();
     }
     
+    // We take memoryFence to mean a full acquire-release fence; dmb ish provides that on ARM64.
     void memoryFence()
     {
-        m_assembler.dmbSY();
+        m_assembler.dmbISH();
+    }
+
+    // We take this to mean that it prevents motion of normal stores. That's a store fence on ARM64 (hence the "ST").
+    void storeFence()
+    {
+        m_assembler.dmbISHST();
     }
 
+    // We take this to mean that it prevents motion of normal loads. Ideally we'd have expressed this
+    // using dependencies or half fences, but there are cases where this is as good as it gets. The only
+    // way to get a standalone load fence instruction on ARM is to use the ISH fence, which is just like
+    // the memoryFence().
+    void loadFence()
+    {
+        m_assembler.dmbISH();
+    }
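
The kind of producer/consumer pairing such fences support can be sketched at the C++ level, with std::atomic_thread_fence standing in for the dmb variants. The analogy is loose: dmb ishst only orders stores against stores, which is weaker than a C++ release fence, while dmb ish is a full barrier.

    #include <atomic>
    #include <cassert>
    #include <thread>

    int payload;
    std::atomic<bool> ready{false};

    void producer()
    {
        payload = 42;
        std::atomic_thread_fence(std::memory_order_release); // storeFence() analogue
        ready.store(true, std::memory_order_relaxed);
    }

    void consumer()
    {
        while (!ready.load(std::memory_order_relaxed)) { }
        std::atomic_thread_fence(std::memory_order_acquire); // loadFence() analogue
        assert(payload == 42); // payload store is ordered before the flag store
    }

    int main()
    {
        std::thread t1(producer), t2(consumer);
        t1.join();
        t2.join();
    }
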
 
     // Misc helper functions.
 
@@ -2288,6 +3382,23 @@ public:
         return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
     }
 
+    static std::optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
+    {
+        switch (cond) {
+        case Equal:
+            return Zero;
+        case NotEqual:
+            return NonZero;
+        case LessThan:
+            return Signed;
+        case GreaterThanOrEqual:
+            return PositiveOrZero;
+        default:
+            return std::nullopt;
+        }
+    }
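
The commutation is sound because tst x, x sets Z exactly when x is zero and N from the sign bit of x, so the four relational conditions against zero map one-to-one onto result conditions. A quick C++ check of those identities:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        for (int32_t x : { -7, -1, 0, 1, 7 }) {
            int32_t result = x & x;                              // tst x, x
            bool z = result == 0;                                // Z flag
            bool n = (static_cast<uint32_t>(result) >> 31) != 0; // N flag
            assert((x == 0) == z);   // Equal              -> Zero
            assert((x != 0) == !z);  // NotEqual           -> NonZero
            assert((x < 0) == n);    // LessThan           -> Signed
            assert((x >= 0) == !n);  // GreaterThanOrEqual -> PositiveOrZero
        }
    }
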
+
     static FunctionPtr readCallTarget(CodeLocationCall call)
     {
         return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
@@ -2303,9 +3414,20 @@ public:
         return ARM64Assembler::maxJumpReplacementSize();
     }
 
-    RegisterID scratchRegisterForBlinding() { return getCachedDataTempRegisterIDAndInvalidate(); }
+    static ptrdiff_t patchableJumpSize()
+    {
+        return ARM64Assembler::patchableJumpSize();
+    }
+
+    RegisterID scratchRegisterForBlinding()
+    {
+        // We *do not* have a scratch register for blinding.
+        RELEASE_ASSERT_NOT_REACHED();
+        return getCachedDataTempRegisterIDAndInvalidate();
+    }
 
     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
     
     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
     {
@@ -2318,6 +3440,12 @@ public:
         return CodeLocationLabel();
     }
     
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+    
     static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
     {
         reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
@@ -2328,6 +3456,25 @@ public:
         UNREACHABLE_FOR_PLATFORM();
     }
 
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+    }
+
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
+
 protected:
     ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
     {
@@ -2381,8 +3528,26 @@ protected:
     }
     
 private:
-    ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
-    ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
+    ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return dataMemoryTempRegister().registerIDInvalidate();
+    }
+    ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return cachedMemoryTempRegister().registerIDInvalidate();
+    }
+    ALWAYS_INLINE CachedTempRegister& dataMemoryTempRegister()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return m_dataMemoryTempRegister;
+    }
+    ALWAYS_INLINE CachedTempRegister& cachedMemoryTempRegister()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return m_cachedMemoryTempRegister;
+    }
 
     ALWAYS_INLINE bool isInIntRange(intptr_t value)
     {
@@ -2460,6 +3625,18 @@ private:
         m_assembler.ldur<datasize>(rt, rn, simm);
     }
 
+    template<int datasize>
+    ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        loadUnsignedImmediate<datasize>(rt, rn, pimm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+    {
+        loadUnscaledImmediate<datasize>(rt, rn, simm);
+    }
+
     template<int datasize>
     ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
     {
@@ -2498,21 +3675,16 @@ private:
         }
     }
 
-    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
-    {
-        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
-    }
-
     template<int datasize>
     ALWAYS_INLINE void load(const void* address, RegisterID dest)
     {
         intptr_t currentRegisterContents;
-        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
+        if (cachedMemoryTempRegister().value(currentRegisterContents)) {
             intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
             intptr_t addressDelta = addressAsInt - currentRegisterContents;
 
             if (dest == memoryTempRegister)
-                m_cachedMemoryTempRegister.invalidate();
+                cachedMemoryTempRegister().invalidate();
 
             if (isInIntRange(addressDelta)) {
                 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
@@ -2528,7 +3700,7 @@ private:
 
             if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
-                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+                cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
                 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
                 return;
             }
@@ -2536,17 +3708,18 @@ private:
 
         move(TrustedImmPtr(address), memoryTempRegister);
         if (dest == memoryTempRegister)
-            m_cachedMemoryTempRegister.invalidate();
+            cachedMemoryTempRegister().invalidate();
         else
-            m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+            cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
         m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
     }
 
     template<int datasize>
     ALWAYS_INLINE void store(RegisterID src, const void* address)
     {
+        ASSERT(src != memoryTempRegister);
         intptr_t currentRegisterContents;
-        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
+        if (cachedMemoryTempRegister().value(currentRegisterContents)) {
             intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
             intptr_t addressDelta = addressAsInt - currentRegisterContents;
 
@@ -2564,14 +3737,14 @@ private:
 
             if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
-                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+                cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
                 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
                 return;
             }
         }
 
         move(TrustedImmPtr(address), memoryTempRegister);
-        m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+        cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
         m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
     }
 
@@ -2647,6 +3820,20 @@ private:
         return false;
     }
 
+    template<int datasize>
+    ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset(offset)) {
+            loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
     template<int datasize>
     ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
     {
@@ -2689,25 +3876,37 @@ private:
         return false;
     }
 
-    friend class LinkBuffer;
-    friend class RepatchBuffer;
-
-    static void linkCall(void* code, Call call, FunctionPtr function)
+    Jump jumpAfterFloatingPointCompare(DoubleCondition cond)
     {
-        if (call.isFlagSet(Call::Near))
-            ARM64Assembler::linkCall(code, call.m_label, function.value());
-        else
-            ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+        if (cond == DoubleNotEqual) {
+            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            Jump result = makeBranch(ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return result;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            // We get here if either unordered or equal.
+            Jump result = jump();
+            notEqual.link(this);
+            return result;
+        }
+        return makeBranch(cond);
     }
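
The extra branches exist because ARM64 floating-point compares report NaN operands as unordered (V set), and ConditionNE is also true for unordered results. The predicates the two fixed-up cases must implement look like this in plain C++:

    #include <cassert>
    #include <cmath>

    // DoubleNotEqual must be false for unordered operands ...
    inline bool doubleNotEqual(double a, double b)
    {
        return a != b && !std::isnan(a) && !std::isnan(b);
    }

    // ... while DoubleEqualOrUnordered must be true for them.
    inline bool doubleEqualOrUnordered(double a, double b)
    {
        return a == b || std::isnan(a) || std::isnan(b);
    }

    int main()
    {
        double nan = std::nan("");
        assert(doubleNotEqual(1.0, 2.0));
        assert(!doubleNotEqual(1.0, nan));        // unordered must not jump
        assert(doubleEqualOrUnordered(nan, 1.0)); // unordered must jump
        assert(!doubleEqualOrUnordered(1.0, 2.0));
    }
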
 
-    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
-    {
-        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
-    }
+    friend class LinkBuffer;
 
-    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+        if (!call.isFlagSet(Call::Near))
+            ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+        else if (call.isFlagSet(Call::Tail))
+            ARM64Assembler::linkJump(code, call.m_label, function.value());
+        else
+            ARM64Assembler::linkCall(code, call.m_label, function.value());
     }
 
     CachedTempRegister m_dataMemoryTempRegister;
@@ -2728,6 +3927,18 @@ ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt,
     m_assembler.ldrh(rt, rn, pimm);
 }
 
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.ldrsb<64>(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.ldrsh<64>(rt, rn, pimm);
+}
+
 template<>
 ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
 {
@@ -2740,6 +3951,18 @@ ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt,
     m_assembler.ldurh(rt, rn, simm);
 }
 
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.ldursb<64>(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.ldursh<64>(rt, rn, simm);
+}
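
These full specializations follow the same dispatch pattern as the unsigned loads: the datasize template parameter selects an instruction, and only the widths needing a distinct opcode (ldrsb/ldrsh and their unscaled ldursb/ldursh forms) are specialized. A compact model of the pattern:

    #include <cassert>
    #include <cstdint>

    // A generic declaration dispatched entirely through per-width
    // specializations, modeled here as sign-extending C++ loads.
    template<int datasize>
    int64_t loadSigned(const void* p);

    template<> int64_t loadSigned<8>(const void* p)  { return *static_cast<const int8_t*>(p); }
    template<> int64_t loadSigned<16>(const void* p) { return *static_cast<const int16_t*>(p); }

    int main()
    {
        uint8_t byte = 0x80;
        uint16_t half = 0x8000;
        assert(loadSigned<8>(&byte) == -128);    // ldrsb-like
        assert(loadSigned<16>(&half) == -32768); // ldrsh-like
    }
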
+
 template<>
 ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
 {
@@ -2767,5 +3990,3 @@ ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // MacroAssemblerARM64_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp
new file mode 100644
index 000000000..7119697bb
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+
+#include <wtf/InlineASM.h>
+
+namespace JSC {
+
+#if ENABLE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+#if COMPILER(GCC_OR_CLANG)
+
+// The following are offsets for MacroAssemblerARMv7::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+
+#define PTR_SIZE 4
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE)
+
+#define GPREG_SIZE 4
+#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
+#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
+#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
+#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
+#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
+#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
+#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
+#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
+#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
+#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
+#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
+#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
+#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
+#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
+#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
+#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
+
+#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
+#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
+
+#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
+
+#define FPREG_SIZE 8
+#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
+#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
+#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
+#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
+#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
+#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
+#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
+#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
+#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
+#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
+#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
+#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
+#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
+#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
+#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
+#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
+#define PROBE_CPU_D16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+#define PROBE_CPU_D17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE))
+#define PROBE_CPU_D18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE))
+#define PROBE_CPU_D19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE))
+#define PROBE_CPU_D20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE))
+#define PROBE_CPU_D21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE))
+#define PROBE_CPU_D22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE))
+#define PROBE_CPU_D23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE))
+#define PROBE_CPU_D24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE))
+#define PROBE_CPU_D25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE))
+#define PROBE_CPU_D26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE))
+#define PROBE_CPU_D27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE))
+#define PROBE_CPU_D28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE))
+#define PROBE_CPU_D29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE))
+#define PROBE_CPU_D30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE))
+#define PROBE_CPU_D31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE))
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE))
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARMv7::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d16) == PROBE_CPU_D16_OFFSET, ProbeContext_cpu_d16_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d17) == PROBE_CPU_D17_OFFSET, ProbeContext_cpu_d17_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d18) == PROBE_CPU_D18_OFFSET, ProbeContext_cpu_d18_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d19) == PROBE_CPU_D19_OFFSET, ProbeContext_cpu_d19_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d20) == PROBE_CPU_D20_OFFSET, ProbeContext_cpu_d20_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d21) == PROBE_CPU_D21_OFFSET, ProbeContext_cpu_d21_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d22) == PROBE_CPU_D22_OFFSET, ProbeContext_cpu_d22_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d23) == PROBE_CPU_D23_OFFSET, ProbeContext_cpu_d23_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d24) == PROBE_CPU_D24_OFFSET, ProbeContext_cpu_d24_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d25) == PROBE_CPU_D25_OFFSET, ProbeContext_cpu_d25_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d26) == PROBE_CPU_D26_OFFSET, ProbeContext_cpu_d26_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d27) == PROBE_CPU_D27_OFFSET, ProbeContext_cpu_d27_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d28) == PROBE_CPU_D28_OFFSET, ProbeContext_cpu_d28_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d29) == PROBE_CPU_D29_OFFSET, ProbeContext_cpu_d29_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d30) == PROBE_CPU_D30_OFFSET, ProbeContext_cpu_d30_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d31) == PROBE_CPU_D31_OFFSET, ProbeContext_cpu_d31_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(sizeof(MacroAssemblerARMv7::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+
+#undef PROBE_OFFSETOF
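
The COMPILE_ASSERT wall pins every byte offset the hand-written trampoline below hard-codes. The same pattern in miniature, using static_assert (the modern spelling of COMPILE_ASSERT) and a hypothetical struct:

    #include <cstddef>
    #include <cstdint>

    // Hand-written assembly addresses fields by byte offset, so each offset
    // is frozen at compile time; reordering a field breaks the build rather
    // than the runtime.
    struct MiniProbeContext {
        void (*probeFunction)(MiniProbeContext*);
        void* arg1;
        void* arg2;
        uint32_t r0;
    };

    static_assert(offsetof(MiniProbeContext, probeFunction) == 0 * sizeof(void*), "asm offset");
    static_assert(offsetof(MiniProbeContext, arg1) == 1 * sizeof(void*), "asm offset");
    static_assert(offsetof(MiniProbeContext, arg2) == 2 * sizeof(void*), "asm offset");
    static_assert(offsetof(MiniProbeContext, r0) == 3 * sizeof(void*), "asm offset");

    int main() { }
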
+    
+asm (
+    ".text" "\n"
+    ".align 2" "\n"
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    ".thumb" "\n"
+    ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    // MacroAssemblerARMv7::probe() has already generated code to store some values.
+    // The top of stack now looks like this:
+    //     sp[0 * ptrSize]: probeFunction
+    //     sp[1 * ptrSize]: arg1
+    //     sp[2 * ptrSize]: arg2
+    //     sp[3 * ptrSize]: saved r0
+    //     sp[4 * ptrSize]: saved ip
+    //     sp[5 * ptrSize]: saved lr
+    //     sp[6 * ptrSize]: saved sp
+
+    "mov       ip, sp" "\n"
+    "mov       r0, sp" "\n"
+    "sub       r0, r0, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n"
+
+    // Align the stack to 16 bytes; this more than satisfies the AAPCS,
+    // which requires 8-byte stack alignment at public interfaces.
+    "bic       r0, r0, #0xf" "\n"
+    "mov       sp, r0" "\n"
+
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "add       lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R1_OFFSET) "\n"
+    "stmia     lr, { r1-r11 }" "\n"
+    "mrs       lr, APSR" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "vmrs      lr, FPSCR" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+
+    "ldr       lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n"
+    "vstmia.64 ip!, { d0-d15 }" "\n"
+    "vstmia.64 ip!, { d16-d31 }" "\n"
+
+    "mov       fp, sp" "\n" // Save the ProbeContext*.
+
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "mov       r0, sp" "\n" // the ProbeContext* arg.
+    "blx       ip" "\n"
+
+    "mov       sp, fp" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning.
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D31_OFFSET + FPREG_SIZE) "\n"
+    "vldmdb.64 ip!, { d16-d31 }" "\n"
+    "vldmdb.64 ip!, { d0-d15 }" "\n"
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n"
+    "ldmdb     ip, { r0-r11 }" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+    "vmsr      FPSCR, ip" "\n"
+
+    // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr.
+    // There are 2 issues that complicate the restoration of these last few
+    // registers:
+    //
+    // 1. Normal ARM calling convention relies on moving lr to pc to return to
+    //    the caller. In our case, the address to return to is specified by
+    //    ProbeContext.cpu.pc. And at that moment, we won't have any available
+    //    scratch registers to hold the return address (lr needs to hold
+    //    ProbeContext.cpu.lr, not the return address).
+    //
+    //    The solution is to store the return address on the stack and load the
+    //    pc from there.
+    //
+    // 2. Issue 1 means we will need to write to the stack location at
+    //    ProbeContext.cpu.sp - 4. But if the user probe function had modified
+    //    the value of ProbeContext.cpu.sp to point in the range between
+    //    &ProbeContext.cpu.ip through &ProbeContext.cpu.apsr, then the action for
+    //    Issue 1 may trash the values to be restored before we can restore
+    //    them.
+    //
+    //    The solution is to check if ProbeContext.cpu.sp contains a value in
+    //    the undesirable range. If so, we copy the remaining ProbeContext
+    //    register data to a safe range (at memory lower than where
+    //    ProbeContext.cpu.sp points) first, and restore the remaining register
+    //    from this new range.
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n"
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "cmp       lr, ip" "\n"
+    "it        gt" "\n"
+    "bgt     " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+    // We get here because the new expected stack pointer location is lower
+    // than where it's supposed to be. This means the safe range of stack
+    // memory where we'll be copying the remaining register restore values to
+    // might be in a region of memory below the sp i.e. unallocated stack
+    // memory. This, in turn, makes it vulnerable to interrupts potentially
+    // trashing the copied values. To prevent that, we must first allocate the
+    // needed stack memory by adjusting the sp before the copying.
+
+    "sub       lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE)
+    " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n"
+
+    "mov       ip, sp" "\n"
+    "mov       sp, lr" "\n"
+    "mov       lr, ip" "\n"
+
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+
+    ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampolineEnd) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "sub       lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n"
+    "str       ip, [lr]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "msr       APSR, ip" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "mov       lr, ip" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "pop       { pc }" "\n"
+);
+#endif // COMPILER(GCC_OR_CLANG)
+
+void MacroAssemblerARMv7::probe(MacroAssemblerARMv7::ProbeFunction function, void* arg1, void* arg2)
+{
+    push(RegisterID::lr);
+    push(RegisterID::lr);
+    add32(TrustedImm32(8), RegisterID::sp, RegisterID::lr);
+    store32(RegisterID::lr, ArmAddress(RegisterID::sp, 4));
+    push(RegisterID::ip);
+    push(RegisterID::r0);
+    // The following uses RegisterID::ip. So, they must come after we push ip above.
+    push(trustedImm32FromPtr(arg2));
+    push(trustedImm32FromPtr(arg1));
+    push(trustedImm32FromPtr(function));
+
+    move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::ip);
+    m_assembler.blx(RegisterID::ip);
+}
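
A hypothetical call-site sketch follows; the struct is a trimmed stand-in for MacroAssemblerARMv7::ProbeContext with field names taken from the offsets asserted above, and dumpR0 is illustrative rather than a JSC function:

    #include <cstdint>
    #include <cstdio>

    struct CPUState { uint32_t r0; uint32_t pc; };
    struct ProbeContext {
        void (*probeFunction)(ProbeContext*);
        void* arg1;
        void* arg2;
        CPUState cpu;
    };

    static void dumpR0(ProbeContext* context)
    {
        std::printf("r0 = 0x%x at pc = 0x%x\n", context->cpu.r0, context->cpu.pc);
        context->cpu.r0 = 0; // probes may rewrite state; the trampoline restores it
    }

    int main()
    {
        // At a JIT site this would be: jit.probe(dumpR0, nullptr, nullptr);
        // and the trampoline would invoke the function via blx.
        ProbeContext context = { dumpR0, nullptr, nullptr, { 0x2a, 0x1000 } };
        context.probeFunction(&context);
    }
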
+#endif // ENABLE(MASM_PROBE)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index 68a04fd22..3c95f2802 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2009-2010, 2014-2016 Apple Inc. All rights reserved.
  * Copyright (C) 2010 University of Szeged
  *
  * Redistribution and use in source and binary forms, with or without
@@ -24,8 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef MacroAssemblerARMv7_h
-#define MacroAssemblerARMv7_h
+#pragma once
 
 #if ENABLE(ASSEMBLER)
 
@@ -34,7 +33,7 @@
 
 namespace JSC {
 
-class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler, MacroAssemblerARMv7> {
     static const RegisterID dataTempRegister = ARMRegisters::ip;
     static const RegisterID addressTempRegister = ARMRegisters::r6;
 
@@ -42,6 +41,9 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler {
     inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
 
 public:
+    static const unsigned numGPRs = 16;
+    static const unsigned numFPRs = 16;
+    
     MacroAssemblerARMv7()
         : m_makeJumpPatchable(false)
     {
@@ -62,12 +64,11 @@ public:
 
     Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
     void* unlinkedCode() { return m_assembler.unlinkedCode(); }
-    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
-    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
-    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
-    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
-    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
-    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+    static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
+    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
+    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
+    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+    static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARMv7Assembler::link(record, from, fromInstruction, to); }
 
     struct ArmAddress {
         enum AddressType {
@@ -156,6 +157,11 @@ public:
         m_assembler.add(dest, dest, src);
     }
 
+    void add32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.add(dest, left, right);
+    }
+
     void add32(TrustedImm32 imm, RegisterID dest)
     {
         add32(imm, dest, dest);
@@ -170,6 +176,14 @@ public:
     void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+
+        // For adds targeting the stack pointer, move src into sp first;
+        // encoding the add directly would yield an unpredictable instruction.
+        if (dest == ARMRegisters::sp && src != dest) {
+            move(src, ARMRegisters::sp);
+            src = ARMRegisters::sp;
+        }
+
         if (armImm.isValid())
             m_assembler.add(dest, src, armImm);
         else {
@@ -218,6 +232,11 @@ public:
         store32(dataTempRegister, address.m_ptr);
     }
 
+    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+    {
+        add32(imm, srcDest);
+    }
+    
     void add64(TrustedImm32 imm, AbsoluteAddress address)
     {
         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
@@ -305,6 +324,11 @@ public:
         m_assembler.smull(dest, dataTempRegister, dest, src);
     }
 
+    void mul32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.smull(dest, dataTempRegister, left, right);
+    }
+
     void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
         move(imm, dataTempRegister);
@@ -329,6 +353,31 @@ public:
         store32(dataTempRegister, addressTempRegister);
     }
 
+    void or32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid()) {
+            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+            load32(addressTempRegister, dataTempRegister);
+            m_assembler.orr(dataTempRegister, dataTempRegister, armImm);
+            store32(dataTempRegister, addressTempRegister);
+        } else {
+            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+            load32(addressTempRegister, dataTempRegister);
+            move(imm, addressTempRegister);
+            m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister);
+            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+            store32(dataTempRegister, addressTempRegister);
+        }
+    }
+
+    void or32(TrustedImm32 imm, Address address)
+    {
+        load32(address, dataTempRegister);
+        or32(imm, dataTempRegister, dataTempRegister);
+        store32(dataTempRegister, address);
+    }
+
     void or32(TrustedImm32 imm, RegisterID dest)
     {
         or32(imm, dest, dest);
@@ -345,6 +394,7 @@ public:
         if (armImm.isValid())
             m_assembler.orr(dest, src, armImm);
         else {
+            ASSERT(src != dataTempRegister);
             move(imm, dataTempRegister);
             m_assembler.orr(dest, src, dataTempRegister);
         }
@@ -362,7 +412,10 @@ public:
 
     void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.asr(dest, src, imm.m_value & 0x1f);
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.asr(dest, src, imm.m_value & 0x1f);
     }
 
     void rshift32(RegisterID shiftAmount, RegisterID dest)
@@ -387,7 +440,10 @@ public:
     
     void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.lsr(dest, src, imm.m_value & 0x1f);
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.lsr(dest, src, imm.m_value & 0x1f);
     }
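
The new zero-shift guards in rshift32/urshift32 work around an encoding quirk: in the ARM immediate-shift field, asr/lsr cannot express a shift of 0, because the encoding 0 is reused to mean a shift of 32. Emitting the unguarded instruction for a requested shift of zero would therefore not be a no-op:

    #include <cassert>
    #include <cstdint>

    // Models what the hardware does with the 5-bit immediate field for lsr.
    inline uint32_t lsrAsEncoded(uint32_t value, uint32_t imm5)
    {
        uint32_t shift = imm5 ? imm5 : 32; // imm5 == 0 decodes as a shift of 32
        return shift >= 32 ? 0 : value >> shift;
    }

    int main()
    {
        assert(lsrAsEncoded(0x80000000u, 1) == 0x40000000u);
        assert(lsrAsEncoded(0xdeadbeefu, 0) == 0); // not the identity!
    }
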
 
     void urshift32(RegisterID shiftAmount, RegisterID dest)
@@ -405,6 +461,11 @@ public:
         m_assembler.sub(dest, dest, src);
     }
 
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.sub(dest, left, right);
+    }
+
     void sub32(TrustedImm32 imm, RegisterID dest)
     {
         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
@@ -527,7 +588,7 @@ private:
         }
     }
     
-    void load16Signed(ArmAddress address, RegisterID dest)
+    void load16SignedExtendTo32(ArmAddress address, RegisterID dest)
     {
         ASSERT(address.type == ArmAddress::HasIndex);
         m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
@@ -547,7 +608,7 @@ private:
         }
     }
     
-    void load8Signed(ArmAddress address, RegisterID dest)
+    void load8SignedExtendTo32(ArmAddress address, RegisterID dest)
     {
         ASSERT(address.type == ArmAddress::HasIndex);
         m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
@@ -624,6 +685,18 @@ public:
         m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
     }
     
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), dataTempRegister);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), addressTempRegister);
+        abortWithReason(reason);
+    }
+
     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
     {
         ConvertibleLoadLabel result(this);
@@ -637,7 +710,7 @@ public:
         load8(setupArmAddress(address), dest);
     }
 
-    void load8Signed(ImplicitAddress, RegisterID)
+    void load8SignedExtendTo32(ImplicitAddress, RegisterID)
     {
         UNREACHABLE_FOR_PLATFORM();
     }
@@ -647,9 +720,9 @@ public:
         load8(setupArmAddress(address), dest);
     }
     
-    void load8Signed(BaseIndex address, RegisterID dest)
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
-        load8Signed(setupArmAddress(address), dest);
+        load8SignedExtendTo32(setupArmAddress(address), dest);
     }
 
     void load8(const void* address, RegisterID dest)
@@ -683,9 +756,9 @@ public:
         m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
     }
     
-    void load16Signed(BaseIndex address, RegisterID dest)
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
-        load16Signed(setupArmAddress(address), dest);
+        load16SignedExtendTo32(setupArmAddress(address), dest);
     }
     
     void load16(ImplicitAddress address, RegisterID dest)
@@ -699,7 +772,7 @@ public:
         }
     }
     
-    void load16Signed(ImplicitAddress, RegisterID)
+    void load16SignedExtendTo32(ImplicitAddress, RegisterID)
     {
         UNREACHABLE_FOR_PLATFORM();
     }
@@ -745,6 +818,11 @@ public:
         store32(dataTempRegister, address);
     }
 
+    void store8(RegisterID src, Address address)
+    {
+        store8(src, setupArmAddress(address));
+    }
+    
     void store8(RegisterID src, BaseIndex address)
     {
         store8(src, setupArmAddress(address));
@@ -758,7 +836,15 @@ public:
     
     void store8(TrustedImm32 imm, void* address)
     {
-        move(imm, dataTempRegister);
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(imm8, dataTempRegister);
+        store8(dataTempRegister, address);
+    }
+    
+    void store8(TrustedImm32 imm, Address address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(imm8, dataTempRegister);
         store8(dataTempRegister, address);
     }
     
@@ -803,6 +889,7 @@ public:
     static bool supportsFloatingPointTruncate() { return true; }
     static bool supportsFloatingPointSqrt() { return true; }
     static bool supportsFloatingPointAbs() { return true; }
+    static bool supportsFloatingPointRounding() { return false; }
 
     void loadDouble(ImplicitAddress address, FPRegisterID dest)
     {
@@ -856,9 +943,15 @@ public:
             m_assembler.vmov(dest, src);
     }
 
-    void loadDouble(const void* address, FPRegisterID dest)
+    void moveZeroToDouble(FPRegisterID reg)
     {
-        move(TrustedImmPtr(address), addressTempRegister);
+        static double zeroConstant = 0.;
+        loadDouble(TrustedImmPtr(&zeroConstant), reg);
+    }
+
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+    {
+        move(address, addressTempRegister);
         m_assembler.vldr(dest, addressTempRegister, 0);
     }
 
@@ -892,9 +985,9 @@ public:
         m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
     }
 
-    void storeDouble(FPRegisterID src, const void* address)
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
     {
-        move(TrustedImmPtr(address), addressTempRegister);
+        move(address, addressTempRegister);
         storeDouble(src, addressTempRegister);
     }
 
@@ -932,7 +1025,7 @@ public:
 
     void addDouble(AbsoluteAddress address, FPRegisterID dest)
     {
-        loadDouble(address.m_ptr, fpTempRegister);
+        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
         m_assembler.vadd(dest, dest, fpTempRegister);
     }
 
@@ -993,6 +1086,24 @@ public:
         m_assembler.vneg(dest, src);
     }
 
+    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
     void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
     {
         m_assembler.vmov(fpTempRegister, src, src);
@@ -1138,14 +1249,12 @@ public:
     
     void pop(RegisterID dest)
     {
-        // store postindexed with writeback
-        m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+        m_assembler.pop(dest);
     }
 
     void push(RegisterID src)
     {
-        // store preindexed with writeback
-        m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+        m_assembler.push(src);
     }
 
     void push(Address address)
@@ -1160,6 +1269,16 @@ public:
         push(dataTempRegister);
     }
 
+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.pop(1 << dest1 | 1 << dest2);
+    }
+    
+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.push(1 << src1 | 1 << src2);
+    }
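pushPair()/popPair() lean on the Thumb-2 PUSH/POP register-list encoding, so moving two registers costs a single instruction; the list operand is just the OR of two one-hot bits. A sketch, assuming r4 and r5 are free:

    // (1 << r4) | (1 << r5) == 0x30, a single PUSH {r4, r5} / POP {r4, r5}
    jit.pushPair(ARMRegisters::r4, ARMRegisters::r5);
    jit.popPair(ARMRegisters::r4, ARMRegisters::r5);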
+    
     // Register move operations:
     //
     // Move values in registers.
@@ -1225,6 +1344,11 @@ public:
         m_assembler.dmbSY();
     }
     
+    void storeFence()
+    {
+        m_assembler.dmbISHST();
+    }
+    
     static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
     {
         ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
@@ -1235,6 +1359,11 @@ public:
         return ARMv7Assembler::maxJumpReplacementSize();
     }
 
+    static ptrdiff_t patchableJumpSize()
+    {
+        return ARMv7Assembler::patchableJumpSize();
+    }
+
     // Forwards / external control flow operations:
     //
     // This set of jump and conditional branch operations return a Jump
@@ -1255,25 +1384,22 @@ public:
 private:
 
     // Should we be using TEQ for equal/not-equal?
-    void compare32(RegisterID left, TrustedImm32 right)
+    void compare32AndSetFlags(RegisterID left, TrustedImm32 right)
     {
         int32_t imm = right.m_value;
-        if (!imm)
-            m_assembler.tst(left, left);
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+        if (armImm.isValid())
+            m_assembler.cmp(left, armImm);
+        else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+            m_assembler.cmn(left, armImm);
         else {
-            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
-            if (armImm.isValid())
-                m_assembler.cmp(left, armImm);
-            else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
-                m_assembler.cmn(left, armImm);
-            else {
-                move(TrustedImm32(imm), dataTempRegister);
-                m_assembler.cmp(left, dataTempRegister);
-            }
+            move(TrustedImm32(imm), dataTempRegister);
+            m_assembler.cmp(left, dataTempRegister);
         }
     }
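The renamed helper drops the old `tst left, left` shortcut for zero (CMP with #0 encodes directly) and walks three fallbacks: CMP with an encodable Thumb-2 modified immediate, CMN with the negated value, and finally a register compare through dataTempRegister. Two worked cases with illustrative constants:

    // compare32AndSetFlags(r0, TrustedImm32(-200)):
    //   0xFFFFFF38 has no modified-immediate encoding, but 200 does,
    //   so CMN r0, #200 is emitted; its flags match those of CMP r0, #-200.
    // compare32AndSetFlags(r0, TrustedImm32(0x00FF00FF)):
    //   0x00FF00FF fits the byte-duplication pattern, so CMP r0, #0x00FF00FF.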
 
-    void test32(RegisterID reg, TrustedImm32 mask)
+public:
+    void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
         int32_t imm = mask.m_value;
 
@@ -1281,16 +1407,28 @@ private:
             m_assembler.tst(reg, reg);
         else {
             ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
-            if (armImm.isValid())
-                m_assembler.tst(reg, armImm);
-            else {
+            if (armImm.isValid()) {
+                if (reg == ARMRegisters::sp) {
+                    move(reg, addressTempRegister);
+                    m_assembler.tst(addressTempRegister, armImm);
+                } else
+                    m_assembler.tst(reg, armImm);
+            } else {
                 move(mask, dataTempRegister);
-                m_assembler.tst(reg, dataTempRegister);
+                if (reg == ARMRegisters::sp) {
+                    move(reg, addressTempRegister);
+                    m_assembler.tst(addressTempRegister, dataTempRegister);
+                } else
+                    m_assembler.tst(reg, dataTempRegister);
             }
         }
     }
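The new special-casing exists because TST with Rn = SP is unpredictable in Thumb-2, so the stack pointer is first copied into addressTempRegister. Roughly:

    // test32(ARMRegisters::sp, TrustedImm32(0xf)) now expands to:
    //   mov addressTempRegister, sp
    //   tst addressTempRegister, #0xf
    // instead of the unpredictable 'tst sp, #0xf'.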
+    
+    Jump branch(ResultCondition cond)
+    {
+        return Jump(makeBranch(cond));
+    }
 
-public:
     Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
     {
         m_assembler.cmp(left, right);
@@ -1299,7 +1437,7 @@ public:
 
     Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
     {
-        compare32(left, right);
+        compare32AndSetFlags(left, right);
         return Jump(makeBranch(cond));
     }
 
@@ -1349,44 +1487,54 @@ public:
         return branch32(cond, addressTempRegister, right);
     }
 
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        load32(left, dataTempRegister);
+        return branch32(cond, dataTempRegister, right);
+    }
+
     Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
     {
-        compare32(left, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        compare32AndSetFlags(left, right8);
         return Jump(makeBranch(cond));
     }
 
     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
     {
-        ASSERT(!(0xffffff00 & right.m_value));
         // use addressTempRegister incase the branch8 we call uses dataTempRegister. :-/
-        load8(left, addressTempRegister);
-        return branch8(cond, addressTempRegister, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        return branch8(cond, addressTempRegister, right8);
     }
 
     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
     {
-        ASSERT(!(0xffffff00 & right.m_value));
         // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
-        load8(left, addressTempRegister);
-        return branch32(cond, addressTempRegister, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right8);
     }
     
     Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
     {
         // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
-        load8(Address(addressTempRegister), addressTempRegister);
-        return branch32(cond, addressTempRegister, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
+        return branch32(cond, addressTempRegister, right8);
     }
     
     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
         m_assembler.tst(reg, mask);
         return Jump(makeBranch(cond));
     }
 
     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
         test32(reg, mask);
         return Jump(makeBranch(cond));
     }
@@ -1408,23 +1556,26 @@ public:
     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
         // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
-        load8(address, addressTempRegister);
-        return branchTest32(cond, addressTempRegister, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
         // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
-        load8(address, addressTempRegister);
-        return branchTest32(cond, addressTempRegister, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
         // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
-        load8(Address(addressTempRegister), addressTempRegister);
-        return branchTest32(cond, addressTempRegister, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask8);
     }
 
     void jump(RegisterID target)
@@ -1534,7 +1685,7 @@ public:
         return branchMul32(cond, src, dest, dest);
     }
 
-    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
         move(imm, dataTempRegister);
         return branchMul32(cond, dataTempRegister, src, dest);
@@ -1607,6 +1758,12 @@ public:
         return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
     }
 
+    ALWAYS_INLINE Call nearTailCall()
+    {
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail);
+    }
+
     ALWAYS_INLINE Call call()
     {
         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
@@ -1645,13 +1802,14 @@ public:
 
     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
     {
-        load8(left, addressTempRegister);
-        compare32(cond, addressTempRegister, right, dest);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        compare32(cond, addressTempRegister, right8, dest);
     }
 
     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
-        compare32(left, right);
+        compare32AndSetFlags(left, right);
         m_assembler.it(armV7Condition(cond), false);
         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
@@ -1672,8 +1830,9 @@ public:
 
     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
-        load8(address, dataTempRegister);
-        test32(dataTempRegister, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
+        test32(dataTempRegister, mask8);
         m_assembler.it(armV7Condition(cond), false);
         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
@@ -1706,6 +1865,13 @@ public:
         return branch32(cond, addressTempRegister, dataTempRegister);
     }
     
+    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        load32(left, addressTempRegister);
+        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+        return branch32(cond, addressTempRegister, dataTempRegister);
+    }
+    
     PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
     {
         m_makeJumpPatchable = true;
@@ -1730,6 +1896,14 @@ public:
         return PatchableJump(result);
     }
 
+    PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, left, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
     PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
         m_makeJumpPatchable = true;
@@ -1738,6 +1912,14 @@ public:
         return PatchableJump(result);
     }
 
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
     PatchableJump patchableJump()
     {
         padBeforePatch();
@@ -1770,17 +1952,13 @@ public:
     }
 
     
-    int executableOffsetFor(int location)
-    {
-        return m_assembler.executableOffsetFor(location);
-    }
-
     static FunctionPtr readCallTarget(CodeLocationCall call)
     {
         return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
     }
     
     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
     
     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
     {
@@ -1804,36 +1982,35 @@ public:
         return CodeLocationLabel();
     }
     
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+    
     static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
     {
         UNREACHABLE_FOR_PLATFORM();
     }
 
-#if USE(MASM_PROBE)
-    struct CPUState {
-        #define DECLARE_REGISTER(_type, _regName) \
-            _type _regName;
-        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
-        #undef DECLARE_REGISTER
-    };
-
-    struct ProbeContext;
-    typedef void (*ProbeFunction)(struct ProbeContext*);
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
 
-    struct ProbeContext {
-        ProbeFunction probeFunction;
-        void* arg1;
-        void* arg2;
-        CPUState cpu;
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
 
-        void dump(const char* indentation = 0);
-    private:
-        void dumpCPURegisters(const char* indentation);
-    };
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
 
-    // For details about probe(), see comment in MacroAssemblerX86_64.h.
-    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-#endif // USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
 
 protected:
     ALWAYS_INLINE Jump jump()
@@ -1928,24 +2105,16 @@ protected:
     
 private:
     friend class LinkBuffer;
-    friend class RepatchBuffer;
 
     static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        ARMv7Assembler::linkCall(code, call.m_label, function.value());
-    }
-
-    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
-    {
-        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
-    }
-
-    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
-    {
-        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+        if (call.isFlagSet(Call::Tail))
+            ARMv7Assembler::linkJump(code, call.m_label, function.value());
+        else
+            ARMv7Assembler::linkCall(code, call.m_label, function.value());
     }
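The Call::Tail branch matters because nearTailCall() above emits a plain BX with no return address, so the site has to be patched as a branch rather than as a call. A sketch of the two emission paths (caller code assumed, not part of this patch):

    Call call = jit.nearCall();     // BLX; linkCall() routes to ARMv7Assembler::linkCall()
    Call tail = jit.nearTailCall(); // BX;  linkCall() routes to ARMv7Assembler::linkJump()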
 
-#if USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
     inline TrustedImm32 trustedImm32FromPtr(void* ptr)
     {
         return TrustedImm32(TrustedImmPtr(ptr));
@@ -1968,5 +2137,3 @@ private:
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // MacroAssemblerARMv7_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp
new file mode 100644
index 000000000..06460c9cb
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "MacroAssemblerCodeRef.h"
+
+#include "JSCInlines.h"
+#include "LLIntData.h"
+
+namespace JSC {
+
+MacroAssemblerCodePtr MacroAssemblerCodePtr::createLLIntCodePtr(OpcodeID codeId)
+{
+    return createFromExecutableAddress(LLInt::getCodePtr(codeId));
+}
+
+void MacroAssemblerCodePtr::dumpWithName(const char* name, PrintStream& out) const
+{
+    if (!m_value) {
+        out.print(name, "(null)");
+        return;
+    }
+    if (executableAddress() == dataLocation()) {
+        out.print(name, "(", RawPointer(executableAddress()), ")");
+        return;
+    }
+    out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")");
+}
+
+void MacroAssemblerCodePtr::dump(PrintStream& out) const
+{
+    dumpWithName("CodePtr", out);
+}
+
+MacroAssemblerCodeRef MacroAssemblerCodeRef::createLLIntCodeRef(OpcodeID codeId)
+{
+    return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
+}
+
+bool MacroAssemblerCodeRef::tryToDisassemble(PrintStream& out, const char* prefix) const
+{
+    return JSC::tryToDisassemble(m_codePtr, size(), prefix, out);
+}
+
+bool MacroAssemblerCodeRef::tryToDisassemble(const char* prefix) const
+{
+    return tryToDisassemble(WTF::dataFile(), prefix);
+}
+
+CString MacroAssemblerCodeRef::disassembly() const
+{
+    StringPrintStream out;
+    if (!tryToDisassemble(out, ""))
+        return CString();
+    return out.toCString();
+}
+
+void MacroAssemblerCodeRef::dump(PrintStream& out) const
+{
+    m_codePtr.dumpWithName("CodeRef", out);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 5f8ba8a92..c31cf8526 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,20 +23,17 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef MacroAssemblerCodeRef_h
-#define MacroAssemblerCodeRef_h
+#pragma once
 
-#include "Disassembler.h"
 #include "ExecutableAllocator.h"
-#include "LLIntData.h"
 #include <wtf/DataLog.h>
-#include <wtf/PassRefPtr.h>
 #include <wtf/PrintStream.h>
 #include <wtf/RefPtr.h>
+#include <wtf/text/CString.h>
 
 // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
 // instruction address on the platform (for example, check any alignment requirements).
-#if CPU(ARM_THUMB2) && !ENABLE(LLINT_C_LOOP)
+#if CPU(ARM_THUMB2) && ENABLE(JIT)
 // ARM instructions must be 16-bit aligned. Thumb2 code pointers to be loaded into
 // into the processor are decorated with the bottom bit set, while traditional ARM has
 // the lower bit clear. Since we don't know what kind of pointer, we check for both
@@ -51,34 +48,10 @@
 #define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
 #endif
 
-#if CPU(X86) && OS(WINDOWS)
-#define CALLING_CONVENTION_IS_STDCALL 1
-#ifndef CDECL
-#if COMPILER(MSVC)
-#define CDECL __cdecl
-#else
-#define CDECL __attribute__ ((__cdecl))
-#endif // COMPILER(MSVC)
-#endif // CDECL
-#else
-#define CALLING_CONVENTION_IS_STDCALL 0
-#endif
-
-#if CPU(X86)
-#define HAS_FASTCALL_CALLING_CONVENTION 1
-#ifndef FASTCALL
-#if COMPILER(MSVC)
-#define FASTCALL __fastcall
-#else
-#define FASTCALL  __attribute__ ((fastcall))
-#endif // COMPILER(MSVC)
-#endif // FASTCALL
-#else
-#define HAS_FASTCALL_CALLING_CONVENTION 0
-#endif // CPU(X86)
-
 namespace JSC {
 
+enum OpcodeID : unsigned;
+
 // FunctionPtr:
 //
 // FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
@@ -132,6 +105,12 @@ public:
         ASSERT_VALID_CODE_POINTER(m_value);
     }
 
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6>
+    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
 // MSVC doesn't seem to treat functions with different calling conventions as
 // different types; these methods already defined for fastcall, below.
 #if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
@@ -172,7 +151,7 @@ public:
     }
 #endif
 
-#if HAS_FASTCALL_CALLING_CONVENTION
+#if COMPILER_SUPPORTS(FASTCALL_CALLING_CONVENTION)
 
     template<typename returnType>
     FunctionPtr(returnType (FASTCALL *value)())
@@ -254,6 +233,11 @@ public:
     }
 
     void* value() const { return m_value; }
+    
+    void dump(PrintStream& out) const
+    {
+        out.print(RawPointer(m_value));
+    }
 
 private:
     void* m_value;
@@ -288,12 +272,7 @@ public:
         return result;
     }
 
-#if ENABLE(LLINT)
-    static MacroAssemblerCodePtr createLLIntCodePtr(LLIntCode codeId)
-    {
-        return createFromExecutableAddress(LLInt::getCodePtr(codeId));
-    }
-#endif
+    static MacroAssemblerCodePtr createLLIntCodePtr(OpcodeID codeId);
 
     explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
         : m_value(ra.value())
@@ -309,29 +288,16 @@ public:
     void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
 #endif
 
-    bool operator!() const
-    {
-        return !m_value;
-    }
+    explicit operator bool() const { return m_value; }
     
     bool operator==(const MacroAssemblerCodePtr& other) const
     {
         return m_value == other.m_value;
     }
 
-    void dumpWithName(const char* name, PrintStream& out) const
-    {
-        if (executableAddress() == dataLocation()) {
-            out.print(name, "(", RawPointer(executableAddress()), ")");
-            return;
-        }
-        out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")");
-    }
+    void dumpWithName(const char* name, PrintStream& out) const;
     
-    void dump(PrintStream& out) const
-    {
-        dumpWithName("CodePtr", out);
-    }
+    void dump(PrintStream& out) const;
     
     enum EmptyValueTag { EmptyValue };
     enum DeletedValueTag { DeletedValue };
@@ -387,9 +353,9 @@ public:
     {
     }
 
-    MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
+    MacroAssemblerCodeRef(Ref<ExecutableMemoryHandle>&& executableMemory)
         : m_codePtr(executableMemory->start())
-        , m_executableMemory(executableMemory)
+        , m_executableMemory(WTFMove(executableMemory))
     {
         ASSERT(m_executableMemory->isManaged());
         ASSERT(m_executableMemory->start());
@@ -404,13 +370,8 @@ public:
         return MacroAssemblerCodeRef(codePtr);
     }
     
-#if ENABLE(LLINT)
     // Helper for creating self-managed code refs from LLInt.
-    static MacroAssemblerCodeRef createLLIntCodeRef(LLIntCode codeId)
-    {
-        return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
-    }
-#endif
+    static MacroAssemblerCodeRef createLLIntCodeRef(OpcodeID codeId);
 
     ExecutableMemoryHandle* executableMemory() const
     {
@@ -428,18 +389,16 @@ public:
             return 0;
         return m_executableMemory->sizeInBytes();
     }
+
+    bool tryToDisassemble(PrintStream& out, const char* prefix = "") const;
     
-    bool tryToDisassemble(const char* prefix) const
-    {
-        return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile());
-    }
+    bool tryToDisassemble(const char* prefix = "") const;
     
-    bool operator!() const { return !m_codePtr; }
+    JS_EXPORT_PRIVATE CString disassembly() const;
     
-    void dump(PrintStream& out) const
-    {
-        m_codePtr.dumpWithName("CodeRef", out);
-    }
+    explicit operator bool() const { return !!m_codePtr; }
+    
+    void dump(PrintStream& out) const;
 
 private:
     MacroAssemblerCodePtr m_codePtr;
@@ -459,5 +418,3 @@ template<typename T> struct HashTraits;
 template<> struct HashTraits<JSC::MacroAssemblerCodePtr> : public CustomHashTraits<JSC::MacroAssemblerCodePtr> { };
 
 } // namespace WTF
-
-#endif // MacroAssemblerCodeRef_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerHelpers.h b/Source/JavaScriptCore/assembler/MacroAssemblerHelpers.h
new file mode 100644
index 000000000..047e94c27
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerHelpers.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+namespace MacroAssemblerHelpers {
+
+// True if this:
+//     branch8(cond, value, value)
+// Is the same as this:
+//     branch32(cond, signExt8(value), signExt8(value))
+template<typename MacroAssemblerType>
+inline bool isSigned(typename MacroAssemblerType::RelationalCondition cond)
+{
+    switch (cond) {
+    case MacroAssemblerType::Equal:
+    case MacroAssemblerType::NotEqual:
+    case MacroAssemblerType::GreaterThan:
+    case MacroAssemblerType::GreaterThanOrEqual:
+    case MacroAssemblerType::LessThan:
+    case MacroAssemblerType::LessThanOrEqual:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// True if this:
+//     branch8(cond, value, value)
+// Is the same as this:
+//     branch32(cond, zeroExt8(value), zeroExt8(value))
+template<typename MacroAssemblerType>
+inline bool isUnsigned(typename MacroAssemblerType::RelationalCondition cond)
+{
+    switch (cond) {
+    case MacroAssemblerType::Equal:
+    case MacroAssemblerType::NotEqual:
+    case MacroAssemblerType::Above:
+    case MacroAssemblerType::AboveOrEqual:
+    case MacroAssemblerType::Below:
+    case MacroAssemblerType::BelowOrEqual:
+        return true;
+    default:
+        return false;
+    }
+}
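Equal and NotEqual sit in both the signed and unsigned sets because equality does not care how the byte was widened; ordering conditions do. A worked example for the byte 0x80:

    // sign-extended: int32_t(-128); zero-extended: int32_t(128)
    // Equal(0x80, 0x80)    -> same verdict under either extension
    // LessThan(0x80, 0x7f) -> holds only when both sides are sign-extended
    //                         (-128 < 127), so LessThan is signed-only.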
+
+// True if this:
+//     test8(cond, value, value)
+// Is the same as this:
+//     test32(cond, signExt8(value), signExt8(value))
+template<typename MacroAssemblerType>
+inline bool isSigned(typename MacroAssemblerType::ResultCondition cond)
+{
+    switch (cond) {
+    case MacroAssemblerType::Signed:
+    case MacroAssemblerType::PositiveOrZero:
+    case MacroAssemblerType::Zero:
+    case MacroAssemblerType::NonZero:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// True if this:
+//     test8(cond, value, value)
+// Is the same as this:
+//     test32(cond, zeroExt8(value), zeroExt8(value))
+template<typename MacroAssemblerType>
+inline bool isUnsigned(typename MacroAssemblerType::ResultCondition cond)
+{
+    switch (cond) {
+    case MacroAssemblerType::Zero:
+    case MacroAssemblerType::NonZero:
+        return true;
+    default:
+        return false;
+    }
+}
+
+template<typename MacroAssemblerType>
+inline typename MacroAssemblerType::TrustedImm32 mask8OnCondition(MacroAssemblerType&, typename MacroAssemblerType::RelationalCondition cond, typename MacroAssemblerType::TrustedImm32 value)
+{
+    if (isUnsigned<MacroAssemblerType>(cond))
+        return typename MacroAssemblerType::TrustedImm32(static_cast<uint8_t>(value.m_value));
+    return typename MacroAssemblerType::TrustedImm32(static_cast<int8_t>(value.m_value));
+}
+
+template<typename MacroAssemblerType>
+inline typename MacroAssemblerType::TrustedImm32 mask8OnCondition(MacroAssemblerType&, typename MacroAssemblerType::ResultCondition cond, typename MacroAssemblerType::TrustedImm32 value)
+{
+    if (isUnsigned<MacroAssemblerType>(cond))
+        return typename MacroAssemblerType::TrustedImm32(static_cast<uint8_t>(value.m_value));
+    ASSERT_WITH_MESSAGE(cond != MacroAssemblerType::Overflow, "Overflow is not used for 8bit test operations.");
+    ASSERT(isSigned<MacroAssemblerType>(cond));
+    return typename MacroAssemblerType::TrustedImm32(static_cast<int8_t>(value.m_value));
+}
+
+template<typename MacroAssemblerType, typename Condition, typename... Args>
+void load8OnCondition(MacroAssemblerType& jit, Condition cond, Args... args)
+{
+    if (isUnsigned<MacroAssemblerType>(cond))
+        return jit.load8(std::forward<Args>(args)...);
+    return jit.load8SignedExtendTo32(std::forward<Args>(args)...);
+}
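mask8OnCondition() and load8OnCondition() are designed to be used as a pair: the immediate is truncated with the same signedness that the load extends with. For example, in the branch8() call sites patched elsewhere in this diff:

    // branch8(LessThan, address, TrustedImm32(0xff)):
    //   mask8OnCondition() -> int8_t(0xff) == -1      (LessThan is signed)
    //   load8OnCondition() -> load8SignedExtendTo32()
    // The 32-bit compare then sees the sign-extended byte against -1,
    // matching a genuine 8-bit signed compare.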
+
+} } // namespace JSC
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
index a30247d33..8d0ac915a 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
  * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -24,8 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef MacroAssemblerMIPS_h
-#define MacroAssemblerMIPS_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && CPU(MIPS)
 
@@ -34,7 +33,7 @@
 
 namespace JSC {
 
-class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler> {
+class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler, MacroAssemblerMIPS> {
 public:
     typedef MIPSRegisters::FPRegisterID FPRegisterID;
 
@@ -55,9 +54,9 @@ public:
     // For storing data loaded from the memory
     static const RegisterID dataTempRegister = MIPSRegisters::t1;
     // For storing address base
-    static const RegisterID addrTempRegister = MIPSRegisters::t2;
+    static const RegisterID addrTempRegister = MIPSRegisters::t7;
     // For storing compare result
-    static const RegisterID cmpTempRegister = MIPSRegisters::t3;
+    static const RegisterID cmpTempRegister = MIPSRegisters::t8;
 
     // FP temp register
     static const FPRegisterID fpTempRegister = MIPSRegisters::f16;
@@ -289,7 +288,7 @@ public:
     {
         if (!imm.m_value && !m_fixedWidth)
             move(MIPSRegisters::zero, dest);
-        else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
+        else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth)
             m_assembler.andi(dest, dest, imm.m_value);
         else {
             /*
@@ -305,7 +304,7 @@ public:
     {
         if (!imm.m_value && !m_fixedWidth)
             move(MIPSRegisters::zero, dest);
-        else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
+        else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth)
             m_assembler.andi(dest, src, imm.m_value);
         else {
             move(imm, immTempRegister);
@@ -313,6 +312,15 @@ public:
         }
     }
 
+    void countLeadingZeros32(RegisterID src, RegisterID dest)
+    {
+#if WTF_MIPS_ISA_AT_LEAST(32)
+        m_assembler.clz(dest, src);
+#else
+        static_assert(false, "CLZ opcode is not available for this ISA");
+#endif
+    }
+
     void lshift32(RegisterID shiftAmount, RegisterID dest)
     {
         m_assembler.sllv(dest, dest, shiftAmount);
@@ -376,12 +384,23 @@ public:
         m_assembler.orInsn(dest, op1, op2);
     }
 
+    void or32(TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        if (!imm.m_value && !m_fixedWidth)
+            return;
+
+        // TODO: Swap dataTempRegister and immTempRegister usage
+        load32(dest.m_ptr, immTempRegister);
+        or32(imm, immTempRegister);
+        store32(immTempRegister, dest.m_ptr);
+    }
+
     void or32(TrustedImm32 imm, RegisterID dest)
     {
         if (!imm.m_value && !m_fixedWidth)
             return;
 
-        if (imm.m_value > 0 && imm.m_value < 65535
+        if (imm.m_value > 0 && imm.m_value <= 65535
             && !m_fixedWidth) {
             m_assembler.ori(dest, dest, imm.m_value);
             return;
@@ -397,10 +416,12 @@ public:
 
     void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
-        if (!imm.m_value && !m_fixedWidth)
+        if (!imm.m_value && !m_fixedWidth) {
+            move(src, dest);
             return;
+        }
 
-        if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) {
+        if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) {
             m_assembler.ori(dest, src, imm.m_value);
             return;
         }
@@ -619,10 +640,28 @@ public:
     {
         m_assembler.sqrtd(dst, src);
     }
-    
-    void absDouble(FPRegisterID, FPRegisterID)
+
+    void absDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.absd(dst, src);
+    }
+
+    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
     {
-        RELEASE_ASSERT_NOT_REACHED();
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
     }
 
     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
@@ -707,7 +746,24 @@ public:
         m_assembler.lbu(dest, addrTempRegister, 0);
     }
 
-    void load8Signed(BaseIndex address, RegisterID dest)
+    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth)
+            m_assembler.lb(dest, address.base, address.offset);
+        else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                lb      dest, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lb(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         if (address.offset >= -32768 && address.offset <= 32767
             && !m_fixedWidth) {
@@ -735,6 +791,22 @@ public:
         }
     }
 
+    ALWAYS_INLINE void load8SignedExtendTo32(AbsoluteAddress address, RegisterID dest)
+    {
+        load8SignedExtendTo32(address.m_ptr, dest);
+    }
+
+    void load8SignedExtendTo32(const void* address, RegisterID dest)
+    {
+        /*
+            li  addrTemp, address
+            lb  dest, 0(addrTemp)
+        */
+        move(TrustedImmPtr(address), addrTempRegister);
+        m_assembler.lb(dest, addrTempRegister, 0);
+    }
+
+
     void load32(ImplicitAddress address, RegisterID dest)
     {
         if (address.offset >= -32768 && address.offset <= 32767
@@ -782,7 +854,53 @@ public:
 
     void load16Unaligned(BaseIndex address, RegisterID dest)
     {
-        load16(address, dest);
+        if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) {
+            /*
+                sll     addrtemp, address.index, address.scale
+                addu    addrtemp, addrtemp, address.base
+                lbu     immTemp, address.offset+x(addrtemp) (x=0 for LE, x=1 for BE)
+                lbu     dest, address.offset+x(addrtemp)    (x=1 for LE, x=0 for BE)
+                sll     dest, dest, 8
+                or      dest, dest, immTemp
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+#if CPU(BIG_ENDIAN)
+            m_assembler.lbu(immTempRegister, addrTempRegister, address.offset + 1);
+            m_assembler.lbu(dest, addrTempRegister, address.offset);
+#else
+            m_assembler.lbu(immTempRegister, addrTempRegister, address.offset);
+            m_assembler.lbu(dest, addrTempRegister, address.offset + 1);
+#endif
+            m_assembler.sll(dest, dest, 8);
+            m_assembler.orInsn(dest, dest, immTempRegister);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, address.offset >> 16
+                ori     immTemp, immTemp, address.offset & 0xffff
+                addu    addrTemp, addrTemp, immTemp
+                lbu     immTemp, x(addrtemp) (x=0 for LE, x=1 for BE)
+                lbu     dest, x(addrtemp)    (x=1 for LE, x=0 for BE)
+                sll     dest, dest, 8
+                or      dest, dest, immTemp
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, address.offset >> 16);
+            m_assembler.ori(immTempRegister, immTempRegister, address.offset);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+#if CPU(BIG_ENDIAN)
+            m_assembler.lbu(immTempRegister, addrTempRegister, 1);
+            m_assembler.lbu(dest, addrTempRegister, 0);
+#else
+            m_assembler.lbu(immTempRegister, addrTempRegister, 0);
+            m_assembler.lbu(dest, addrTempRegister, 1);
+#endif
+            m_assembler.sll(dest, dest, 8);
+            m_assembler.orInsn(dest, dest, immTempRegister);
+        }
     }
 
     void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
@@ -919,7 +1037,7 @@ public:
         }
     }
 
-    void load16Signed(BaseIndex address, RegisterID dest)
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         if (address.offset >= -32768 && address.offset <= 32767
             && !m_fixedWidth) {
@@ -1005,16 +1123,45 @@ public:
             li  addrTemp, address
             sb  src, 0(addrTemp)
         */
-        if (!imm.m_value && !m_fixedWidth) {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        if (!imm8.m_value && !m_fixedWidth) {
             move(TrustedImmPtr(address), addrTempRegister);
             m_assembler.sb(MIPSRegisters::zero, addrTempRegister, 0);
         } else {
-            move(imm, immTempRegister);
+            move(imm8, immTempRegister);
             move(TrustedImmPtr(address), addrTempRegister);
             m_assembler.sb(immTempRegister, addrTempRegister, 0);
         }
     }
 
+    void store8(TrustedImm32 imm, ImplicitAddress address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            if (!imm8.m_value)
+                m_assembler.sb(MIPSRegisters::zero, address.base, address.offset);
+            else {
+                move(imm8, immTempRegister);
+                m_assembler.sb(immTempRegister, address.base, address.offset);
+            }
+        } else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                sb      immTemp, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            if (!imm8.m_value && !m_fixedWidth)
+                m_assembler.sb(MIPSRegisters::zero, addrTempRegister, address.offset);
+            else {
+                move(imm8, immTempRegister);
+                m_assembler.sb(immTempRegister, addrTempRegister, address.offset);
+            }
+        }
+    }
+
     void store16(RegisterID src, BaseIndex address)
     {
         if (address.offset >= -32768 && address.offset <= 32767
@@ -1208,7 +1355,17 @@ public:
         return false;
 #endif
     }
-    static bool supportsFloatingPointAbs() { return false; }
+
+    static bool supportsFloatingPointAbs()
+    {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    static bool supportsFloatingPointRounding() { return false; }
 
     // Stack manipulation operations:
     //
@@ -1224,6 +1381,13 @@ public:
         m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4);
     }
 
+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.lw(dest1, MIPSRegisters::sp, 0);
+        m_assembler.lw(dest2, MIPSRegisters::sp, 4);
+        m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 8);
+    }
+
     void push(RegisterID src)
     {
         m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4);
@@ -1242,6 +1406,13 @@ public:
         push(immTempRegister);
     }
 
+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -8);
+        m_assembler.sw(src2, MIPSRegisters::sp, 4);
+        m_assembler.sw(src1, MIPSRegisters::sp, 0);
+    }
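Unlike two back-to-back push() calls, pushPair() adjusts the stack pointer once by 8 bytes and then stores both words, so sp keeps 8-byte alignment across the whole sequence:

    // pushPair(a, b): addiu sp, sp, -8 ; sw b, 4(sp) ; sw a, 0(sp)
    // popPair(a, b):  lw a, 0(sp) ; lw b, 4(sp) ; addiu sp, sp, 8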
+
     // Register move operations:
     //
     // Move values in registers.
@@ -1307,38 +1478,35 @@ public:
 
     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
     {
-        // Make sure the immediate value is unsigned 8 bits.
-        ASSERT(!(right.m_value & 0xFFFFFF00));
-        load8(left, dataTempRegister);
-        move(right, immTempRegister);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
+        move(right8, immTempRegister);
         return branch32(cond, dataTempRegister, immTempRegister);
     }
 
     Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
     {
-        // Make sure the immediate value is unsigned 8 bits.
-        ASSERT(!(right.m_value & 0xFFFFFF00));
-        load8(left, dataTempRegister);
-        move(right, immTempRegister);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
+        move(right8, immTempRegister);
         return branch32(cond, dataTempRegister, immTempRegister);
     }
 
     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
     {
-        // Make sure the immediate value is unsigned 8 bits.
-        ASSERT(!(right.m_value & 0xFFFFFF00));
-        load8(left, dataTempRegister);
-        move(right, immTempRegister);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
+        move(right8, immTempRegister);
         compare32(cond, dataTempRegister, immTempRegister, dest);
     }
 
     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
     {
-        ASSERT(!(right.m_value & 0xFFFFFF00));
-        load8(left, dataTempRegister);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
         // Be careful that the previous load8() uses immTempRegister.
         // So, we need to put move() after load8().
-        move(right, immTempRegister);
+        move(right8, immTempRegister);
         return branch32(cond, dataTempRegister, immTempRegister);
     }
 
@@ -1444,20 +1612,36 @@ public:
 
     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
         m_assembler.andInsn(cmpTempRegister, reg, mask);
-        if (cond == Zero)
+        switch (cond) {
+        case Zero:
             return branchEqual(cmpTempRegister, MIPSRegisters::zero);
-        return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        case NonZero:
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        case Signed:
+            m_assembler.slt(cmpTempRegister, cmpTempRegister, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
     }
 
     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
         if (mask.m_value == -1 && !m_fixedWidth) {
-            if (cond == Zero)
+            switch (cond) {
+            case Zero:
                 return branchEqual(reg, MIPSRegisters::zero);
-            return branchNotEqual(reg, MIPSRegisters::zero);
+            case NonZero:
+                return branchNotEqual(reg, MIPSRegisters::zero);
+            case Signed:
+                m_assembler.slt(cmpTempRegister, reg, MIPSRegisters::zero);
+                return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
         }
         move(mask, immTempRegister);
         return branchTest32(cond, reg, immTempRegister);
@@ -1477,21 +1661,24 @@ public:
 
     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address, dataTempRegister);
-        return branchTest32(cond, dataTempRegister, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address, dataTempRegister);
-        return branchTest32(cond, dataTempRegister, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask8);
     }
 
     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
         move(TrustedImmPtr(address.m_ptr), dataTempRegister);
-        load8(Address(dataTempRegister), dataTempRegister);
-        return branchTest32(cond, dataTempRegister, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(dataTempRegister), dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask8);
     }
 
     Jump jump()
@@ -1657,6 +1844,12 @@ public:
         return branchAdd32(cond, immTempRegister, dest);
     }
 
+    Jump branchAdd32(ResultCondition cond, Address address, RegisterID dest)
+    {
+        load32(address, immTempRegister);
+        return branchAdd32(cond, immTempRegister, dest);
+    }
+
     Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
         move(imm, immTempRegister);
@@ -1819,7 +2012,7 @@ public:
         return Jump();
     }
 
-    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
         move(imm, immTempRegister);
         return branchMul32(cond, immTempRegister, src, dest);
@@ -1975,6 +2168,16 @@ public:
         return Call(m_assembler.label(), Call::LinkableNear);
     }
 
+    Call nearTailCall()
+    {
+        m_assembler.nop();
+        m_assembler.nop();
+        m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 0);
+        m_assembler.nop();
+        insertRelaxationWords();
+        return Call(m_assembler.label(), Call::LinkableNearTail);
+    }
+
     Call call()
     {
         m_assembler.lui(MIPSRegisters::t9, 0);
@@ -2048,14 +2251,15 @@ public:
     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
         ASSERT((cond == Zero) || (cond == NonZero));
-        load8(address, dataTempRegister);
-        if (mask.m_value == -1 && !m_fixedWidth) {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
+        if ((mask8.m_value & 0xff) == 0xff && !m_fixedWidth) {
             if (cond == Zero)
                 m_assembler.sltiu(dest, dataTempRegister, 1);
             else
                 m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
         } else {
-            move(mask, immTempRegister);
+            move(mask8, immTempRegister);
             m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
             if (cond == Zero)
                 m_assembler.sltiu(dest, cmpTempRegister, 1);
@@ -2120,6 +2324,16 @@ public:
         return temp;
     }
 
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        m_fixedWidth = true;
+        load32(left, dataTempRegister);
+        dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+        Jump temp = branch32(cond, dataTempRegister, immTempRegister);
+        m_fixedWidth = false;
+        return temp;
+    }
+
     DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
     {
         m_fixedWidth = true;
@@ -2268,7 +2482,7 @@ public:
 #endif
     }
 
-    void loadDouble(const void* address, FPRegisterID dest)
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
     {
 #if WTF_MIPS_ISA(1)
         /*
@@ -2276,7 +2490,7 @@ public:
             lwc1        dest, 0(addrTemp)
             lwc1        dest+1, 4(addrTemp)
          */
-        move(TrustedImmPtr(address), addrTempRegister);
+        move(address, addrTempRegister);
         m_assembler.lwc1(dest, addrTempRegister, 0);
         m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
 #else
@@ -2284,7 +2498,7 @@ public:
             li          addrTemp, address
             ldc1        dest, 0(addrTemp)
         */
-        move(TrustedImmPtr(address), addrTempRegister);
+        move(address, addrTempRegister);
         m_assembler.ldc1(dest, addrTempRegister, 0);
 #endif
     }
@@ -2406,14 +2620,14 @@ public:
 #endif
     }
 
-    void storeDouble(FPRegisterID src, const void* address)
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
     {
 #if WTF_MIPS_ISA(1)
-        move(TrustedImmPtr(address), addrTempRegister);
+        move(address, addrTempRegister);
         m_assembler.swc1(src, addrTempRegister, 0);
         m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
 #else
-        move(TrustedImmPtr(address), addrTempRegister);
+        move(address, addrTempRegister);
         m_assembler.sdc1(src, addrTempRegister, 0);
 #endif
     }
@@ -2424,6 +2638,11 @@ public:
             m_assembler.movd(dest, src);
     }
 
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        convertInt32ToDouble(MIPSRegisters::zero, reg);
+    }
+
     void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
     {
         moveDouble(fr1, fpTempRegister);
@@ -2449,7 +2668,7 @@ public:
 
     void addDouble(AbsoluteAddress address, FPRegisterID dest)
     {
-        loadDouble(address.m_ptr, fpTempRegister);
+        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
         m_assembler.addd(dest, dest, fpTempRegister);
     }
 
@@ -2656,7 +2875,7 @@ public:
     {
         m_assembler.truncwd(fpTempRegister, src);
         m_assembler.mfc1(dest, fpTempRegister);
-        return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0));
+        return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0x7fffffff));
     }
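The new failure test relies on the MIPS FPU's saturating behavior: with the invalid-operation trap disabled, trunc.w.d writes 0x7fffffff for NaN and out-of-range inputs, so that value, not 0, signals failure. Illustrative inputs:

    // 1e10 -> 0x7fffffff (saturated) -> BranchIfTruncateFailed taken
    // NaN  -> 0x7fffffff             -> BranchIfTruncateFailed taken
    // 0.25 -> 0x00000000             -> succeeds; the old compare against 0
    //                                   misreported this case as a failure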
 
     // Result is undefined if the value is outside of the integer range.
@@ -2740,6 +2959,18 @@ public:
         m_assembler.sync();
     }
 
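+    // Both abortWithReason() variants park their arguments in fixed temp
+    // registers (dataTempRegister for the reason, immTempRegister for the
+    // auxiliary value) so that both survive into the register state captured
+    // when breakpoint() traps.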
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), dataTempRegister);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), immTempRegister);
+        abortWithReason(reason);
+    }
+
     static FunctionPtr readCallTarget(CodeLocationCall call)
     {
         return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation())));
@@ -2757,6 +2988,13 @@ public:
     }
 
     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
 
     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
     {
@@ -2774,23 +3012,14 @@ public:
         return CodeLocationLabel();
     }
 
-    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
     {
         UNREACHABLE_FOR_PLATFORM();
     }
 
-
-private:
-    // If m_fixedWidth is true, we will generate a fixed number of instructions.
-    // Otherwise, we can emit any number of instructions.
-    bool m_fixedWidth;
-
-    friend class LinkBuffer;
-    friend class RepatchBuffer;
-
-    static void linkCall(void* code, Call call, FunctionPtr function)
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
     {
-        MIPSAssembler::linkCall(code, call.m_label, function.value());
+        UNREACHABLE_FOR_PLATFORM();
     }
 
     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
@@ -2803,10 +3032,23 @@ private:
         MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
     }
 
+private:
+    // If m_fixedWidth is true, we will generate a fixed number of instructions.
+    // Otherwise, we can emit any number of instructions.
+    bool m_fixedWidth;
+
+    friend class LinkBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (call.isFlagSet(Call::Tail))
+            MIPSAssembler::linkJump(code, call.m_label, function.value());
+        else
+            MIPSAssembler::linkCall(code, call.m_label, function.value());
+    }
+
 };
 
-}
+} // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(MIPS)
-
-#endif // MacroAssemblerMIPS_h
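The branchTruncateDoubleToInt32 change above (comparing the result against 0x7fffffff rather than 0) leans on the MIPS trunc.w.d instruction saturating to 0x7fffffff when the operand is NaN or out of int32 range; 0 is a legitimate truncation result and cannot serve as a failure sentinel. A minimal host-side sketch of the assumed semantics, with truncateLikeMipsTruncWD as an illustrative name only:

    #include <cmath>
    #include <cstdint>

    // Mirrors the saturation behaviour trunc.w.d is assumed to have.
    int32_t truncateLikeMipsTruncWD(double value)
    {
        if (std::isnan(value) || value >= 2147483648.0 || value < -2147483648.0)
            return 0x7fffffff; // MIPS default result for an invalid conversion
        return static_cast<int32_t>(value);
    }

    // truncateLikeMipsTruncWD(0.4) == 0 is a successful truncation, which is
    // why the old comparison against TrustedImm32(0) misclassified it.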
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp
new file mode 100644
index 000000000..c6c175752
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MacroAssemblerPrinter.h"
+
+#if ENABLE(MASM_PROBE)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+using CPUState = MacroAssembler::CPUState;
+using ProbeContext = MacroAssembler::ProbeContext;
+using RegisterID = MacroAssembler::RegisterID;
+using FPRegisterID = MacroAssembler::FPRegisterID;
+
+static void printIndent(int indentation)
+{
+    for (; indentation > 0; indentation--)
+        dataLog("    ");
+}
+
+#define INDENT printIndent(indentation)
+    
+void printCPU(CPUState& cpu, int indentation)
+{
+    INDENT, dataLog("cpu: {\n");
+    printCPURegisters(cpu, indentation + 1);
+    INDENT, dataLog("}\n");
+}
+
+void printCPURegisters(CPUState& cpu, int indentation)
+{
+#if USE(JSVALUE32_64)
+    #define INTPTR_HEX_VALUE_FORMAT "0x%08lx"
+#else
+    #define INTPTR_HEX_VALUE_FORMAT "0x%016lx"
+#endif
+
+    #define PRINT_GPREGISTER(_type, _regName) { \
+        intptr_t value = reinterpret_cast<intptr_t>(cpu._regName); \
+        INDENT, dataLogF("%6s: " INTPTR_HEX_VALUE_FORMAT "  %ld\n", #_regName, value, value) ; \
+    }
+    FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER)
+    FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER)
+    #undef PRINT_GPREGISTER
+    #undef INTPTR_HEX_VALUE_FORMAT
+    
+    #define PRINT_FPREGISTER(_type, _regName) { \
+        uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \
+        double* d = reinterpret_cast<double*>(&cpu._regName); \
+        INDENT, dataLogF("%6s: 0x%016llx  %.13g\n", #_regName, *u, *d); \
+    }
+    FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER)
+    #undef PRINT_FPREGISTER
+}
+
+static void printPC(CPUState& cpu)
+{
+    union {
+        void* voidPtr;
+        intptr_t intptrValue;
+    } u;
+#if CPU(X86) || CPU(X86_64)
+    u.voidPtr = cpu.eip;
+#elif CPU(ARM_TRADITIONAL) || CPU(ARM_THUMB2) || CPU(ARM64)
+    u.voidPtr = cpu.pc;
+#else
+#error "Unsupported CPU"
+#endif
+    dataLogF("pc:<%p %ld>", u.voidPtr, u.intptrValue);
+}
+
+void printRegister(CPUState& cpu, RegisterID regID)
+{
+    const char* name = CPUState::gprName(regID);
+    union {
+        void* voidPtr;
+        intptr_t intptrValue;
+    } u;
+    u.voidPtr = cpu.gpr(regID);
+    dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue);
+}
+
+void printRegister(CPUState& cpu, FPRegisterID regID)
+{
+    const char* name = CPUState::fprName(regID);
+    union {
+        double doubleValue;
+        uint64_t uint64Value;
+    } u;
+    u.doubleValue = cpu.fpr(regID);
+    dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue);
+}
+
+void printMemory(CPUState& cpu, const Memory& memory)
+{
+    uint8_t* ptr = nullptr;
+    switch (memory.addressType) {
+    case Memory::AddressType::Address: {
+        ptr = reinterpret_cast<uint8_t*>(cpu.gpr(memory.u.address.base));
+        ptr += memory.u.address.offset;
+        break;
+    }
+    case Memory::AddressType::AbsoluteAddress: {
+        ptr = reinterpret_cast<uint8_t*>(const_cast<void*>(memory.u.absoluteAddress.m_ptr));
+        break;
+    }
+    }
+
+    if (memory.dumpStyle == Memory::SingleWordDump) {
+        if (memory.numBytes == sizeof(int8_t)) {
+            auto p = reinterpret_cast<int8_t*>(ptr);
+            dataLogF("%p:<0x%02x %d>", p, *p, *p);
+            return;
+        }
+        if (memory.numBytes == sizeof(int16_t)) {
+            auto p = reinterpret_cast<int16_t*>(ptr);
+            dataLogF("%p:<0x%04x %d>", p, *p, *p);
+            return;
+        }
+        if (memory.numBytes == sizeof(int32_t)) {
+            auto p = reinterpret_cast<int32_t*>(ptr);
+            dataLogF("%p:<0x%08x %d>", p, *p, *p);
+            return;
+        }
+        if (memory.numBytes == sizeof(int64_t)) {
+            auto p = reinterpret_cast<int64_t*>(ptr);
+            dataLogF("%p:<0x%016llx %lld>", p, *p, *p);
+            return;
+        }
+        // Else, unknown word size. Fall through and dump in the generic way.
+    }
+
+    // Generic dump: dump rows of 16 bytes in 4 byte groupings.
+    size_t numBytes = memory.numBytes;
+    for (size_t i = 0; i < numBytes; i++) {
+        if (!(i % 16))
+            dataLogF("%p: ", &ptr[i]);
+        else if (!(i % 4))
+            dataLog(" ");
+
+        dataLogF("%02x", ptr[i]);
+
+        if (i % 16 == 15)
+            dataLog("\n");
+    }
+    if (numBytes % 16 < 15)
+        dataLog("\n");
+}
+
+void MacroAssemblerPrinter::printCallback(ProbeContext* context)
+{
+    typedef PrintArg Arg;
+    PrintArgsList& argsList =
+        *reinterpret_cast<PrintArgsList*>(context->arg1);
+    for (size_t i = 0; i < argsList.size(); i++) {
+        auto& arg = argsList[i];
+        switch (arg.type) {
+        case Arg::Type::AllRegisters:
+            printCPU(context->cpu, 1);
+            break;
+        case Arg::Type::PCRegister:
+            printPC(context->cpu);
+            break;
+        case Arg::Type::RegisterID:
+            printRegister(context->cpu, arg.u.gpRegisterID);
+            break;
+        case Arg::Type::FPRegisterID:
+            printRegister(context->cpu, arg.u.fpRegisterID);
+            break;
+        case Arg::Type::Memory:
+            printMemory(context->cpu, arg.u.memory);
+            break;
+        case Arg::Type::ConstCharPtr:
+            dataLog(arg.u.constCharPtr);
+            break;
+        case Arg::Type::ConstVoidPtr:
+            dataLogF("%p", arg.u.constVoidPtr);
+            break;
+        case Arg::Type::IntptrValue:
+            dataLog(arg.u.intptrValue);
+            break;
+        case Arg::Type::UintptrValue:
+            dataLog(arg.u.uintptrValue);
+            break;
+        }
+    }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(MASM_PROBE)
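printCallback above is a tagged-union dispatcher: the probe hands it a pointer to a heap-allocated argument list, and each entry carries a type tag that selects which union member to read. A self-contained sketch of that pattern, with all names illustrative rather than taken from the patch:

    #include <cstdio>
    #include <vector>

    struct Arg {
        enum class Type { Int, CString } type;
        union { long intValue; const char* cstr; } u;
    };

    static Arg makeInt(long v) { Arg a; a.type = Arg::Type::Int; a.u.intValue = v; return a; }
    static Arg makeStr(const char* s) { Arg a; a.type = Arg::Type::CString; a.u.cstr = s; return a; }

    static void printAll(const std::vector<Arg>& args)
    {
        for (auto& arg : args) {
            switch (arg.type) {
            case Arg::Type::Int:
                std::printf("%ld", arg.u.intValue);
                break;
            case Arg::Type::CString:
                std::printf("%s", arg.u.cstr);
                break;
            }
        }
    }

    int main()
    {
        printAll({ makeStr("x:"), makeInt(42), makeStr("\n") }); // prints "x:42"
    }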
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h
new file mode 100644
index 000000000..bbce7ee58
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(MASM_PROBE)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+// What is MacroAssembler::print()?
+// ===============================
+// MacroAssembler::print() makes it easy to add print logging
+// from JIT compiled code, and can be used to print all types of values
+// at runtime, e.g. CPU register values being operated on by the compiled
+// code.
+//
+// print() is built on top of MacroAssembler::probe(), and hence
+// inserting logging in JIT compiled code will not perturb register values.
+// The only register value that is perturbed is the PC (program counter)
+// since there is now more compiled code to do the printing.
+//
+// How to use MacroAssembler::print()?
+// =====================================
+// 1. #include "MacroAssemblerPrinter.h" in the JIT file where you want to use print().
+//
+// 2. Add print() calls like these in your JIT code:
+//
+//      jit.print("Hello world\n"); // Emits code to print the string.
+//
+//      CodeBlock* cb = ...;
+//      jit.print(cb, "\n");        // Emits code to print the pointer value.
+//
+//      RegisterID regID = ...;
+//      jit.print(regID, "\n");     // Emits code to print the register value (not the id).
+//
+//      // Emits code to print all registers. Unlike other items, this prints
+//      // multiple lines as follows:
+//      //      cpu {
+//      //          eax: 0x123456789
+//      //          ebx: 0x000000abc
+//      //          ...
+//      //      }
+//      jit.print(AllRegisters());
+//
+//      jit.print(MemWord<uint8_t>(regID), "\n");   // Emits code to print a byte pointed to by the register.
+//      jit.print(MemWord<uint32_t>(regID), "\n");  // Emits code to print a 32-bit word pointed to by the register.
+//
+//      jit.print(MemWord<uint8_t>(Address(regID, 23)), "\n");    // Emits code to print a byte at the address.
+//      jit.print(MemWord<intptr_t>(AbsoluteAddress(&cb)), "\n"); // Emits code to print an intptr_t sized word at the address.
+//
+//      jit.print(Memory(reg, 100), "\n");              // Emits code to print 100 bytes at the address pointed to by the register.
+//      jit.print(Memory(Address(reg, 4), 100), "\n");  // Emits code to print 100 bytes at the address.
+//
+//      // Print multiple things at once. This incurs the probe overhead only once
+//      // to print all the items.
+//      jit.print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters());
+//
+//   The types of values that can be printed are encapsulated in the PrintArg struct below.
+//
+//   Note: print() does not automatically insert a '\n' at the end of the line.
+//   If you want a '\n', you'll have to add it explicitly (as in the examples above).
+
+
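+// A minimal usage sketch (hypothetical helper, assuming a MacroAssembler& jit
+// and a build with MASM_PROBE enabled):
+//
+//      static void emitRegisterDump(MacroAssembler& jit, MacroAssembler::RegisterID reg)
+//      {
+//          jit.print("reg ", reg, " -> ", MemWord<uint32_t>(reg), "\n");
+//      }
+//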
+// These are marker types used only with MacroAssemblerPrinter::print().
+// See MacroAssemblerPrinter::print() below for details.
+struct AllRegisters { };
+struct PCRegister { };
+
+struct Memory {
+    using Address = MacroAssembler::Address;
+    using AbsoluteAddress = MacroAssembler::AbsoluteAddress;
+    using RegisterID = MacroAssembler::RegisterID;
+
+    enum class AddressType {
+        Address,
+        AbsoluteAddress,
+    };
+
+    enum DumpStyle {
+        SingleWordDump,
+        GenericDump,
+    };
+
+    Memory(RegisterID& reg, size_t bytes, DumpStyle style = GenericDump)
+        : addressType(AddressType::Address)
+        , dumpStyle(style)
+        , numBytes(bytes)
+    {
+        u.address = Address(reg, 0);
+    }
+
+    Memory(const Address& address, size_t bytes, DumpStyle style = GenericDump)
+        : addressType(AddressType::Address)
+        , dumpStyle(style)
+        , numBytes(bytes)
+    {
+        u.address = address;
+    }
+
+    Memory(const AbsoluteAddress& address, size_t bytes, DumpStyle style = GenericDump)
+        : addressType(AddressType::AbsoluteAddress)
+        , dumpStyle(style)
+        , numBytes(bytes)
+    {
+        u.absoluteAddress = address;
+    }
+
+    AddressType addressType;
+    DumpStyle dumpStyle;
+    size_t numBytes;
+    union UnionedAddress {
+        UnionedAddress() { }
+
+        Address address;
+        AbsoluteAddress absoluteAddress;
+    } u;
+};
+
+template <typename IntType>
+struct MemWord : public Memory {
+    MemWord(RegisterID& reg)
+        : Memory(reg, sizeof(IntType), Memory::SingleWordDump)
+    { }
+
+    MemWord(const Address& address)
+        : Memory(address, sizeof(IntType), Memory::SingleWordDump)
+    { }
+
+    MemWord(const AbsoluteAddress& address)
+        : Memory(address, sizeof(IntType), Memory::SingleWordDump)
+    { }
+};
+
+
+class MacroAssemblerPrinter {
+    using CPUState = MacroAssembler::CPUState;
+    using ProbeContext = MacroAssembler::ProbeContext;
+    using RegisterID = MacroAssembler::RegisterID;
+    using FPRegisterID = MacroAssembler::FPRegisterID;
+    
+public:
+    template<typename... Arguments>
+    static void print(MacroAssembler* masm, Arguments... args)
+    {
+        auto argsList = std::make_unique<PrintArgsList>();
+        appendPrintArg(argsList.get(), args...);
+        masm->probe(printCallback, argsList.release(), 0);
+    }
+    
+private:
+    struct PrintArg {
+
+        enum class Type {
+            AllRegisters,
+            PCRegister,
+            RegisterID,
+            FPRegisterID,
+            Memory,
+            ConstCharPtr,
+            ConstVoidPtr,
+            IntptrValue,
+            UintptrValue,
+        };
+        
+        PrintArg(AllRegisters&)
+            : type(Type::AllRegisters)
+        {
+        }
+        
+        PrintArg(PCRegister&)
+            : type(Type::PCRegister)
+        {
+        }
+        
+        PrintArg(RegisterID regID)
+            : type(Type::RegisterID)
+        {
+            u.gpRegisterID = regID;
+        }
+        
+        PrintArg(FPRegisterID regID)
+            : type(Type::FPRegisterID)
+        {
+            u.fpRegisterID = regID;
+        }
+
+        PrintArg(const Memory& memory)
+            : type(Type::Memory)
+        {
+            u.memory = memory;
+        }
+
+        PrintArg(const char* ptr)
+            : type(Type::ConstCharPtr)
+        {
+            u.constCharPtr = ptr;
+        }
+        
+        PrintArg(const void* ptr)
+            : type(Type::ConstVoidPtr)
+        {
+            u.constVoidPtr = ptr;
+        }
+        
+        PrintArg(int value)
+            : type(Type::IntptrValue)
+        {
+            u.intptrValue = value;
+        }
+        
+        PrintArg(unsigned value)
+            : type(Type::UintptrValue)
+        {
+            u.uintptrValue = value;
+        }
+        
+        PrintArg(intptr_t value)
+            : type(Type::IntptrValue)
+        {
+            u.intptrValue = value;
+        }
+        
+        PrintArg(uintptr_t value)
+            : type(Type::UintptrValue)
+        {
+            u.uintptrValue = value;
+        }
+        
+        Type type;
+        union Value {
+            Value() { }
+
+            RegisterID gpRegisterID;
+            FPRegisterID fpRegisterID;
+            Memory memory;
+            const char* constCharPtr;
+            const void* constVoidPtr;
+            intptr_t intptrValue;
+            uintptr_t uintptrValue;
+        } u;
+    };
+
+    typedef Vector<PrintArg> PrintArgsList;
+    
+    template<typename FirstArg, typename... Arguments>
+    static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... otherArgs)
+    {
+        argsList->append(PrintArg(firstArg));
+        appendPrintArg(argsList, otherArgs...);
+    }
+    
+    static void appendPrintArg(PrintArgsList*) { }
+
+private:
+    static void printCallback(ProbeContext*);
+};
+
+template<typename... Arguments>
+void MacroAssembler::print(Arguments... args)
+{
+    MacroAssemblerPrinter::print(this, args...);
+}
+
+
+// These printers will print a block of information. That block may be
+// indented with the specified indentation.
+void printCPU(MacroAssembler::CPUState&, int indentation = 0);
+void printCPURegisters(MacroAssembler::CPUState&, int indentation = 0);
+
+// These printers will print the specified information in line in the
+// print stream. Hence, no indentation will be applied.
+void printRegister(MacroAssembler::CPUState&, MacroAssembler::RegisterID);
+void printRegister(MacroAssembler::CPUState&, MacroAssembler::FPRegisterID);
+void printMemory(MacroAssembler::CPUState&, const Memory&);
+
+} // namespace JSC
+
+#endif // ENABLE(MASM_PROBE)
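MacroAssemblerPrinter::print() flattens its variadic arguments into the PrintArgsList through the recursive appendPrintArg overloads: each call converts the head argument to a PrintArg and recurses on the tail, and the zero-argument overload terminates the recursion. A standalone sketch of that recursion, with std::to_string standing in for PrintArg construction and all names illustrative:

    #include <string>
    #include <vector>

    // Base case: nothing left to pack; ends the recursion.
    static void append(std::vector<std::string>&) { }

    // Recursive case: pack the head, then recurse on the tail.
    template<typename First, typename... Rest>
    static void append(std::vector<std::string>& out, First first, Rest... rest)
    {
        out.push_back(std::to_string(first));
        append(out, rest...);
    }

    int main()
    {
        std::vector<std::string> args;
        append(args, 1, 2.5, 42u); // packs "1", "2.500000", "42"
    }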
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
deleted file mode 100644
index 32ea2b1b5..000000000
--- a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
+++ /dev/null
@@ -1,2543 +0,0 @@
-/*
- * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
- * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef MacroAssemblerSH4_h
-#define MacroAssemblerSH4_h
-
-#if ENABLE(ASSEMBLER) && CPU(SH4)
-
-#include "SH4Assembler.h"
-#include "AbstractMacroAssembler.h"
-#include <wtf/Assertions.h>
-
-namespace JSC {
-
-class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler> {
-public:
-    typedef SH4Assembler::FPRegisterID FPRegisterID;
-
-    static const Scale ScalePtr = TimesFour;
-    static const FPRegisterID fscratch = SH4Registers::dr10;
-    static const RegisterID stackPointerRegister = SH4Registers::sp;
-    static const RegisterID framePointerRegister = SH4Registers::fp;
-    static const RegisterID linkRegister = SH4Registers::pr;
-    static const RegisterID scratchReg3 = SH4Registers::r13;
-
-    static const int MaximumCompactPtrAlignedAddressOffset = 60;
-
-    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
-    {
-        return (value >= 0) && (value <= MaximumCompactPtrAlignedAddressOffset) && (!(value & 3));
-    }
-
-    enum RelationalCondition {
-        Equal = SH4Assembler::EQ,
-        NotEqual = SH4Assembler::NE,
-        Above = SH4Assembler::HI,
-        AboveOrEqual = SH4Assembler::HS,
-        Below = SH4Assembler::LI,
-        BelowOrEqual = SH4Assembler::LS,
-        GreaterThan = SH4Assembler::GT,
-        GreaterThanOrEqual = SH4Assembler::GE,
-        LessThan = SH4Assembler::LT,
-        LessThanOrEqual = SH4Assembler::LE
-    };
-
-    enum ResultCondition {
-        Overflow = SH4Assembler::OF,
-        Signed = SH4Assembler::SI,
-        PositiveOrZero = SH4Assembler::NS,
-        Zero = SH4Assembler::EQ,
-        NonZero = SH4Assembler::NE
-    };
-
-    enum DoubleCondition {
-        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
-        DoubleEqual = SH4Assembler::EQ,
-        DoubleNotEqual = SH4Assembler::NE,
-        DoubleGreaterThan = SH4Assembler::GT,
-        DoubleGreaterThanOrEqual = SH4Assembler::GE,
-        DoubleLessThan = SH4Assembler::LT,
-        DoubleLessThanOrEqual = SH4Assembler::LE,
-        // If either operand is NaN, these conditions always evaluate to true.
-        DoubleEqualOrUnordered = SH4Assembler::EQU,
-        DoubleNotEqualOrUnordered = SH4Assembler::NEU,
-        DoubleGreaterThanOrUnordered = SH4Assembler::GTU,
-        DoubleGreaterThanOrEqualOrUnordered = SH4Assembler::GEU,
-        DoubleLessThanOrUnordered = SH4Assembler::LTU,
-        DoubleLessThanOrEqualOrUnordered = SH4Assembler::LEU,
-    };
-
-    RegisterID claimScratch()
-    {
-        return m_assembler.claimScratch();
-    }
-
-    void releaseScratch(RegisterID reg)
-    {
-        m_assembler.releaseScratch(reg);
-    }
-
-    static RelationalCondition invert(RelationalCondition cond)
-    {
-        switch (cond) {
-        case Equal:
-            return NotEqual;
-        case NotEqual:
-            return Equal;
-        case Above:
-            return BelowOrEqual;
-        case AboveOrEqual:
-            return Below;
-        case Below:
-            return AboveOrEqual;
-        case BelowOrEqual:
-            return Above;
-        case GreaterThan:
-            return LessThanOrEqual;
-        case GreaterThanOrEqual:
-            return LessThan;
-        case LessThan:
-            return GreaterThanOrEqual;
-        case LessThanOrEqual:
-            return GreaterThan;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
-
-    // Integer arithmetic operations
-
-    void add32(RegisterID src, RegisterID dest)
-    {
-        m_assembler.addlRegReg(src, dest);
-    }
-
-    void add32(RegisterID src1, RegisterID src2, RegisterID dest)
-    {
-        if (src1 == dest)
-            add32(src2, dest);
-        else {
-            move(src2, dest);
-            add32(src1, dest);
-        }
-    }
-
-    void add32(TrustedImm32 imm, RegisterID dest)
-    {
-        if (!imm.m_value)
-            return;
-
-        if (m_assembler.isImmediate(imm.m_value)) {
-            m_assembler.addlImm8r(imm.m_value, dest);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(imm.m_value, scr);
-        m_assembler.addlRegReg(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
-    {
-        move(src, dest);
-        add32(imm, dest);
-    }
-
-    void add32(TrustedImm32 imm, Address address)
-    {
-        if (!imm.m_value)
-            return;
-
-        RegisterID scr = claimScratch();
-        load32(address, scr);
-        add32(imm, scr);
-        store32(scr, address);
-        releaseScratch(scr);
-    }
-
-    void add32(Address src, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        load32(src, scr);
-        m_assembler.addlRegReg(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void add32(AbsoluteAddress src, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        load32(src.m_ptr, scr);
-        m_assembler.addlRegReg(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void and32(RegisterID src, RegisterID dest)
-    {
-        m_assembler.andlRegReg(src, dest);
-    }
-
-    void and32(RegisterID src1, RegisterID src2, RegisterID dest)
-    {
-        if (src1 == dest)
-            and32(src2, dest);
-        else {
-            move(src2, dest);
-            and32(src1, dest);
-        }
-    }
-
-    void and32(Address src, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        load32(src, scr);
-        and32(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void and32(TrustedImm32 imm, RegisterID dest)
-    {
-        if (!imm.m_value) {
-            m_assembler.movImm8(0, dest);
-            return;
-        }
-
-        if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
-            m_assembler.andlImm8r(imm.m_value, dest);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(imm.m_value, scr);
-        m_assembler.andlRegReg(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
-    {
-        if (src != dest) {
-            move(imm, dest);
-            and32(src, dest);
-            return;
-        }
-
-        and32(imm, dest);
-    }
-
-    void lshift32(RegisterID shiftamount, RegisterID dest)
-    {
-        RegisterID shiftTmp = claimScratch();
-        m_assembler.loadConstant(0x1f, shiftTmp);
-        m_assembler.andlRegReg(shiftamount, shiftTmp);
-        m_assembler.shldRegReg(dest, shiftTmp);
-        releaseScratch(shiftTmp);
-    }
-
-    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
-    {
-        move(src, dest);
-        lshift32(shiftAmount, dest);
-    }
-
-    void lshift32(TrustedImm32 imm, RegisterID dest)
-    {
-        int immMasked = imm.m_value & 0x1f;
-        if (!immMasked)
-            return;
-
-        if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
-            m_assembler.shllImm8r(immMasked, dest);
-            return;
-        }
-
-        RegisterID shiftTmp = claimScratch();
-        m_assembler.loadConstant(immMasked, shiftTmp);
-        m_assembler.shldRegReg(dest, shiftTmp);
-        releaseScratch(shiftTmp);
-    }
-
-    void lshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
-    {
-        move(src, dest);
-        lshift32(shiftamount, dest);
-    }
-
-    void mul32(RegisterID src, RegisterID dest)
-    {
-        mul32(src, dest, dest);    
-    }
-
-    void mul32(RegisterID src1, RegisterID src2, RegisterID dest)
-    {
-        m_assembler.imullRegReg(src1, src2);
-        m_assembler.stsmacl(dest);
-    }
-
-    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
-    {
-        if (src == dest) {
-            RegisterID immval = claimScratch();
-            move(imm, immval);
-            mul32(immval, dest);
-            releaseScratch(immval);
-        } else {
-            move(imm, dest);
-            mul32(src, dest);
-        }
-    }
-
-    void or32(RegisterID src, RegisterID dest)
-    {
-        m_assembler.orlRegReg(src, dest);
-    }
-
-    void or32(TrustedImm32 imm, RegisterID dest)
-    {
-        if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
-            m_assembler.orlImm8r(imm.m_value, dest);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(imm.m_value, scr);
-        m_assembler.orlRegReg(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
-    {
-        if (op1 == op2)
-            move(op1, dest);
-        else if (op1 == dest)
-            or32(op2, dest);
-        else {
-            move(op2, dest);
-            or32(op1, dest);
-        }
-    }
-
-    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
-    {
-        if (src != dest) {
-            move(imm, dest);
-            or32(src, dest);
-            return;
-        }
-
-        or32(imm, dest);
-    }
-
-    void or32(RegisterID src, AbsoluteAddress address)
-    {
-        RegisterID destptr = claimScratch();
-        move(TrustedImmPtr(address.m_ptr), destptr);
-        RegisterID destval = claimScratch();
-        m_assembler.movlMemReg(destptr, destval);
-        m_assembler.orlRegReg(src, destval);
-        m_assembler.movlRegMem(destval, destptr);
-        releaseScratch(destval);
-        releaseScratch(destptr);
-    }
-
-    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
-    {
-        if (src != dest) {
-            move(imm, dest);
-            xor32(src, dest);
-            return;
-        }
-
-        xor32(imm, dest);
-    }
-
-    void rshift32(RegisterID shiftamount, RegisterID dest)
-    {
-        RegisterID shiftTmp = claimScratch();
-        m_assembler.loadConstant(0x1f, shiftTmp);
-        m_assembler.andlRegReg(shiftamount, shiftTmp);
-        m_assembler.neg(shiftTmp, shiftTmp);
-        m_assembler.shadRegReg(dest, shiftTmp);
-        releaseScratch(shiftTmp);
-    }
-
-    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
-    {
-        move(src, dest);
-        rshift32(shiftAmount, dest);
-    }
-
-    void rshift32(TrustedImm32 imm, RegisterID dest)
-    {
-        int immMasked = imm.m_value & 0x1f;
-        if (!immMasked)
-            return;
-
-        if (immMasked == 1) {
-            m_assembler.sharImm8r(immMasked, dest);
-            return;
-        }
-
-        RegisterID shiftTmp = claimScratch();
-        m_assembler.loadConstant(-immMasked, shiftTmp);
-        m_assembler.shadRegReg(dest, shiftTmp);
-        releaseScratch(shiftTmp);
-    }
-
-    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
-    {
-        move(src, dest);
-        rshift32(imm, dest);
-    }
-
-    void sub32(RegisterID src, RegisterID dest)
-    {
-        m_assembler.sublRegReg(src, dest);
-    }
-
-    void sub32(TrustedImm32 imm, AbsoluteAddress address)
-    {
-        if (!imm.m_value)
-            return;
-
-        RegisterID result = claimScratch();
-        RegisterID scratchReg = claimScratch();
-
-        move(TrustedImmPtr(address.m_ptr), scratchReg);
-        m_assembler.movlMemReg(scratchReg, result);
-
-        if (m_assembler.isImmediate(-imm.m_value))
-            m_assembler.addlImm8r(-imm.m_value, result);
-        else {
-            m_assembler.loadConstant(imm.m_value, scratchReg3);
-            m_assembler.sublRegReg(scratchReg3, result);
-        }
-
-        store32(result, scratchReg);
-        releaseScratch(result);
-        releaseScratch(scratchReg);
-    }
-
-    void sub32(TrustedImm32 imm, Address address)
-    {
-        add32(TrustedImm32(-imm.m_value), address);
-    }
-
-    void add32(TrustedImm32 imm, AbsoluteAddress address)
-    {
-        if (!imm.m_value)
-            return;
-
-        RegisterID result = claimScratch();
-        RegisterID scratchReg = claimScratch();
-
-        move(TrustedImmPtr(address.m_ptr), scratchReg);
-        m_assembler.movlMemReg(scratchReg, result);
-
-        if (m_assembler.isImmediate(imm.m_value))
-            m_assembler.addlImm8r(imm.m_value, result);
-        else {
-            m_assembler.loadConstant(imm.m_value, scratchReg3);
-            m_assembler.addlRegReg(scratchReg3, result);
-        }
-
-        store32(result, scratchReg);
-        releaseScratch(result);
-        releaseScratch(scratchReg);
-    }
-
-    void add64(TrustedImm32 imm, AbsoluteAddress address)
-    {
-        RegisterID scr1 = claimScratch();
-        RegisterID scr2 = claimScratch();
-
-        // Add 32-bit LSB first.
-        move(TrustedImmPtr(address.m_ptr), scratchReg3);
-        m_assembler.movlMemReg(scratchReg3, scr1); // scr1 = 32-bit LSB of int64 @ address
-        m_assembler.loadConstant(imm.m_value, scr2);
-        m_assembler.clrt();
-        m_assembler.addclRegReg(scr1, scr2);
-        m_assembler.movlRegMem(scr2, scratchReg3); // Update address with 32-bit LSB result.
-
-        // Then add 32-bit MSB.
-        m_assembler.addlImm8r(4, scratchReg3);
-        m_assembler.movlMemReg(scratchReg3, scr1); // scr1 = 32-bit MSB of int64 @ address
-        m_assembler.movt(scr2);
-        if (imm.m_value < 0)
-            m_assembler.addlImm8r(-1, scr2); // Sign extend imm value if needed.
-        m_assembler.addvlRegReg(scr2, scr1);
-        m_assembler.movlRegMem(scr1, scratchReg3); // Update (address + 4) with 32-bit MSB result.
-
-        releaseScratch(scr2);
-        releaseScratch(scr1);
-    }
-
-    void sub32(TrustedImm32 imm, RegisterID dest)
-    {
-        if (!imm.m_value)
-            return;
-
-        if (m_assembler.isImmediate(-imm.m_value)) {
-            m_assembler.addlImm8r(-imm.m_value, dest);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(imm.m_value, scr);
-        m_assembler.sublRegReg(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void sub32(Address src, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        load32(src, scr);
-        m_assembler.sublRegReg(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void xor32(RegisterID src, RegisterID dest)
-    {
-        m_assembler.xorlRegReg(src, dest);
-    }
-
-    void xor32(RegisterID src1, RegisterID src2, RegisterID dest)
-    {
-        if (src1 == dest)
-            xor32(src2, dest);
-        else {
-            move(src2, dest);
-            xor32(src1, dest);
-        }
-    }
-
-    void xor32(TrustedImm32 imm, RegisterID srcDest)
-    {
-        if (imm.m_value == -1) {
-            m_assembler.notlReg(srcDest, srcDest);
-            return;
-        }
-
-        if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) {
-            RegisterID scr = claimScratch();
-            m_assembler.loadConstant(imm.m_value, scr);
-            m_assembler.xorlRegReg(scr, srcDest);
-            releaseScratch(scr);
-            return;
-        }
-
-        m_assembler.xorlImm8r(imm.m_value, srcDest);
-    }
-
-    void compare32(int imm, RegisterID dst, RelationalCondition cond)
-    {
-        if (((cond == Equal) || (cond == NotEqual)) && (dst == SH4Registers::r0) && m_assembler.isImmediate(imm)) {
-            m_assembler.cmpEqImmR0(imm, dst);
-            return;
-        }
-
-        if (((cond == Equal) || (cond == NotEqual)) && !imm) {
-            m_assembler.testlRegReg(dst, dst);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(imm, scr);
-        m_assembler.cmplRegReg(scr, dst, SH4Condition(cond));
-        releaseScratch(scr);
-    }
-
-    void compare32(int offset, RegisterID base, RegisterID left, RelationalCondition cond)
-    {
-        RegisterID scr = claimScratch();
-        if (!offset) {
-            m_assembler.movlMemReg(base, scr);
-            m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
-            releaseScratch(scr);
-            return;
-        }
-
-        if ((offset < 0) || (offset >= 64)) {
-            m_assembler.loadConstant(offset, scr);
-            m_assembler.addlRegReg(base, scr);
-            m_assembler.movlMemReg(scr, scr);
-            m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
-            releaseScratch(scr);
-            return;
-        }
-
-        m_assembler.movlMemReg(offset >> 2, base, scr);
-        m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
-        releaseScratch(scr);
-    }
-
-    void testImm(int imm, int offset, RegisterID base)
-    {
-        RegisterID scr = claimScratch();
-        load32(base, offset, scr);
-
-        RegisterID scr1 = claimScratch();
-        move(TrustedImm32(imm), scr1);
-
-        m_assembler.testlRegReg(scr, scr1);
-        releaseScratch(scr);
-        releaseScratch(scr1);
-    }
-
-    void testlImm(int imm, RegisterID dst)
-    {
-        if ((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0)) {
-            m_assembler.testlImm8r(imm, dst);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(imm, scr);
-        m_assembler.testlRegReg(scr, dst);
-        releaseScratch(scr);
-    }
-
-    void compare32(RegisterID right, int offset, RegisterID base, RelationalCondition cond)
-    {
-        if (!offset) {
-            RegisterID scr = claimScratch();
-            m_assembler.movlMemReg(base, scr);
-            m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
-            releaseScratch(scr);
-            return;
-        }
-
-        if ((offset < 0) || (offset >= 64)) {
-            RegisterID scr = claimScratch();
-            m_assembler.loadConstant(offset, scr);
-            m_assembler.addlRegReg(base, scr);
-            m_assembler.movlMemReg(scr, scr);
-            m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
-            releaseScratch(scr);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        m_assembler.movlMemReg(offset >> 2, base, scr);
-        m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
-        releaseScratch(scr);
-    }
-
-    void compare32(int imm, int offset, RegisterID base, RelationalCondition cond)
-    {
-        RegisterID scr = claimScratch();
-        load32(base, offset, scr);
-
-        RegisterID scr1 = claimScratch();
-        move(TrustedImm32(imm), scr1);
-
-        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
-
-        releaseScratch(scr1);
-        releaseScratch(scr);
-    }
-
-    // Memory access operation
-
-    ALWAYS_INLINE void loadEffectiveAddress(BaseIndex address, RegisterID dest, int extraoffset = 0)
-    {
-        if (dest == address.base) {
-            RegisterID scaledIndex = claimScratch();
-            move(address.index, scaledIndex);
-            lshift32(TrustedImm32(address.scale), scaledIndex);
-            add32(scaledIndex, dest);
-            releaseScratch(scaledIndex);
-        } else {
-            move(address.index, dest);
-            lshift32(TrustedImm32(address.scale), dest);
-            add32(address.base, dest);
-        }
-
-        add32(TrustedImm32(address.offset + extraoffset), dest);
-    }
-
-    void load32(ImplicitAddress address, RegisterID dest)
-    {
-        load32(address.base, address.offset, dest);
-    }
-
-    void load8(ImplicitAddress address, RegisterID dest)
-    {
-        load8(address.base, address.offset, dest);
-    }
-
-    void load8(BaseIndex address, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        move(address.index, scr);
-        lshift32(TrustedImm32(address.scale), scr);
-        add32(address.base, scr);
-        load8(scr, address.offset, dest);
-        releaseScratch(scr);
-    }
-
-    void load8(AbsoluteAddress address, RegisterID dest)
-    {
-        move(TrustedImmPtr(address.m_ptr), dest);
-        m_assembler.movbMemReg(dest, dest);
-        m_assembler.extub(dest, dest);
-    }
-
-    void load8(const void* address, RegisterID dest)
-    {
-        load8(AbsoluteAddress(address), dest);
-    }
-
-    void load8PostInc(RegisterID base, RegisterID dest)
-    {
-        m_assembler.movbMemRegIn(base, dest);
-        m_assembler.extub(dest, dest);
-    }
-
-    void load8Signed(BaseIndex address, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        move(address.index, scr);
-        lshift32(TrustedImm32(address.scale), scr);
-        add32(address.base, scr);
-        load8Signed(scr, address.offset, dest);
-        releaseScratch(scr);
-    }
-
-    void load32(BaseIndex address, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        move(address.index, scr);
-        lshift32(TrustedImm32(address.scale), scr);
-        add32(address.base, scr);
-        load32(scr, address.offset, dest);
-        releaseScratch(scr);
-    }
-
-    void load32(const void* address, RegisterID dest)
-    {
-        move(TrustedImmPtr(address), dest);
-        m_assembler.movlMemReg(dest, dest);
-    }
-
-    void load32(RegisterID base, int offset, RegisterID dest)
-    {
-        if (!offset) {
-            m_assembler.movlMemReg(base, dest);
-            return;
-        }
-
-        if ((offset >= 0) && (offset < 64)) {
-            m_assembler.movlMemReg(offset >> 2, base, dest);
-            return;
-        }
-
-        RegisterID scr = (dest == base) ? claimScratch() : dest;
-
-        m_assembler.loadConstant(offset, scr);
-        if (base == SH4Registers::r0)
-            m_assembler.movlR0mr(scr, dest);
-        else {
-            m_assembler.addlRegReg(base, scr);
-            m_assembler.movlMemReg(scr, dest);
-        }
-
-        if (dest == base)
-            releaseScratch(scr);
-    }
-
-    void load8Signed(RegisterID base, int offset, RegisterID dest)
-    {
-        if (!offset) {
-            m_assembler.movbMemReg(base, dest);
-            return;
-        }
-
-        if ((offset > 0) && (offset <= 15) && (dest == SH4Registers::r0)) {
-            m_assembler.movbMemReg(offset, base, dest);
-            return;
-        }
-
-        RegisterID scr = (dest == base) ? claimScratch() : dest;
-
-        m_assembler.loadConstant(offset, scr);
-        if (base == SH4Registers::r0)
-            m_assembler.movbR0mr(scr, dest);
-        else {
-            m_assembler.addlRegReg(base, scr);
-            m_assembler.movbMemReg(scr, dest);
-        }
-
-        if (dest == base)
-            releaseScratch(scr);
-    }
-
-    void load8(RegisterID base, int offset, RegisterID dest)
-    {
-        load8Signed(base, offset, dest);
-        m_assembler.extub(dest, dest);
-    }
-
-    void load32(RegisterID src, RegisterID dst)
-    {
-        m_assembler.movlMemReg(src, dst);
-    }
-
-    void load16(ImplicitAddress address, RegisterID dest)
-    {
-        if (!address.offset) {
-            m_assembler.movwMemReg(address.base, dest);
-            m_assembler.extuw(dest, dest);
-            return;
-        }
-
-        if ((address.offset > 0) && (address.offset <= 30) && (dest == SH4Registers::r0)) {
-            m_assembler.movwMemReg(address.offset >> 1, address.base, dest);
-            m_assembler.extuw(dest, dest);
-            return;
-        }
-
-        RegisterID scr = (dest == address.base) ? claimScratch() : dest;
-
-        m_assembler.loadConstant(address.offset, scr);
-        if (address.base == SH4Registers::r0)
-            m_assembler.movwR0mr(scr, dest);
-        else {
-            m_assembler.addlRegReg(address.base, scr);
-            m_assembler.movwMemReg(scr, dest);
-        }
-        m_assembler.extuw(dest, dest);
-
-        if (dest == address.base)
-            releaseScratch(scr);
-    }
-
-    void load16Unaligned(BaseIndex address, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-
-        loadEffectiveAddress(address, scr);
-
-        RegisterID scr1 = claimScratch();
-        load8PostInc(scr, scr1);
-        load8(scr, dest);
-        m_assembler.shllImm8r(8, dest);
-        or32(scr1, dest);
-
-        releaseScratch(scr);
-        releaseScratch(scr1);
-    }
-
-    void load16(RegisterID src, RegisterID dest)
-    {
-        m_assembler.movwMemReg(src, dest);
-        m_assembler.extuw(dest, dest);
-    }
-
-    void load16Signed(RegisterID src, RegisterID dest)
-    {
-        m_assembler.movwMemReg(src, dest);
-    }
-
-    void load16(BaseIndex address, RegisterID dest)
-    {
-        load16Signed(address, dest);
-        m_assembler.extuw(dest, dest);
-    }
-
-    void load16PostInc(RegisterID base, RegisterID dest)
-    {
-        m_assembler.movwMemRegIn(base, dest);
-        m_assembler.extuw(dest, dest);
-    }
-
-    void load16Signed(BaseIndex address, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-
-        move(address.index, scr);
-        lshift32(TrustedImm32(address.scale), scr);
-        add32(TrustedImm32(address.offset), scr);
-
-        if (address.base == SH4Registers::r0)
-            m_assembler.movwR0mr(scr, dest);
-        else {
-            add32(address.base, scr);
-            load16Signed(scr, dest);
-        }
-
-        releaseScratch(scr);
-    }
-
-    void store8(RegisterID src, BaseIndex address)
-    {
-        RegisterID scr = claimScratch();
-
-        move(address.index, scr);
-        lshift32(TrustedImm32(address.scale), scr);
-        add32(TrustedImm32(address.offset), scr);
-
-        if (address.base == SH4Registers::r0)
-            m_assembler.movbRegMemr0(src, scr);
-        else {
-            add32(address.base, scr);
-            m_assembler.movbRegMem(src, scr);
-        }
-
-        releaseScratch(scr);
-    }
-
-    void store8(RegisterID src, void* address)
-    {
-        RegisterID destptr = claimScratch();
-        move(TrustedImmPtr(address), destptr);
-        m_assembler.movbRegMem(src, destptr);
-        releaseScratch(destptr);
-    }
-
-    void store8(TrustedImm32 imm, void* address)
-    {
-        ASSERT((imm.m_value >= -128) && (imm.m_value <= 127));
-        RegisterID dstptr = claimScratch();
-        move(TrustedImmPtr(address), dstptr);
-        RegisterID srcval = claimScratch();
-        move(imm, srcval);
-        m_assembler.movbRegMem(srcval, dstptr);
-        releaseScratch(dstptr);
-        releaseScratch(srcval);
-    }
-
-    void store16(RegisterID src, BaseIndex address)
-    {
-        RegisterID scr = claimScratch();
-
-        move(address.index, scr);
-        lshift32(TrustedImm32(address.scale), scr);
-        add32(TrustedImm32(address.offset), scr);
-
-        if (address.base == SH4Registers::r0)
-            m_assembler.movwRegMemr0(src, scr);
-        else {
-            add32(address.base, scr);
-            m_assembler.movwRegMem(src, scr);
-        }
-
-        releaseScratch(scr);
-    }
-
-    void store32(RegisterID src, ImplicitAddress address)
-    {
-        if (!address.offset) {
-            m_assembler.movlRegMem(src, address.base);
-            return;
-        }
-
-        if ((address.offset >= 0) && (address.offset < 64)) {
-            m_assembler.movlRegMem(src, address.offset >> 2, address.base);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(address.offset, scr);
-        if (address.base == SH4Registers::r0)
-            m_assembler.movlRegMemr0(src, scr);
-        else {
-            m_assembler.addlRegReg(address.base, scr);
-            m_assembler.movlRegMem(src, scr);
-        }
-        releaseScratch(scr);
-    }
-
-    void store32(RegisterID src, RegisterID dst)
-    {
-        m_assembler.movlRegMem(src, dst);
-    }
-
-    void store32(TrustedImm32 imm, ImplicitAddress address)
-    {
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(imm.m_value, scr);
-        store32(scr, address);
-        releaseScratch(scr);
-    }
-
-    void store32(RegisterID src, BaseIndex address)
-    {
-        RegisterID scr = claimScratch();
-
-        move(address.index, scr);
-        lshift32(TrustedImm32(address.scale), scr);
-        add32(address.base, scr);
-        store32(src, Address(scr, address.offset));
-
-        releaseScratch(scr);
-    }
-
-    void store32(TrustedImm32 imm, void* address)
-    {
-        RegisterID scr = claimScratch();
-        RegisterID scr1 = claimScratch();
-        m_assembler.loadConstant(imm.m_value, scr);
-        move(TrustedImmPtr(address), scr1);
-        m_assembler.movlRegMem(scr, scr1);
-        releaseScratch(scr);
-        releaseScratch(scr1);
-    }
-
-    void store32(RegisterID src, void* address)
-    {
-        RegisterID scr = claimScratch();
-        move(TrustedImmPtr(address), scr);
-        m_assembler.movlRegMem(src, scr);
-        releaseScratch(scr);
-    }
-
-    void store32(TrustedImm32 imm, BaseIndex address)
-    {
-        RegisterID destptr = claimScratch();
-
-        loadEffectiveAddress(address, destptr);
-
-        RegisterID srcval = claimScratch();
-        move(imm, srcval);
-        m_assembler.movlRegMem(srcval, destptr);
-        releaseScratch(srcval);
-        releaseScratch(destptr);
-    }
-
-    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        DataLabel32 label(this);
-        m_assembler.loadConstantUnReusable(address.offset, scr);
-        m_assembler.addlRegReg(address.base, scr);
-        m_assembler.movlMemReg(scr, dest);
-        releaseScratch(scr);
-        return label;
-    }
-    
-    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
-    {
-        RegisterID scr = claimScratch();
-        DataLabel32 label(this);
-        m_assembler.loadConstantUnReusable(address.offset, scr);
-        m_assembler.addlRegReg(address.base, scr);
-        m_assembler.movlRegMem(src, scr);
-        releaseScratch(scr);
-        return label;
-    }
-
-    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
-    {
-        DataLabelCompact dataLabel(this);
-        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
-        m_assembler.movlMemRegCompact(address.offset >> 2, address.base, dest);
-        return dataLabel;
-    }
-
-    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
-    {
-        ConvertibleLoadLabel result(this);
-
-        RegisterID scr = claimScratch();
-        m_assembler.movImm8(address.offset, scr);
-        m_assembler.addlRegReg(address.base, scr);
-        m_assembler.movlMemReg(scr, dest);
-        releaseScratch(scr);
-
-        return result;
-    }
-
-    // Floating-point operations
-
-    static bool supportsFloatingPoint() { return true; }
-    static bool supportsFloatingPointTruncate() { return true; }
-    static bool supportsFloatingPointSqrt() { return true; }
-    static bool supportsFloatingPointAbs() { return true; }
-
-    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
-    {
-        m_assembler.fldsfpul((FPRegisterID)(src + 1));
-        m_assembler.stsfpulReg(dest1);
-        m_assembler.fldsfpul(src);
-        m_assembler.stsfpulReg(dest2);
-    }
-
-    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
-    {
-        m_assembler.ldsrmfpul(src1);
-        m_assembler.fstsfpul((FPRegisterID)(dest + 1));
-        m_assembler.ldsrmfpul(src2);
-        m_assembler.fstsfpul(dest);
-    }
-
-    void moveDouble(FPRegisterID src, FPRegisterID dest)
-    {
-        if (src != dest) {
-            m_assembler.fmovsRegReg((FPRegisterID)(src + 1), (FPRegisterID)(dest + 1));
-            m_assembler.fmovsRegReg(src, dest);
-        }
-    }
-
-    void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
-    {
-        if (fr1 != fr2) {
-            m_assembler.fldsfpul((FPRegisterID)(fr1 + 1));
-            m_assembler.fmovsRegReg((FPRegisterID)(fr2 + 1), (FPRegisterID)(fr1 + 1));
-            m_assembler.fstsfpul((FPRegisterID)(fr2 + 1));
-            m_assembler.fldsfpul(fr1);
-            m_assembler.fmovsRegReg(fr2, fr1);
-            m_assembler.fstsfpul(fr2);
-        }
-    }
-
-    void loadFloat(BaseIndex address, FPRegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-
-        loadEffectiveAddress(address, scr);
-
-        m_assembler.fmovsReadrm(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void loadDouble(BaseIndex address, FPRegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-
-        loadEffectiveAddress(address, scr);
-
-        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
-        m_assembler.fmovsReadrm(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void loadDouble(ImplicitAddress address, FPRegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-
-        m_assembler.loadConstant(address.offset, scr);
-        if (address.base == SH4Registers::r0) {
-            m_assembler.fmovsReadr0r(scr, (FPRegisterID)(dest + 1));
-            m_assembler.addlImm8r(4, scr);
-            m_assembler.fmovsReadr0r(scr, dest);
-            releaseScratch(scr);
-            return;
-        }
-
-        m_assembler.addlRegReg(address.base, scr);
-        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
-        m_assembler.fmovsReadrm(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void loadDouble(const void* address, FPRegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        move(TrustedImmPtr(address), scr);
-        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
-        m_assembler.fmovsReadrm(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void storeFloat(FPRegisterID src, BaseIndex address)
-    {
-        RegisterID scr = claimScratch();
-        loadEffectiveAddress(address, scr);
-        m_assembler.fmovsWriterm(src, scr);
-        releaseScratch(scr);
-    }
-
-    void storeDouble(FPRegisterID src, ImplicitAddress address)
-    {
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(address.offset + 8, scr);
-        m_assembler.addlRegReg(address.base, scr);
-        m_assembler.fmovsWriterndec(src, scr);
-        m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
-        releaseScratch(scr);
-    }
-
-    void storeDouble(FPRegisterID src, BaseIndex address)
-    {
-        RegisterID scr = claimScratch();
-
-        loadEffectiveAddress(address, scr, 8);
-
-        m_assembler.fmovsWriterndec(src, scr);
-        m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
-
-        releaseScratch(scr);
-    }
-
-    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
-    {
-        if (op1 == dest)
-            addDouble(op2, dest);
-        else {
-            moveDouble(op2, dest);
-            addDouble(op1, dest);
-        }
-    }
-
-    void storeDouble(FPRegisterID src, const void* address)
-    {
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address)) + 8, scr);
-        m_assembler.fmovsWriterndec(src, scr);
-        m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
-        releaseScratch(scr);
-    }
-
-    void addDouble(FPRegisterID src, FPRegisterID dest)
-    {
-        m_assembler.daddRegReg(src, dest);
-    }
-
-    void addDouble(AbsoluteAddress address, FPRegisterID dest)
-    {
-        loadDouble(address.m_ptr, fscratch);
-        addDouble(fscratch, dest);
-    }
-
-    void addDouble(Address address, FPRegisterID dest)
-    {
-        loadDouble(address, fscratch);
-        addDouble(fscratch, dest);
-    }
-
-    void subDouble(FPRegisterID src, FPRegisterID dest)
-    {
-        m_assembler.dsubRegReg(src, dest);
-    }
-
-    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
-    {
-        if (op2 == dest) {
-            moveDouble(op1, fscratch);
-            subDouble(op2, fscratch);
-            moveDouble(fscratch, dest);
-        } else {
-            moveDouble(op1, dest);
-            subDouble(op2, dest);
-        }
-    }
-
-    void subDouble(Address address, FPRegisterID dest)
-    {
-        loadDouble(address, fscratch);
-        subDouble(fscratch, dest);
-    }
-
-    void mulDouble(FPRegisterID src, FPRegisterID dest)
-    {
-        m_assembler.dmulRegReg(src, dest);
-    }
-
-    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
-    {
-        if (op1 == dest)
-            mulDouble(op2, dest);
-        else {
-            moveDouble(op2, dest);
-            mulDouble(op1, dest);
-        }
-    }
-
-    void mulDouble(Address address, FPRegisterID dest)
-    {
-        loadDouble(address, fscratch);
-        mulDouble(fscratch, dest);
-    }
-
-    void divDouble(FPRegisterID src, FPRegisterID dest)
-    {
-        m_assembler.ddivRegReg(src, dest);
-    }
-
-    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
-    {
-        if (op2 == dest) {
-            moveDouble(op1, fscratch);
-            divDouble(op2, fscratch);
-            moveDouble(fscratch, dest);
-        } else {
-            moveDouble(op1, dest);
-            divDouble(op2, dest);
-        }
-    }
-
-    void negateDouble(FPRegisterID src, FPRegisterID dest)
-    {
-        moveDouble(src, dest);
-        m_assembler.dneg(dest);
-    }
-
-    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
-    {
-        m_assembler.fldsfpul(src);
-        m_assembler.dcnvsd(dst);
-    }
-
-    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
-    {
-        m_assembler.dcnvds(src);
-        m_assembler.fstsfpul(dst);
-    }
-
-    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
-    {
-        m_assembler.ldsrmfpul(src);
-        m_assembler.floatfpulDreg(dest);
-    }
-
-    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        load32(src.m_ptr, scr);
-        convertInt32ToDouble(scr, dest);
-        releaseScratch(scr);
-    }
-
-    void convertInt32ToDouble(Address src, FPRegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        load32(src, scr);
-        convertInt32ToDouble(scr, dest);
-        releaseScratch(scr);
-    }
-
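-    // Probes the effective address alignment at run time: a word-aligned address
-    // takes the plain load32 path, a halfword-aligned one is assembled from two
-    // 16-bit loads, and anything else from byte-sized loads.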
-    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
-    {
-        RegisterID scr = claimScratch();
-        Jump m_jump;
-        JumpList end;
-
-        loadEffectiveAddress(address, scr);
-
-        RegisterID scr1 = claimScratch();
-        if (dest != SH4Registers::r0)
-            move(SH4Registers::r0, scr1);
-
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 58, sizeof(uint32_t));
-        move(scr, SH4Registers::r0);
-        m_assembler.testlImm8r(0x3, SH4Registers::r0);
-        m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
-
-        if (dest != SH4Registers::r0)
-            move(scr1, SH4Registers::r0);
-
-        load32(scr, dest);
-        end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
-        m_assembler.nop();
-        m_jump.link(this);
-        m_assembler.testlImm8r(0x1, SH4Registers::r0);
-
-        if (dest != SH4Registers::r0)
-            move(scr1, SH4Registers::r0);
-
-        m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
-        load16PostInc(scr, scr1);
-        load16(scr, dest);
-        m_assembler.shllImm8r(16, dest);
-        or32(scr1, dest);
-        end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
-        m_assembler.nop();
-        m_jump.link(this);
-        load8PostInc(scr, scr1);
-        load16PostInc(scr, dest);
-        m_assembler.shllImm8r(8, dest);
-        or32(dest, scr1);
-        load8(scr, dest);
-        m_assembler.shllImm8r(8, dest);
-        m_assembler.shllImm8r(16, dest);
-        or32(scr1, dest);
-        end.link(this);
-
-        releaseScratch(scr);
-        releaseScratch(scr1);
-    }
-
-    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
-    {
-        RegisterID scr = scratchReg3;
-        load32WithUnalignedHalfWords(left, scr);
-        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
-            m_assembler.testlRegReg(scr, scr);
-        else
-            compare32(right.m_value, scr, cond);
-
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
-    {
-        m_assembler.movImm8(0, scratchReg3);
-        convertInt32ToDouble(scratchReg3, scratch);
-        return branchDouble(DoubleNotEqual, reg, scratch);
-    }
-
-    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
-    {
-        m_assembler.movImm8(0, scratchReg3);
-        convertInt32ToDouble(scratchReg3, scratch);
-        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
-    }
-
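-    // Unordered (NaN) operands are detected by comparing each input with itself:
-    // dcmppeq(x, x) only fails when x is NaN, and the jne paths below steer those
-    // cases to the branch outcome the condition requires.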
-    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
-    {
-        if (cond == DoubleEqual) {
-            m_assembler.dcmppeq(right, left);
-            return branchTrue();
-        }
-
-        if (cond == DoubleNotEqual) {
-            JumpList end;
-            m_assembler.dcmppeq(left, left);
-            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
-            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppeq(right, right);
-            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppeq(right, left);
-            Jump m_jump = branchFalse();
-            end.link(this);
-            return m_jump;
-        }
-
-        if (cond == DoubleGreaterThan) {
-            m_assembler.dcmppgt(right, left);
-            return branchTrue();
-        }
-
-        if (cond == DoubleGreaterThanOrEqual) {
-            JumpList end;
-            m_assembler.dcmppeq(left, left);
-            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
-            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppeq(right, right);
-            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppgt(left, right);
-            Jump m_jump = branchFalse();
-            end.link(this);
-            return m_jump;
-        }
-
-        if (cond == DoubleLessThan) {
-            m_assembler.dcmppgt(left, right);
-            return branchTrue();
-        }
-
-        if (cond == DoubleLessThanOrEqual) {
-            JumpList end;
-            m_assembler.dcmppeq(left, left);
-            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
-            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppeq(right, right);
-            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppgt(right, left);
-            Jump m_jump = branchFalse();
-            end.link(this);
-            return m_jump;
-        }
-
-        if (cond == DoubleEqualOrUnordered) {
-            JumpList takeBranch;
-            m_assembler.dcmppeq(left, left);
-            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
-            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppeq(right, right);
-            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppeq(left, right);
-            m_assembler.branch(BF_OPCODE, 2);
-            takeBranch.link(this);
-            return Jump(m_assembler.extraInstrForBranch(scratchReg3));
-        }
-
-        if (cond == DoubleGreaterThanOrUnordered) {
-            JumpList takeBranch;
-            m_assembler.dcmppeq(left, left);
-            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
-            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppeq(right, right);
-            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppgt(right, left);
-            m_assembler.branch(BF_OPCODE, 2);
-            takeBranch.link(this);
-            return Jump(m_assembler.extraInstrForBranch(scratchReg3));
-        }
-
-        if (cond == DoubleGreaterThanOrEqualOrUnordered) {
-            m_assembler.dcmppgt(left, right);
-            return branchFalse();
-        }
-
-        if (cond == DoubleLessThanOrUnordered) {
-            JumpList takeBranch;
-            m_assembler.dcmppeq(left, left);
-            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
-            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppeq(right, right);
-            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
-            m_assembler.dcmppgt(left, right);
-            m_assembler.branch(BF_OPCODE, 2);
-            takeBranch.link(this);
-            return Jump(m_assembler.extraInstrForBranch(scratchReg3));
-        }
-
-        if (cond == DoubleLessThanOrEqualOrUnordered) {
-            m_assembler.dcmppgt(right, left);
-            return branchFalse();
-        }
-
-        ASSERT(cond == DoubleNotEqualOrUnordered);
-        m_assembler.dcmppeq(right, left);
-        return branchFalse();
-    }
-
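-    // SH4 BT/BF branches only reach a short displacement, so "branch if true" is
-    // synthesized as a BF around a far jump through a scratch register (see the
-    // BT/BF rewrite sketched in branch32 below).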
-    Jump branchTrue()
-    {
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
-        m_assembler.branch(BF_OPCODE, 2);
-        return Jump(m_assembler.extraInstrForBranch(scratchReg3));
-    }
-
-    Jump branchFalse()
-    {
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
-        m_assembler.branch(BT_OPCODE, 2);
-        return Jump(m_assembler.extraInstrForBranch(scratchReg3));
-    }
-
-    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
-    {
-        RegisterID scr = claimScratch();
-        move(left.index, scr);
-        lshift32(TrustedImm32(left.scale), scr);
-        add32(left.base, scr);
-        load32(scr, left.offset, scr);
-        compare32(right.m_value, scr, cond);
-        releaseScratch(scr);
-
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
-    {
-        moveDouble(src, dest);
-        m_assembler.dsqrt(dest);
-    }
-    
-    void absDouble(FPRegisterID src, FPRegisterID dest)
-    {
-        moveDouble(src, dest);
-        m_assembler.dabs(dest);
-    }
-
-    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
-    {
-        RegisterID addressTempRegister = claimScratch();
-        load8(address, addressTempRegister);
-        Jump jmp = branchTest32(cond, addressTempRegister, mask);
-        releaseScratch(addressTempRegister);
-        return jmp;
-    }
-
-    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
-    {
-        RegisterID addressTempRegister = claimScratch();
-        load8(address, addressTempRegister);
-        Jump jmp = branchTest32(cond, addressTempRegister, mask);
-        releaseScratch(addressTempRegister);
-        return jmp;
-    }
-
-    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
-    {
-        RegisterID addressTempRegister = claimScratch();
-        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
-        load8(Address(addressTempRegister), addressTempRegister);
-        Jump jmp = branchTest32(cond, addressTempRegister, mask);
-        releaseScratch(addressTempRegister);
-        return jmp;
-    }
-
-    void signExtend32ToPtr(RegisterID src, RegisterID dest)
-    {
-        move(src, dest);
-    }
-
-    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
-    {
-        move(src, dest);
-    }
-
-    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
-    {
-        RegisterID addressTempRegister = claimScratch();
-        load8(left, addressTempRegister);
-        Jump jmp = branch32(cond, addressTempRegister, right);
-        releaseScratch(addressTempRegister);
-        return jmp;
-    }
-
-    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
-    {
-        RegisterID addressTempRegister = claimScratch();
-        load8(left, addressTempRegister);
-        Jump jmp = branch32(cond, addressTempRegister, right);
-        releaseScratch(addressTempRegister);
-        return jmp;
-    }
-
-    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
-    {
-        RegisterID addressTempRegister = claimScratch();
-        load8(left, addressTempRegister);
-        compare32(cond, addressTempRegister, right, dest);
-        releaseScratch(addressTempRegister);
-    }
-
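-    // ftrc saturates out-of-range inputs to 0x7fffffff (or 0x80000000), so a
-    // truncation is considered failed when the result equals a saturation value.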
-    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
-    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
-    {
-        Jump result;
-        truncateDoubleToInt32(src, dest);
-        RegisterID intscr = claimScratch();
-        m_assembler.loadConstant(0x7fffffff, intscr);
-        m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 12, sizeof(uint32_t));
-        if (branchType == BranchIfTruncateFailed) {
-            m_assembler.branch(BT_OPCODE, 2);
-            m_assembler.addlImm8r(1, intscr);
-            m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
-            result = branchTrue();
-        } else {
-            Jump out = Jump(m_assembler.je(), SH4Assembler::JumpNear);
-            m_assembler.addlImm8r(1, intscr);
-            m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
-            result = branchFalse();
-            out.link(this);
-        }
-        releaseScratch(intscr);
-        return result;
-    }
-
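-    // Unsigned truncations bias the input down by 2^31, truncate as signed, then
-    // add 2^31 back, mapping the unsigned range onto the one ftrc can handle.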
-    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
-    {
-        Jump result;
-        RegisterID intscr = claimScratch();
-        m_assembler.loadConstant(0x80000000, intscr);
-        convertInt32ToDouble(intscr, fscratch);
-        addDouble(src, fscratch);
-        truncateDoubleToInt32(fscratch, dest);
-        m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 16, sizeof(uint32_t));
-        if (branchType == BranchIfTruncateFailed) {
-            m_assembler.branch(BT_OPCODE, 4);
-            m_assembler.addlImm8r(-1, intscr);
-            m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
-            m_assembler.addlImm8r(1, intscr);
-            m_assembler.sublRegReg(intscr, dest);
-            result = branchTrue();
-        } else {
-            Jump out = Jump(m_assembler.je(), SH4Assembler::JumpNear);
-            m_assembler.addlImm8r(-1, intscr);
-            m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
-            m_assembler.addlImm8r(1, intscr);
-            m_assembler.sublRegReg(intscr, dest);
-            result = branchFalse();
-            out.link(this);
-        }
-        releaseScratch(intscr);
-        return result;
-    }
-
-    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
-    {
-        m_assembler.ftrcdrmfpul(src);
-        m_assembler.stsfpulReg(dest);
-    }
-
-    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
-    {
-        RegisterID intscr = claimScratch();
-        m_assembler.loadConstant(0x80000000, intscr);
-        convertInt32ToDouble(intscr, fscratch);
-        addDouble(src, fscratch);
-        m_assembler.ftrcdrmfpul(fscratch);
-        m_assembler.stsfpulReg(dest);
-        m_assembler.sublRegReg(intscr, dest);
-        releaseScratch(intscr);
-    }
-
-    // Stack manipulation operations
-
-    void pop(RegisterID dest)
-    {
-        m_assembler.popReg(dest);
-    }
-
-    void push(RegisterID src)
-    {
-        m_assembler.pushReg(src);
-    }
-
-    void push(TrustedImm32 imm)
-    {
-        RegisterID scr = claimScratch();
-        m_assembler.loadConstant(imm.m_value, scr);
-        push(scr);
-        releaseScratch(scr);
-    }
-
-    // Register move operations
-
-    void move(TrustedImm32 imm, RegisterID dest)
-    {
-        m_assembler.loadConstant(imm.m_value, dest);
-    }
-
-    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
-    {
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
-        DataLabelPtr dataLabel(this);
-        m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest);
-        return dataLabel;
-    }
-
-    void move(RegisterID src, RegisterID dest)
-    {
-        if (src != dest)
-            m_assembler.movlRegReg(src, dest);
-    }
-
-    void move(TrustedImmPtr imm, RegisterID dest)
-    {
-        m_assembler.loadConstant(imm.asIntptr(), dest);
-    }
-
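-    // XOR swap: exchanges two GPRs without claiming a scratch register.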
-    void swap(RegisterID reg1, RegisterID reg2)
-    {
-        if (reg1 != reg2) {
-            xor32(reg1, reg2);
-            xor32(reg2, reg1);
-            xor32(reg1, reg2);
-        }
-    }
-
-    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
-    {
-        m_assembler.cmplRegReg(right, left, SH4Condition(cond));
-        if (cond != NotEqual) {
-            m_assembler.movt(dest);
-            return;
-        }
-
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
-        m_assembler.movImm8(0, dest);
-        m_assembler.branch(BT_OPCODE, 0);
-        m_assembler.movImm8(1, dest);
-    }
-
-    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
-    {
-        if (left != dest) {
-            move(right, dest);
-            compare32(cond, left, dest, dest);
-            return;
-        }
-
-        RegisterID scr = claimScratch();
-        move(right, scr);
-        compare32(cond, left, scr, dest);
-        releaseScratch(scr);
-    }
-
-    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
-    {
-        ASSERT((cond == Zero) || (cond == NonZero));
-
-        load8(address, dest);
-        if (mask.m_value == -1)
-            compare32(0, dest, static_cast<RelationalCondition>(cond));
-        else
-            testlImm(mask.m_value, dest);
-        if (cond != NonZero) {
-            m_assembler.movt(dest);
-            return;
-        }
-
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
-        m_assembler.movImm8(0, dest);
-        m_assembler.branch(BT_OPCODE, 0);
-        m_assembler.movImm8(1, dest);
-    }
-
-    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
-    {
-        ASSERT((cond == Zero) || (cond == NonZero));
-
-        load32(address, dest);
-        if (mask.m_value == -1)
-            compare32(0, dest, static_cast<RelationalCondition>(cond));
-        else
-            testlImm(mask.m_value, dest);
-        if (cond != NonZero) {
-            m_assembler.movt(dest);
-            return;
-        }
-
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
-        m_assembler.movImm8(0, dest);
-        m_assembler.branch(BT_OPCODE, 0);
-        m_assembler.movImm8(1, dest);
-    }
-
-    void loadPtrLinkReg(ImplicitAddress address)
-    {
-        RegisterID scr = claimScratch();
-        load32(address, scr);
-        m_assembler.ldspr(scr);
-        releaseScratch(scr);
-    }
-
-    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
-    {
-        m_assembler.cmplRegReg(right, left, SH4Condition(cond));
-        /* BT label => BF off
-           nop         LDR reg
-           nop         braf @reg
-           nop         nop
-         */
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
-    {
-        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
-            m_assembler.testlRegReg(left, left);
-        else
-            compare32(right.m_value, left, cond);
-
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
-    {
-        compare32(right.offset, right.base, left, cond);
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
-    {
-        compare32(right, left.offset, left.base, cond);
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
-    {
-        compare32(right.m_value, left.offset, left.base, cond);
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
-    {
-        RegisterID scr = claimScratch();
-
-        load32(left.m_ptr, scr);
-        m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
-        releaseScratch(scr);
-
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
-    {
-        RegisterID addressTempRegister = claimScratch();
-
-        move(TrustedImmPtr(left.m_ptr), addressTempRegister);
-        m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
-        compare32(right.m_value, addressTempRegister, cond);
-        releaseScratch(addressTempRegister);
-
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
-    {
-        ASSERT(!(right.m_value & 0xFFFFFF00));
-        RegisterID lefttmp = claimScratch();
-
-        loadEffectiveAddress(left, lefttmp);
-
-        load8(lefttmp, lefttmp);
-        RegisterID righttmp = claimScratch();
-        m_assembler.loadConstant(right.m_value, righttmp);
-
-        Jump result = branch32(cond, lefttmp, righttmp);
-        releaseScratch(lefttmp);
-        releaseScratch(righttmp);
-        return result;
-    }
-
-    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
-    {
-        ASSERT((cond == Zero) || (cond == NonZero));
-
-        m_assembler.testlRegReg(reg, mask);
-
-        if (cond == NonZero) // NotEqual
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
-    {
-        ASSERT((cond == Zero) || (cond == NonZero));
-
-        if (mask.m_value == -1)
-            m_assembler.testlRegReg(reg, reg);
-        else
-            testlImm(mask.m_value, reg);
-
-        if (cond == NonZero) // NotEqual
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
-    {
-        ASSERT((cond == Zero) || (cond == NonZero));
-
-        if (mask.m_value == -1)
-            compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond));
-        else
-            testImm(mask.m_value, address.offset, address.base);
-
-        if (cond == NonZero) // NotEqual
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
-    {
-        ASSERT((cond == Zero) || (cond == NonZero));
-
-        RegisterID scr = claimScratch();
-
-        move(address.index, scr);
-        lshift32(TrustedImm32(address.scale), scr);
-        add32(address.base, scr);
-        load32(scr, address.offset, scr);
-
-        if (mask.m_value == -1)
-            m_assembler.testlRegReg(scr, scr);
-        else
-            testlImm(mask.m_value, scr);
-
-        releaseScratch(scr);
-
-        if (cond == NonZero) // NotEqual
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump jump()
-    {
-        return Jump(m_assembler.jmp());
-    }
-
-    void jump(RegisterID target)
-    {
-        m_assembler.jmpReg(target);
-    }
-
-    void jump(Address address)
-    {
-        RegisterID scr = claimScratch();
-        load32(address, scr);
-        m_assembler.jmpReg(scr);
-        releaseScratch(scr);
-    }
-
-    void jump(AbsoluteAddress address)
-    {
-        RegisterID scr = claimScratch();
-
-        move(TrustedImmPtr(address.m_ptr), scr);
-        m_assembler.movlMemReg(scr, scr);
-        m_assembler.jmpReg(scr);
-        releaseScratch(scr);
-    }
-
-    // Arithmetic control flow operations
-
-    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        if (cond == Overflow)
-            return branchMul32(cond, TrustedImm32(-1), srcDest, srcDest);
-
-        neg32(srcDest);
-
-        if (cond == Signed) {
-            m_assembler.cmppz(srcDest);
-            return branchFalse();
-        }
-
-        compare32(0, srcDest, Equal);
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
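-    // addvl/subvl set the T bit on signed overflow, so Overflow conditions map
-    // directly onto branchTrue().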
-    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-
-        if (cond == Overflow) {
-            m_assembler.addvlRegReg(src, dest);
-            return branchTrue();
-        }
-
-        m_assembler.addlRegReg(src, dest);
-
-        if ((cond == Signed) || (cond == PositiveOrZero)) {
-            m_assembler.cmppz(dest);
-            return (cond == Signed) ? branchFalse() : branchTrue();
-        }
-
-        compare32(0, dest, Equal);
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
-    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-
-        if (cond == Overflow) {
-            if (src1 == dest)
-                m_assembler.addvlRegReg(src2, dest);
-            else {
-                move(src2, dest);
-                m_assembler.addvlRegReg(src1, dest);
-            }
-            return branchTrue();
-        }
-
-        add32(src1, src2, dest);
-
-        if ((cond == Signed) || (cond == PositiveOrZero)) {
-            m_assembler.cmppz(dest);
-            return (cond == Signed) ? branchFalse() : branchTrue();
-        }
-
-        compare32(0, dest, Equal);
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
-    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-
-        RegisterID immval = claimScratch();
-        move(imm, immval);
-        Jump result = branchAdd32(cond, immval, dest);
-        releaseScratch(immval);
-        return result;
-    }
-
-    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-
-        move(src, dest);
-
-        if (cond == Overflow) {
-            move(imm, scratchReg3);
-            m_assembler.addvlRegReg(scratchReg3, dest);
-            return branchTrue();
-        }
-
-        add32(imm, dest);
-
-        if ((cond == Signed) || (cond == PositiveOrZero)) {
-            m_assembler.cmppz(dest);
-            return (cond == Signed) ? branchFalse() : branchTrue();
-        }
-
-        compare32(0, dest, Equal);
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
-    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-        bool result;
-
-        move(imm, scratchReg3);
-        RegisterID destptr = claimScratch();
-        RegisterID destval = claimScratch();
-        move(TrustedImmPtr(dest.m_ptr), destptr);
-        m_assembler.movlMemReg(destptr, destval);
-        if (cond == Overflow) {
-            m_assembler.addvlRegReg(scratchReg3, destval);
-            result = true;
-        } else {
-            m_assembler.addlRegReg(scratchReg3, destval);
-            if ((cond == Signed) || (cond == PositiveOrZero)) {
-                m_assembler.cmppz(destval);
-                result = (cond == PositiveOrZero);
-            } else {
-                m_assembler.testlRegReg(destval, destval);
-                result = (cond != NonZero);
-            }
-        }
-        m_assembler.movlRegMem(destval, destptr);
-        releaseScratch(destval);
-        releaseScratch(destptr);
-        return result ? branchTrue() : branchFalse();
-    }
-
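-    // dmuls.l leaves the full 64-bit product in MACH:MACL; the multiply has
-    // overflowed 32 bits exactly when MACH differs from the sign extension of
-    // MACL, which is what the comparison below checks.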
-    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        if (cond == Overflow) {
-            RegisterID scrsign = claimScratch();
-            RegisterID msbres = claimScratch();
-            m_assembler.dmulslRegReg(src, dest);
-            m_assembler.stsmacl(dest);
-            m_assembler.cmppz(dest);
-            m_assembler.movt(scrsign);
-            m_assembler.addlImm8r(-1, scrsign);
-            m_assembler.stsmach(msbres);
-            m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal));
-            releaseScratch(msbres);
-            releaseScratch(scrsign);
-            return branchFalse();
-        }
-
-        mul32(src, dest);
-
-        if (cond == Signed) {
-            m_assembler.cmppz(dest);
-            return branchFalse();
-        }
-
-        compare32(0, dest, static_cast<RelationalCondition>(cond));
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
-    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        if (cond == Overflow) {
-            RegisterID scrsign = claimScratch();
-            RegisterID msbres = claimScratch();
-            m_assembler.dmulslRegReg(src1, src2);
-            m_assembler.stsmacl(dest);
-            m_assembler.cmppz(dest);
-            m_assembler.movt(scrsign);
-            m_assembler.addlImm8r(-1, scrsign);
-            m_assembler.stsmach(msbres);
-            m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal));
-            releaseScratch(msbres);
-            releaseScratch(scrsign);
-            return branchFalse();
-        }
-
-        mul32(src1, src2, dest);
-
-        if (cond == Signed) {
-            m_assembler.cmppz(dest);
-            return branchFalse();
-        }
-
-        compare32(0, dest, Equal);
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
-    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        if (src == dest) {
-            move(imm, scratchReg3);
-            return branchMul32(cond, scratchReg3, dest);
-        }
-
-        move(imm, dest);
-        return branchMul32(cond, src, dest);
-    }
-
-    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        if (cond == Overflow) {
-            m_assembler.subvlRegReg(src, dest);
-            return branchTrue();
-        }
-
-        sub32(src, dest);
-
-        if (cond == Signed) {
-            m_assembler.cmppz(dest);
-            return branchFalse();
-        }
-
-        compare32(0, dest, static_cast<RelationalCondition>(cond));
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
-    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        RegisterID immval = claimScratch();
-        move(imm, immval);
-        Jump result = branchSub32(cond, immval, dest);
-        releaseScratch(immval);
-        return result;
-    }
-
-    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        move(src, dest);
-        return branchSub32(cond, imm, dest);
-    }
-
-    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
-    {
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        if (src2 != dest) {
-            move(src1, dest);
-            return branchSub32(cond, src2, dest);
-        }
-
-        if (cond == Overflow) {
-            RegisterID tmpval = claimScratch();
-            move(src1, tmpval);
-            m_assembler.subvlRegReg(src2, tmpval);
-            move(tmpval, dest);
-            releaseScratch(tmpval);
-            return branchTrue();
-        }
-
-        RegisterID tmpval = claimScratch();
-        move(src1, tmpval);
-        sub32(src2, tmpval);
-        move(tmpval, dest);
-        releaseScratch(tmpval);
-
-        if (cond == Signed) {
-            m_assembler.cmppz(dest);
-            return branchFalse();
-        }
-
-        compare32(0, dest, static_cast<RelationalCondition>(cond));
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
-    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
-    {
-        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
-
-        or32(src, dest);
-
-        if (cond == Signed) {
-            m_assembler.cmppz(dest);
-            return branchFalse();
-        }
-
-        compare32(0, dest, static_cast<RelationalCondition>(cond));
-        return (cond == NonZero) ? branchFalse() : branchTrue();
-    }
-
-    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
-    {
-        truncateDoubleToInt32(src, dest);
-        convertInt32ToDouble(dest, fscratch);
-        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src));
-
-        if (negZeroCheck)
-            failureCases.append(branch32(Equal, dest, TrustedImm32(0)));
-    }
-
-    void neg32(RegisterID dst)
-    {
-        m_assembler.neg(dst, dst);
-    }
-
-    void urshift32(RegisterID shiftamount, RegisterID dest)
-    {
-        RegisterID shiftTmp = claimScratch();
-        m_assembler.loadConstant(0x1f, shiftTmp);
-        m_assembler.andlRegReg(shiftamount, shiftTmp);
-        m_assembler.neg(shiftTmp, shiftTmp);
-        m_assembler.shldRegReg(dest, shiftTmp);
-        releaseScratch(shiftTmp);
-    }
-
-    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
-    {
-        move(src, dest);
-        urshift32(shiftAmount, dest);
-    }
-
-    void urshift32(TrustedImm32 imm, RegisterID dest)
-    {
-        int immMasked = imm.m_value & 0x1f;
-        if (!immMasked)
-            return;
-
-        if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
-            m_assembler.shlrImm8r(immMasked, dest);
-            return;
-        }
-
-        RegisterID shiftTmp = claimScratch();
-        m_assembler.loadConstant(-immMasked, shiftTmp);
-        m_assembler.shldRegReg(dest, shiftTmp);
-        releaseScratch(shiftTmp);
-    }
-
-    void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
-    {
-        move(src, dest);
-        urshift32(shiftamount, dest);
-    }
-
-    Call call()
-    {
-        return Call(m_assembler.call(), Call::Linkable);
-    }
-
-    Call nearCall()
-    {
-        return Call(m_assembler.call(), Call::LinkableNear);
-    }
-
-    Call call(RegisterID target)
-    {
-        return Call(m_assembler.call(target), Call::None);
-    }
-
-    void call(Address address)
-    {
-        RegisterID target = claimScratch();
-        load32(address.base, address.offset, target);
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
-        m_assembler.branch(JSR_OPCODE, target);
-        m_assembler.nop();
-        releaseScratch(target);
-    }
-
-    void breakpoint()
-    {
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
-        m_assembler.bkpt();
-        m_assembler.nop();
-    }
-
-    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
-    {
-        RegisterID dataTempRegister = claimScratch();
-
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
-        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
-        m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
-        releaseScratch(dataTempRegister);
-
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
-    {
-        RegisterID scr = claimScratch();
-
-        m_assembler.loadConstant(left.offset, scr);
-        m_assembler.addlRegReg(left.base, scr);
-        m_assembler.movlMemReg(scr, scr);
-        RegisterID scr1 = claimScratch();
-        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
-        dataLabel = moveWithPatch(initialRightValue, scr1);
-        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
-        releaseScratch(scr);
-        releaseScratch(scr1);
-
-        if (cond == NotEqual)
-            return branchFalse();
-        return branchTrue();
-    }
-
-    void ret()
-    {
-        m_assembler.ret();
-        m_assembler.nop();
-    }
-
-    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
-    {
-        RegisterID scr = claimScratch();
-        DataLabelPtr label = moveWithPatch(initialValue, scr);
-        store32(scr, address);
-        releaseScratch(scr);
-        return label;
-    }
-
-    DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
-
-    int sizeOfConstantPool()
-    {
-        return m_assembler.sizeOfConstantPool();
-    }
-
-    Call tailRecursiveCall()
-    {
-        RegisterID scr = claimScratch();
-
-        m_assembler.loadConstantUnReusable(0x0, scr, true);
-        Jump m_jump = Jump(m_assembler.jmp(scr));
-        releaseScratch(scr);
-
-        return Call::fromTailJump(m_jump);
-    }
-
-    Call makeTailRecursiveCall(Jump oldJump)
-    {
-        oldJump.link(this);
-        return tailRecursiveCall();
-    }
-
-    void nop()
-    {
-        m_assembler.nop();
-    }
-
-    void memoryFence()
-    {
-        m_assembler.synco();
-    }
-
-    static FunctionPtr readCallTarget(CodeLocationCall call)
-    {
-        return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
-    }
-
-    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
-    {
-        SH4Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
-    }
-    
-    static ptrdiff_t maxJumpReplacementSize()
-    {
-        return SH4Assembler::maxJumpReplacementSize();
-    }
-
-    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
-
-    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
-    {
-        return label.labelAtOffset(0);
-    }
-
-    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
-    {
-        SH4Assembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart.dataLocation(), rd, reinterpret_cast<int>(initialValue));
-    }
-
-    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
-    {
-        UNREACHABLE_FOR_PLATFORM();
-        return CodeLocationLabel();
-    }
-
-    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
-    {
-        UNREACHABLE_FOR_PLATFORM();
-    }
-
-protected:
-    SH4Assembler::Condition SH4Condition(RelationalCondition cond)
-    {
-        return static_cast<SH4Assembler::Condition>(cond);
-    }
-
-    SH4Assembler::Condition SH4Condition(ResultCondition cond)
-    {
-        return static_cast<SH4Assembler::Condition>(cond);
-    }
-private:
-    friend class LinkBuffer;
-    friend class RepatchBuffer;
-
-    static void linkCall(void* code, Call call, FunctionPtr function)
-    {
-        SH4Assembler::linkCall(code, call.m_label, function.value());
-    }
-
-    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
-    {
-        SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
-    }
-
-    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
-    {
-        SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
-    }
-};
-
-} // namespace JSC
-
-#endif // ENABLE(ASSEMBLER)
-
-#endif // MacroAssemblerSH4_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
index 547158fa7..75f35456d 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,21 +23,19 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef MacroAssemblerX86_h
-#define MacroAssemblerX86_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && CPU(X86)
 
 #include "MacroAssemblerX86Common.h"
 
-#if USE(MASM_PROBE)
-#include <wtf/StdLibExtras.h>
-#endif
-
 namespace JSC {
 
 class MacroAssemblerX86 : public MacroAssemblerX86Common {
 public:
+    static const unsigned numGPRs = 8;
+    static const unsigned numFPRs = 8;
+    
     static const Scale ScalePtr = TimesFour;
 
     using MacroAssemblerX86Common::add32;
@@ -111,6 +109,18 @@ public:
         m_assembler.movzbl_mr(address, dest);
     }
 
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), X86Registers::eax);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), X86Registers::edx);
+        abortWithReason(reason);
+    }
+
     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
     {
         ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
@@ -123,11 +133,11 @@ public:
         m_assembler.addsd_mr(address.m_ptr, dest);
     }
 
-    void storeDouble(FPRegisterID src, const void* address)
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
     {
         ASSERT(isSSE2Present());
-        ASSERT(address);
-        m_assembler.movsd_rm(src, address);
+        ASSERT(address.m_value);
+        m_assembler.movsd_rm(src, address.m_value);
     }
 
     void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
@@ -152,22 +162,24 @@ public:
 
     void store8(TrustedImm32 imm, void* address)
     {
-        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
-        m_assembler.movb_i8m(imm.m_value, address);
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.movb_i8m(imm8.m_value, address);
     }
     
-    // Possibly clobbers src.
     void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
     {
-        movePackedToInt32(src, dest1);
-        rshiftPacked(TrustedImm32(32), src);
-        movePackedToInt32(src, dest2);
+        ASSERT(isSSE2Present());
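+        // Assemble the high 32 bits (in dest2) from two pextrw'd 16-bit lanes,
+        // then pull the low 32 bits into dest1 with moveFloatTo32.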
+        m_assembler.pextrw_irr(3, src, dest1);
+        m_assembler.pextrw_irr(2, src, dest2);
+        lshift32(TrustedImm32(16), dest1);
+        or32(dest1, dest2);
+        moveFloatTo32(src, dest1);
     }
 
     void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
     {
-        moveInt32ToPacked(src1, dest);
-        moveInt32ToPacked(src2, scratch);
+        move32ToFloat(src1, dest);
+        move32ToFloat(src2, scratch);
         lshiftPacked(TrustedImm32(32), scratch);
         orPacked(scratch, dest);
     }
@@ -227,17 +239,18 @@ public:
     
     Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
     {
-        m_assembler.cmpb_im(right.m_value, left.m_ptr);
+        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
+        m_assembler.cmpb_im(right8.m_value, left.m_ptr);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
-        if (mask.m_value == -1)
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        if (mask8.m_value == -1)
             m_assembler.cmpb_im(0, address.m_ptr);
         else
-            m_assembler.testb_im(mask.m_value, address.m_ptr);
+            m_assembler.testb_im(mask8.m_value, address.m_ptr);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
@@ -257,6 +270,14 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        padBeforePatch();
+        m_assembler.cmpl_im_force32(initialRightValue.m_value, left.offset, left.base);
+        dataLabel = DataLabel32(this);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
     DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
     {
         padBeforePatch();
@@ -265,7 +286,6 @@ public:
     }
 
     static bool supportsFloatingPoint() { return isSSE2Present(); }
-    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
     static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
     static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
     static bool supportsFloatingPointAbs() { return isSSE2Present(); }
@@ -277,6 +297,7 @@ public:
     }
 
     static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
     
     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
     {
@@ -299,6 +320,17 @@ public:
         return label.labelAtOffset(-totalBytes);
     }
     
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
+    {
+        const int opcodeBytes = 1;
+        const int modRMBytes = 1;
+        const int offsetBytes = 0;
+        const int immediateBytes = 4;
+        const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+    
     static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
     {
         X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg);
@@ -310,18 +342,10 @@ public:
         X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base);
     }
 
-#if USE(MASM_PROBE)
-    // For details about probe(), see comment in MacroAssemblerX86_64.h.
-    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-#endif // USE(MASM_PROBE)
-
-private:
-    friend class LinkBuffer;
-    friend class RepatchBuffer;
-
-    static void linkCall(void* code, Call call, FunctionPtr function)
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address address, int32_t initialValue)
     {
-        X86Assembler::linkCall(code, call.m_label, function.value());
+        ASSERT(!address.offset);
+        X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), initialValue, 0, address.base);
     }
 
     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
@@ -334,47 +358,18 @@ private:
         X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
     }
 
-#if USE(MASM_PROBE)
-    inline TrustedImm32 trustedImm32FromPtr(void* ptr)
-    {
-        return TrustedImm32(TrustedImmPtr(ptr));
-    }
-
-    inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
-    {
-        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
-    }
+private:
+    friend class LinkBuffer;
 
-    inline TrustedImm32 trustedImm32FromPtr(void (*function)())
+    static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+        if (call.isFlagSet(Call::Tail))
+            X86Assembler::linkJump(code, call.m_label, function.value());
+        else
+            X86Assembler::linkCall(code, call.m_label, function.value());
     }
-#endif
 };
 
-#if USE(MASM_PROBE)
-
-extern "C" void ctiMasmProbeTrampoline();
-
-// For details on "What code is emitted for the probe?" and "What values are in
-// the saved registers?", see comment for MacroAssemblerX86::probe() in
-// MacroAssemblerX86_64.h.
-
-inline void MacroAssemblerX86::probe(MacroAssemblerX86::ProbeFunction function, void* arg1, void* arg2)
-{
-    push(RegisterID::esp);
-    push(RegisterID::eax);
-    push(trustedImm32FromPtr(arg2));
-    push(trustedImm32FromPtr(arg1));
-    push(trustedImm32FromPtr(function));
-
-    move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::eax);
-    call(RegisterID::eax);
-}
-#endif // USE(MASM_PROBE)
-
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // MacroAssemblerX86_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
index 0fab05fb5..528c60fa5 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,55 +28,534 @@
 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
 #include "MacroAssemblerX86Common.h"
 
+#include <wtf/InlineASM.h>
+
 namespace JSC {
 
-#if USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+#if COMPILER(GCC_OR_CLANG)
+
+// The following are offsets for MacroAssemblerX86Common::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
 
-void MacroAssemblerX86Common::ProbeContext::dumpCPURegisters(const char* indentation)
-{
 #if CPU(X86)
-    #define DUMP_GPREGISTER(_type, _regName) { \
-        int32_t value = reinterpret_cast<int32_t>(cpu._regName); \
-        dataLogF("%s    %6s: 0x%08x   %d\n", indentation, #_regName, value, value) ; \
-    }
-#elif CPU(X86_64)
-    #define DUMP_GPREGISTER(_type, _regName) { \
-        int64_t value = reinterpret_cast<int64_t>(cpu._regName); \
-        dataLogF("%s    %6s: 0x%016llx   %lld\n", indentation, #_regName, value, value) ; \
-    }
+#define PTR_SIZE 4
+#else // CPU(X86_64)
+#define PTR_SIZE 8
 #endif
-    FOR_EACH_CPU_GPREGISTER(DUMP_GPREGISTER)
-    FOR_EACH_CPU_SPECIAL_REGISTER(DUMP_GPREGISTER)
-    #undef DUMP_GPREGISTER
-
-    #define DUMP_FPREGISTER(_type, _regName) { \
-        uint32_t* u = reinterpret_cast<uint32_t*>(&cpu._regName); \
-        double* d = reinterpret_cast<double*>(&cpu._regName); \
-        dataLogF("%s    %6s: 0x%08x%08x 0x%08x%08x   %12g %12g\n", \
-            indentation, #_regName, u[3], u[2], u[1], u[0], d[1], d[0]); \
-    }
-    FOR_EACH_CPU_FPREGISTER(DUMP_FPREGISTER)
-    #undef DUMP_FPREGISTER
-}
 
-void MacroAssemblerX86Common::ProbeContext::dump(const char* indentation)
-{
-    if (!indentation)
-        indentation = "";
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPR_OFFSET (3 * PTR_SIZE)
+#define PROBE_CPU_EAX_OFFSET (PROBE_FIRST_GPR_OFFSET + (0 * PTR_SIZE))
+#define PROBE_CPU_ECX_OFFSET (PROBE_FIRST_GPR_OFFSET + (1 * PTR_SIZE))
+#define PROBE_CPU_EDX_OFFSET (PROBE_FIRST_GPR_OFFSET + (2 * PTR_SIZE))
+#define PROBE_CPU_EBX_OFFSET (PROBE_FIRST_GPR_OFFSET + (3 * PTR_SIZE))
+#define PROBE_CPU_ESP_OFFSET (PROBE_FIRST_GPR_OFFSET + (4 * PTR_SIZE))
+#define PROBE_CPU_EBP_OFFSET (PROBE_FIRST_GPR_OFFSET + (5 * PTR_SIZE))
+#define PROBE_CPU_ESI_OFFSET (PROBE_FIRST_GPR_OFFSET + (6 * PTR_SIZE))
+#define PROBE_CPU_EDI_OFFSET (PROBE_FIRST_GPR_OFFSET + (7 * PTR_SIZE))
+
+#if CPU(X86)
+#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE))
+#else // CPU(X86_64)
+#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE))
+#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPR_OFFSET + (9 * PTR_SIZE))
+#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPR_OFFSET + (10 * PTR_SIZE))
+#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPR_OFFSET + (11 * PTR_SIZE))
+#define PROBE_CPU_R12_OFFSET (PROBE_FIRST_GPR_OFFSET + (12 * PTR_SIZE))
+#define PROBE_CPU_R13_OFFSET (PROBE_FIRST_GPR_OFFSET + (13 * PTR_SIZE))
+#define PROBE_CPU_R14_OFFSET (PROBE_FIRST_GPR_OFFSET + (14 * PTR_SIZE))
+#define PROBE_CPU_R15_OFFSET (PROBE_FIRST_GPR_OFFSET + (15 * PTR_SIZE))
+#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (16 * PTR_SIZE))
+#endif // CPU(X86_64)
+
+#define PROBE_CPU_EIP_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (0 * PTR_SIZE))
+#define PROBE_CPU_EFLAGS_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (1 * PTR_SIZE))
+#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (2 * PTR_SIZE))
+
+#define XMM_SIZE 8
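+// Each slot captures only the low 64 bits (the double lane) of an XMM register.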
+#define PROBE_CPU_XMM0_OFFSET (PROBE_FIRST_XMM_OFFSET + (0 * XMM_SIZE))
+#define PROBE_CPU_XMM1_OFFSET (PROBE_FIRST_XMM_OFFSET + (1 * XMM_SIZE))
+#define PROBE_CPU_XMM2_OFFSET (PROBE_FIRST_XMM_OFFSET + (2 * XMM_SIZE))
+#define PROBE_CPU_XMM3_OFFSET (PROBE_FIRST_XMM_OFFSET + (3 * XMM_SIZE))
+#define PROBE_CPU_XMM4_OFFSET (PROBE_FIRST_XMM_OFFSET + (4 * XMM_SIZE))
+#define PROBE_CPU_XMM5_OFFSET (PROBE_FIRST_XMM_OFFSET + (5 * XMM_SIZE))
+#define PROBE_CPU_XMM6_OFFSET (PROBE_FIRST_XMM_OFFSET + (6 * XMM_SIZE))
+#define PROBE_CPU_XMM7_OFFSET (PROBE_FIRST_XMM_OFFSET + (7 * XMM_SIZE))
+
+#if CPU(X86)
+#define PROBE_SIZE (PROBE_CPU_XMM7_OFFSET + XMM_SIZE)
+#else // CPU(X86_64)
+#define PROBE_CPU_XMM8_OFFSET (PROBE_FIRST_XMM_OFFSET + (8 * XMM_SIZE))
+#define PROBE_CPU_XMM9_OFFSET (PROBE_FIRST_XMM_OFFSET + (9 * XMM_SIZE))
+#define PROBE_CPU_XMM10_OFFSET (PROBE_FIRST_XMM_OFFSET + (10 * XMM_SIZE))
+#define PROBE_CPU_XMM11_OFFSET (PROBE_FIRST_XMM_OFFSET + (11 * XMM_SIZE))
+#define PROBE_CPU_XMM12_OFFSET (PROBE_FIRST_XMM_OFFSET + (12 * XMM_SIZE))
+#define PROBE_CPU_XMM13_OFFSET (PROBE_FIRST_XMM_OFFSET + (13 * XMM_SIZE))
+#define PROBE_CPU_XMM14_OFFSET (PROBE_FIRST_XMM_OFFSET + (14 * XMM_SIZE))
+#define PROBE_CPU_XMM15_OFFSET (PROBE_FIRST_XMM_OFFSET + (15 * XMM_SIZE))
+#define PROBE_SIZE (PROBE_CPU_XMM15_OFFSET + XMM_SIZE)
+#endif // CPU(X86_64)
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerX86Common::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eax) == PROBE_CPU_EAX_OFFSET, ProbeContext_cpu_eax_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ecx) == PROBE_CPU_ECX_OFFSET, ProbeContext_cpu_ecx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edx) == PROBE_CPU_EDX_OFFSET, ProbeContext_cpu_edx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebx) == PROBE_CPU_EBX_OFFSET, ProbeContext_cpu_ebx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esp) == PROBE_CPU_ESP_OFFSET, ProbeContext_cpu_esp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebp) == PROBE_CPU_EBP_OFFSET, ProbeContext_cpu_ebp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esi) == PROBE_CPU_ESI_OFFSET, ProbeContext_cpu_esi_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edi) == PROBE_CPU_EDI_OFFSET, ProbeContext_cpu_edi_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eip) == PROBE_CPU_EIP_OFFSET, ProbeContext_cpu_eip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eflags) == PROBE_CPU_EFLAGS_OFFSET, ProbeContext_cpu_eflags_offset_matches_ctiMasmProbeTrampoline);
+
+#if CPU(X86_64)
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r12) == PROBE_CPU_R12_OFFSET, ProbeContext_cpu_r12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r13) == PROBE_CPU_R13_OFFSET, ProbeContext_cpu_r13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r14) == PROBE_CPU_R14_OFFSET, ProbeContext_cpu_r14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r15) == PROBE_CPU_R15_OFFSET, ProbeContext_cpu_r15_offset_matches_ctiMasmProbeTrampoline);
+#endif // CPU(X86_64)
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm0) == PROBE_CPU_XMM0_OFFSET, ProbeContext_cpu_xmm0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm1) == PROBE_CPU_XMM1_OFFSET, ProbeContext_cpu_xmm1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm2) == PROBE_CPU_XMM2_OFFSET, ProbeContext_cpu_xmm2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm3) == PROBE_CPU_XMM3_OFFSET, ProbeContext_cpu_xmm3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm4) == PROBE_CPU_XMM4_OFFSET, ProbeContext_cpu_xmm4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm5) == PROBE_CPU_XMM5_OFFSET, ProbeContext_cpu_xmm5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm6) == PROBE_CPU_XMM6_OFFSET, ProbeContext_cpu_xmm6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm7) == PROBE_CPU_XMM7_OFFSET, ProbeContext_cpu_xmm7_offset_matches_ctiMasmProbeTrampoline);
+
+#if CPU(X86_64)
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm8) == PROBE_CPU_XMM8_OFFSET, ProbeContext_cpu_xmm8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm9) == PROBE_CPU_XMM9_OFFSET, ProbeContext_cpu_xmm9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm10) == PROBE_CPU_XMM10_OFFSET, ProbeContext_cpu_xmm10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm11) == PROBE_CPU_XMM11_OFFSET, ProbeContext_cpu_xmm11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm12) == PROBE_CPU_XMM12_OFFSET, ProbeContext_cpu_xmm12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm13) == PROBE_CPU_XMM13_OFFSET, ProbeContext_cpu_xmm13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm14) == PROBE_CPU_XMM14_OFFSET, ProbeContext_cpu_xmm14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm15) == PROBE_CPU_XMM15_OFFSET, ProbeContext_cpu_xmm15_offset_matches_ctiMasmProbeTrampoline);
+#endif // CPU(X86_64)
+
+COMPILE_ASSERT(sizeof(MacroAssemblerX86Common::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+
+#undef PROBE_OFFSETOF
+
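The offset macros and assertions above are an instance of a general pattern: hand-written assembly addresses struct fields by numeric offset, and compile-time checks keep those numbers honest. A minimal standalone sketch of the same idea (hypothetical FrameLayout struct and constants, not the real ProbeContext; static_assert stands in for COMPILE_ASSERT):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical context shared between C++ and hand-written assembly.
    struct FrameLayout {
        uint64_t function; // slot 0
        uint64_t gpr0;     // slot 1
    };

    // The assembly addresses the slots with these constants...
    #define FRAME_FUNCTION_OFFSET 0
    #define FRAME_GPR0_OFFSET 8

    // ...and these assertions fail the build if the struct is ever reordered,
    // instead of letting the assembly silently read the wrong slot.
    static_assert(offsetof(FrameLayout, function) == FRAME_FUNCTION_OFFSET, "keep asm offsets in sync");
    static_assert(offsetof(FrameLayout, gpr0) == FRAME_GPR0_OFFSET, "keep asm offsets in sync");
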
+#if CPU(X86)
+asm (
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    "pushfd" "\n"
+
+    // MacroAssemblerX86Common::probe() has already generated code to store some values.
+    // Together with the eflags pushed above, the top of stack now looks like
+    // this:
+    //     esp[0 * ptrSize]: eflags
+    //     esp[1 * ptrSize]: return address / saved eip
+    //     esp[2 * ptrSize]: probeFunction
+    //     esp[3 * ptrSize]: arg1
+    //     esp[4 * ptrSize]: arg2
+    //     esp[5 * ptrSize]: saved eax
+    //     esp[6 * ptrSize]: saved esp
+
+    "movl %esp, %eax" "\n"
+    "subl $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %esp" "\n"
+
+    // The X86_64 ABI specifies that the worst-case stack alignment requirement
+    // is 32 bytes.
+    "andl $~0x1f, %esp" "\n"
+
+    "movl %ebp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%esp)" "\n"
+    "movl %esp, %ebp" "\n" // Save the ProbeContext*.
+
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp)" "\n"
+    "movl %edx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp)" "\n"
+    "movl %ebx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp)" "\n"
+    "movl %esi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp)" "\n"
+    "movl %edi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp)" "\n"
+
+    "movl 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp)" "\n"
+    "movl 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp)" "\n"
+    "movl 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n"
+    "movl 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%ebp)" "\n"
+    "movl 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%ebp)" "\n"
+    "movl 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp)" "\n"
+    "movl 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n"
+
+    "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp)" "\n"
+    "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp)" "\n"
+    "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp)" "\n"
+    "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp)" "\n"
+    "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp)" "\n"
+    "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp)" "\n"
+    "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp)" "\n"
+    "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp)" "\n"
+
+    // Reserve stack space for the arg while maintaining the required stack
+    // pointer 32-byte alignment:
+    "subl $0x20, %esp" "\n"
+    "movl %ebp, 0(%esp)" "\n" // the ProbeContext* arg.
+
+    "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning.
+
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp), %edx" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp), %ebx" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp), %esi" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp), %edi" "\n"
 
-    dataLogF("%sProbeContext %p {\n", indentation, this);
-    dataLogF("%s  probeFunction: %p\n", indentation, probeFunction);
-    dataLogF("%s  arg1: %p %llu\n", indentation, arg1, reinterpret_cast(arg1));
-    dataLogF("%s  arg2: %p %llu\n", indentation, arg2, reinterpret_cast(arg2));
-    dataLogF("%s  cpu: {\n", indentation);
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp), %xmm0" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp), %xmm1" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp), %xmm2" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp), %xmm3" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp), %xmm4" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp), %xmm5" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp), %xmm6" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp), %xmm7" "\n"
 
-    dumpCPURegisters(indentation);
+    // There are 6 more registers left to restore:
+    //     eax, ecx, ebp, esp, eip, and eflags.
+    // We need to handle these last few restores carefully because:
+    //
+    // 1. We need to push the return address on the stack for ret to use.
+    //    That means we need to write to the stack.
+    // 2. The user probe function may have altered the restore value of esp to
+    //    point to the vicinity of one of the restore values for the remaining
+    //    registers left to be restored.
+    //    That means, for requirement 1, we may end up writing over some of the
+    //    restore values. We can check for this, and first copy the restore
+    //    values to a "safe area" on the stack before commencing with the action
+    //    for requirement 1.
+    // 3. For requirement 2, we need to ensure that the "safe area" is
+    //    protected from interrupt handlers overwriting it. Hence, the esp needs
+    //    to be adjusted to include the "safe area" before we start copying
+    //    the restore values.
 
-    dataLogF("%s  }\n", indentation);
-    dataLogF("%s}\n", indentation);
+    "movl %ebp, %eax" "\n"
+    "addl $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %eax" "\n"
+    "cmpl %eax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n"
+    "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+    // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new
+    // esp will be. This time we don't have to 32-byte align it because we're
+    // not using it to store any xmm regs.
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n"
+    "subl $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %eax" "\n"
+    "movl %eax, %esp" "\n"
+
+    "subl $" STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) ", %eax" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%eax)" "\n"
+    "movl %eax, %ebp" "\n"
+
+    SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n"
+    "subl $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %eax" "\n"
+    // At this point, %esp should be < %eax.
+
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl %eax, %esp" "\n"
+
+    "popfd" "\n"
+    "popl %eax" "\n"
+    "popl %ecx" "\n"
+    "popl %ebp" "\n"
+    "ret" "\n"
+);
+#endif // CPU(X86)
+
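The overlap handling in the restore sequence above (and in its x86_64 twin below) is easier to see in C. A rough model of the decision, with illustrative constants rather than the real offsets:

    #include <cstdint>
    #include <cstring>

    enum : uintptr_t { kProbeSize = 64, kEflagsOffset = 40 }; // illustrative only

    // Decide where to finish the restore from. 'ctx' is the ProbeContext's
    // address; 'restoreSP' is the (possibly probe-modified) stack pointer
    // value that will be reinstated.
    uintptr_t chooseRestoreContext(uintptr_t ctx, uintptr_t restoreSP)
    {
        uintptr_t lastSlotStillRead = ctx + kEflagsOffset;
        if (restoreSP > lastSlotStillRead)
            return ctx; // no overlap: the "jg ctiMasmProbeTrampolineEnd" path
        // Overlap: copy the remaining restore values to a safe area below the
        // new stack top; once esp points there, interrupt handlers can no
        // longer scribble over it.
        uintptr_t safe = restoreSP - 2 * kProbeSize;
        std::memcpy(reinterpret_cast<void*>(safe), reinterpret_cast<void*>(ctx), kProbeSize);
        return safe;
    }
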
+#if CPU(X86_64)
+asm (
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    "pushfq" "\n"
+
+    // MacroAssemblerX86Common::probe() has already generated code to store some values.
+    // Together with the rflags pushed above, the top of stack now looks like
+    // this:
+    //     esp[0 * ptrSize]: rflags
+    //     esp[1 * ptrSize]: return address / saved rip
+    //     esp[2 * ptrSize]: probeFunction
+    //     esp[3 * ptrSize]: arg1
+    //     esp[4 * ptrSize]: arg2
+    //     esp[5 * ptrSize]: saved rax
+    //     esp[6 * ptrSize]: saved rsp
+
+    "movq %rsp, %rax" "\n"
+    "subq $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rsp" "\n"
+
+    // The X86_64 ABI specifies that the worst-case stack alignment requirement
+    // is 32 bytes.
+    "andq $~0x1f, %rsp" "\n"
+
+    "movq %rbp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rsp)" "\n"
+    "movq %rsp, %rbp" "\n" // Save the ProbeContext*.
+
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp)" "\n"
+    "movq %rdx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp)" "\n"
+    "movq %rbx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp)" "\n"
+    "movq %rsi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp)" "\n"
+    "movq %rdi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp)" "\n"
+
+    "movq 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp)" "\n"
+    "movq 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp)" "\n"
+    "movq 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n"
+    "movq 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%rbp)" "\n"
+    "movq 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%rbp)" "\n"
+    "movq 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp)" "\n"
+    "movq 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n"
+
+    "movq %r8, " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp)" "\n"
+    "movq %r9, " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp)" "\n"
+    "movq %r10, " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp)" "\n"
+    "movq %r11, " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp)" "\n"
+    "movq %r12, " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp)" "\n"
+    "movq %r13, " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp)" "\n"
+    "movq %r14, " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp)" "\n"
+    "movq %r15, " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp)" "\n"
+
+    "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp)" "\n"
+    "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp)" "\n"
+    "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp)" "\n"
+    "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp)" "\n"
+    "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp)" "\n"
+    "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp)" "\n"
+    "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp)" "\n"
+    "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp)" "\n"
+    "movq %xmm8, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp)" "\n"
+    "movq %xmm9, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp)" "\n"
+    "movq %xmm10, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp)" "\n"
+    "movq %xmm11, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp)" "\n"
+    "movq %xmm12, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp)" "\n"
+    "movq %xmm13, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp)" "\n"
+    "movq %xmm14, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp)" "\n"
+    "movq %xmm15, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp)" "\n"
+
+    "movq %rbp, %rdi" "\n" // the ProbeContext* arg.
+    "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning.
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp), %rdx" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp), %rbx" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp), %rsi" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp), %rdi" "\n"
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp), %r8" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp), %r9" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp), %r10" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp), %r11" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp), %r12" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp), %r13" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp), %r14" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp), %r15" "\n"
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp), %xmm0" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp), %xmm1" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp), %xmm2" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp), %xmm3" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp), %xmm4" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp), %xmm5" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp), %xmm6" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp), %xmm7" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp), %xmm8" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp), %xmm9" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp), %xmm10" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp), %xmm11" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp), %xmm12" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp), %xmm13" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp), %xmm14" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp), %xmm15" "\n"
+
+    // There are 6 more registers left to restore:
+    //     rax, rcx, rbp, rsp, rip, and rflags.
+    // We need to handle these last few restores carefully because:
+    //
+    // 1. We need to push the return address on the stack for ret to use.
+    //    That means we need to write to the stack.
+    // 2. The user probe function may have altered the restore value of esp to
+    //    point to the vicinity of one of the restore values for the remaining
+    //    registers left to be restored.
+    //    That means, for requirement 1, we may end up writing over some of the
+    //    restore values. We can check for this, and first copy the restore
+    //    values to a "safe area" on the stack before commencing with the action
+    //    for requirement 1.
+    // 3. For requirement 2, we need to ensure that the "safe area" is
+    //    protected from interrupt handlers overwriting it. Hence, the esp needs
+    //    to be adjusted to include the "safe area" before we start copying
+    //    the restore values.
+
+    "movq %rbp, %rax" "\n"
+    "addq $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %rax" "\n"
+    "cmpq %rax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n"
+    "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+    // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new
+    // rsp will be. This time we don't have to 32-byte align it because we're
+    // not using it to store any xmm regs.
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n"
+    "subq $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rax" "\n"
+    "movq %rax, %rsp" "\n"
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rax)" "\n"
+    "movq %rax, %rbp" "\n"
+
+    SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n"
+    "subq $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %rax" "\n"
+    // At this point, %rsp should be < %rax.
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq %rax, %rsp" "\n"
+
+    "popfq" "\n"
+    "popq %rax" "\n"
+    "popq %rcx" "\n"
+    "popq %rbp" "\n"
+    "ret" "\n"
+);
+#endif // CPU(X86_64)
+
+#endif // COMPILER(GCC_OR_CLANG)
+
+// What code is emitted for the probe?
+// ==================================
+// We want to keep the size of the emitted probe invocation code as compact as
+// possible to minimize the perturbation to the JIT generated code. However,
+// we also need to preserve the CPU registers and set up the ProbeContext to be
+// passed to the user probe function.
+//
+// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
+// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
+// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation
+// work i.e. saving the CPUState (and setting up the ProbeContext), calling the
+// user probe function, and restoring the CPUState before returning to JIT
+// generated code.
+//
+// What registers need to be saved?
+// ===============================
+// The registers are saved for 2 reasons:
+// 1. To preserve their state in the JITted code. This means that all registers
+//    that are not callee-saved need to be saved. We also need to save the
+//    condition code registers because the probe can be inserted between a test
+//    and a branch.
+// 2. To allow the probe to inspect the values of the registers for debugging
+//    purposes. This means all registers need to be saved.
+//
+// In summary, save everything. But for reasons stated above, we should do the
+// minimum here and let ctiMasmProbeTrampoline do the heavy lifting to save the
+// full set.
+//
+// What values are in the saved registers?
+// ======================================
+// Conceptually, the saved registers should contain values as if the probe
+// is not present in the JIT generated code. Hence, they should contain values
+// that are expected at the start of the instruction immediately following the
+// probe.
+//
+// Specifically, the saved stack pointer register will point to the stack
+// position before we push the ProbeContext frame. The saved rip will point to
+// the address of the instruction immediately following the probe. 
+
+void MacroAssemblerX86Common::probe(MacroAssemblerX86Common::ProbeFunction function, void* arg1, void* arg2)
+{
+    push(RegisterID::esp);
+    push(RegisterID::eax);
+    move(TrustedImmPtr(arg2), RegisterID::eax);
+    push(RegisterID::eax);
+    move(TrustedImmPtr(arg1), RegisterID::eax);
+    push(RegisterID::eax);
+    move(TrustedImmPtr(reinterpret_cast<void*>(function)), RegisterID::eax);
+    push(RegisterID::eax);
+    move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), RegisterID::eax);
+    call(RegisterID::eax);
 }
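For reference, a hedged usage sketch: a probe function receives the saved state through the ProbeContext and may mutate it before it is restored. The probe body below is illustrative, not code from this patch:

    // Hypothetical probe inserted during code generation to inspect eax.
    static void dumpEax(MacroAssemblerX86Common::ProbeContext* context)
    {
        dataLogF("eax = 0x%llx\n", (unsigned long long)context->cpu.eax);
    }

    // At some point while emitting code (assuming a masm object 'jit'):
    //     jit.probe(dumpEax, nullptr, nullptr);
    // The two trailing void* arguments arrive as context->arg1 / context->arg2.
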
 
-#endif // USE(MASM_PROBE)
+#endif // ENABLE(MASM_PROBE)
+
+#if CPU(X86) && !OS(MAC_OS_X)
+MacroAssemblerX86Common::SSE2CheckState MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
+MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_sse4_1CheckState = CPUIDCheckState::NotChecked;
+MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_avxCheckState = CPUIDCheckState::NotChecked;
+MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_lzcntCheckState = CPUIDCheckState::NotChecked;
+MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_bmi1CheckState = CPUIDCheckState::NotChecked;
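These statics back lazy, once-per-process CPU feature detection: each starts as NotChecked and is resolved with cpuid on first query. A standalone sketch of the idiom, using GCC/Clang's __get_cpuid rather than this class's private helpers:

    #include <cpuid.h>

    enum class CheckState { NotChecked, Clear, Set };
    static CheckState s_lzcntState = CheckState::NotChecked;

    static bool supportsLZCNTSketch()
    {
        if (s_lzcntState == CheckState::NotChecked) {
            unsigned eax, ebx, ecx, edx;
            // LZCNT is advertised in CPUID leaf 0x80000001, ECX bit 5.
            bool present = __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)
                && (ecx & (1u << 5));
            // Benign race: every thread computes the same answer.
            s_lzcntState = present ? CheckState::Set : CheckState::Clear;
        }
        return s_lzcntState == CheckState::Set;
    }
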
 
 } // namespace JSC
 
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index ac09eaca4..695e640f0 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,22 +23,35 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef MacroAssemblerX86Common_h
-#define MacroAssemblerX86Common_h
+#pragma once
 
 #if ENABLE(ASSEMBLER)
 
 #include "X86Assembler.h"
 #include "AbstractMacroAssembler.h"
+#include <array>
+
+#if COMPILER(MSVC)
+#include <intrin.h>
+#endif
 
 namespace JSC {
 
-class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> {
 public:
 #if CPU(X86_64)
-    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
-#endif
+    // Use this directly only if you're not generating code with it.
+    static const X86Registers::RegisterID s_scratchRegister = X86Registers::r11;
 
+    // Use this when generating code, so that any disallowed use of the scratch
+    // register is caught at runtime.
+    X86Registers::RegisterID scratchRegister()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return s_scratchRegister;
+    }
+#endif
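The accessor turns the "don't touch r11 here" convention into a runtime check. Reduced to its essence (a standalone sketch, not the class's actual members; assert stands in for RELEASE_ASSERT):

    #include <cassert>

    struct ScratchGuardSketch {
        // Cleared by code paths that use r11 directly, so that any nested
        // attempt to hand it out as a scratch register asserts.
        bool allowScratchRegister = true;

        int scratchRegister()
        {
            assert(allowScratchRegister);
            return 11; // r11
        }
    };
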
+    
 protected:
     static const int DoubleConditionBitInvert = 0x10;
     static const int DoubleConditionBitSpecial = 0x20;
@@ -73,6 +86,7 @@ public:
         NonZero = X86Assembler::ConditionNE
     };
 
+    // FIXME: it would be neat to rename this to FloatingPointCondition in every assembler.
     enum DoubleCondition {
         // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
         DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
@@ -117,6 +131,33 @@ public:
         m_assembler.addl_im(imm.m_value, address.offset, address.base);
     }
 
+    void add32(TrustedImm32 imm, BaseIndex address)
+    {
+        m_assembler.addl_im(imm.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void add8(TrustedImm32 imm, Address address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.addb_im(imm8.m_value, address.offset, address.base);
+    }
+
+    void add8(TrustedImm32 imm, BaseIndex address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.addb_im(imm8.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void add16(TrustedImm32 imm, Address address)
+    {
+        m_assembler.addw_im(imm.m_value, address.offset, address.base);
+    }
+
+    void add16(TrustedImm32 imm, BaseIndex address)
+    {
+        m_assembler.addw_im(imm.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
     void add32(TrustedImm32 imm, RegisterID dest)
     {
         if (imm.m_value == 1)
@@ -135,11 +176,66 @@ public:
         m_assembler.addl_rm(src, dest.offset, dest.base);
     }
 
+    void add32(RegisterID src, BaseIndex dest)
+    {
+        m_assembler.addl_rm(src, dest.offset, dest.base, dest.index, dest.scale);
+    }
+
+    void add8(RegisterID src, Address dest)
+    {
+        m_assembler.addb_rm(src, dest.offset, dest.base);
+    }
+
+    void add8(RegisterID src, BaseIndex dest)
+    {
+        m_assembler.addb_rm(src, dest.offset, dest.base, dest.index, dest.scale);
+    }
+
+    void add16(RegisterID src, Address dest)
+    {
+        m_assembler.addw_rm(src, dest.offset, dest.base);
+    }
+
+    void add16(RegisterID src, BaseIndex dest)
+    {
+        m_assembler.addw_rm(src, dest.offset, dest.base, dest.index, dest.scale);
+    }
+
     void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
+        if (!imm.m_value) {
+            zeroExtend32ToPtr(src, dest);
+            return;
+        }
+
+        if (src == dest) {
+            add32(imm, dest);
+            return;
+        }
+
         m_assembler.leal_mr(imm.m_value, src, dest);
     }
-    
+
+    void add32(RegisterID a, RegisterID b, RegisterID dest)
+    {
+        x86Lea32(BaseIndex(a, b, TimesOne), dest);
+    }
+
+    void x86Lea32(BaseIndex index, RegisterID dest)
+    {
+        if (!index.scale && !index.offset) {
+            if (index.base == dest) {
+                add32(index.index, dest);
+                return;
+            }
+            if (index.index == dest) {
+                add32(index.base, dest);
+                return;
+            }
+        }
+        m_assembler.leal_mr(index.offset, index.base, index.index, index.scale, dest);
+    }
+
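The lea-based helpers above rely on two properties of the instruction: it computes base + index*scale + displacement in a single step, and it does not write EFLAGS. So, roughly:

    // add32(a, b, dest) with three distinct registers emits a single
    //     leal (%eax,%ecx), %edx        // edx = eax + ecx
    // and, because lea leaves EFLAGS untouched (unlike addl), it is also safe
    // to schedule between a compare and the branch that consumes its flags.
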
     void and32(RegisterID src, RegisterID dest)
     {
         m_assembler.andl_rr(src, dest);
@@ -172,24 +268,77 @@ public:
         else if (op1 == dest)
             and32(op2, dest);
         else {
-            move(op2, dest);
+            move32IfNeeded(op2, dest);
+            and32(op1, dest);
+        }
+    }
+
+    void and32(Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            and32(op1, dest);
+        else if (op1.base == dest) {
+            load32(op1, dest);
+            and32(op2, dest);
+        } else {
+            zeroExtend32ToPtr(op2, dest);
             and32(op1, dest);
         }
     }
 
+    void and32(RegisterID op1, Address op2, RegisterID dest)
+    {
+        and32(op2, op1, dest);
+    }
+
     void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
-        move(src, dest);
+        move32IfNeeded(src, dest);
         and32(imm, dest);
     }
 
-    void lshift32(RegisterID shift_amount, RegisterID dest)
+    void countLeadingZeros32(RegisterID src, RegisterID dst)
     {
-        ASSERT(shift_amount != dest);
+        if (supportsLZCNT()) {
+            m_assembler.lzcnt_rr(src, dst);
+            return;
+        }
+        m_assembler.bsr_rr(src, dst);
+        clz32AfterBsr(dst);
+    }
+
+    void countLeadingZeros32(Address src, RegisterID dst)
+    {
+        if (supportsLZCNT()) {
+            m_assembler.lzcnt_mr(src.offset, src.base, dst);
+            return;
+        }
+        m_assembler.bsr_mr(src.offset, src.base, dst);
+        clz32AfterBsr(dst);
+    }
+
+    void countTrailingZeros32(RegisterID src, RegisterID dst)
+    {
+        if (supportsBMI1()) {
+            m_assembler.tzcnt_rr(src, dst);
+            return;
+        }
+        m_assembler.bsf_rr(src, dst);
+        ctzAfterBsf<32>(dst);
+    }
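Without LZCNT/TZCNT the code falls back to bsr/bsf plus a fix-up, since bsr yields the index of the highest set bit and leaves its destination undefined for a zero input. A reference model of what the countLeadingZeros32 fallback computes (clz32AfterBsr itself is defined elsewhere in this class):

    #include <cstdint>

    uint32_t clz32Reference(uint32_t value)
    {
        if (!value)
            return 32;                  // bsr's output is undefined for 0; special-cased
        uint32_t highestSetBit = 0;     // what bsr returns
        while (value >>= 1)
            ++highestSetBit;
        return 31 - highestSetBit;      // the post-bsr fix-up
    }
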
 
+    // Only used for testing purposes.
+    void illegalInstruction()
+    {
+        m_assembler.illegalInstruction();
+    }
+    
+    void lshift32(RegisterID shift_amount, RegisterID dest)
+    {
         if (shift_amount == X86Registers::ecx)
             m_assembler.shll_CLr(dest);
         else {
+            ASSERT(shift_amount != dest);
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
@@ -203,8 +352,7 @@ public:
     {
         ASSERT(shift_amount != dest);
 
-        if (src != dest)
-            move(src, dest);
+        move32IfNeeded(src, dest);
         lshift32(shift_amount, dest);
     }
 
@@ -215,8 +363,7 @@ public:
     
     void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        if (src != dest)
-            move(src, dest);
+        move32IfNeeded(src, dest);
         lshift32(imm, dest);
     }
     
@@ -225,16 +372,80 @@ public:
         m_assembler.imull_rr(src, dest);
     }
 
+    void mul32(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src2 == dest) {
+            m_assembler.imull_rr(src1, dest);
+            return;
+        }
+        move32IfNeeded(src1, dest);
+        m_assembler.imull_rr(src2, dest);
+    }
+
     void mul32(Address src, RegisterID dest)
     {
         m_assembler.imull_mr(src.offset, src.base, dest);
     }
+
+    void mul32(Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            mul32(op1, dest);
+        else if (op1.base == dest) {
+            load32(op1, dest);
+            mul32(op2, dest);
+        } else {
+            zeroExtend32ToPtr(op2, dest);
+            mul32(op1, dest);
+        }
+    }
+
+    void mul32(RegisterID src1, Address src2, RegisterID dest)
+    {
+        mul32(src2, src1, dest);
+    }
     
     void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
         m_assembler.imull_i32r(src, imm.m_value, dest);
     }
 
+    void x86ConvertToDoubleWord32()
+    {
+        m_assembler.cdq();
+    }
+
+    void x86ConvertToDoubleWord32(RegisterID eax, RegisterID edx)
+    {
+        ASSERT_UNUSED(eax, eax == X86Registers::eax);
+        ASSERT_UNUSED(edx, edx == X86Registers::edx);
+        x86ConvertToDoubleWord32();
+    }
+
+    void x86Div32(RegisterID denominator)
+    {
+        m_assembler.idivl_r(denominator);
+    }
+
+    void x86Div32(RegisterID eax, RegisterID edx, RegisterID denominator)
+    {
+        ASSERT_UNUSED(eax, eax == X86Registers::eax);
+        ASSERT_UNUSED(edx, edx == X86Registers::edx);
+        x86Div32(denominator);
+    }
+
+    void x86UDiv32(RegisterID denominator)
+    {
+        m_assembler.divl_r(denominator);
+    }
+
+    void x86UDiv32(RegisterID eax, RegisterID edx, RegisterID denominator)
+    {
+        ASSERT_UNUSED(eax, eax == X86Registers::eax);
+        ASSERT_UNUSED(edx, edx == X86Registers::edx);
+        x86UDiv32(denominator);
+    }
+
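These wrappers encode x86's fixed-register division convention: idiv divides the 64-bit value in edx:eax by its operand, leaving the quotient in eax and the remainder in edx, so signed division must first sign-extend eax into edx. A typical emission sequence (sketch; operand setup elided):

    // Signed 32-bit division with the dividend already in eax:
    //     x86ConvertToDoubleWord32();   // cdq: sign-extend eax into edx
    //     x86Div32(denominator);        // idiv: eax = quotient, edx = remainder
    // For x86UDiv32, edx must instead be zeroed beforehand.
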
     void neg32(RegisterID srcDest)
     {
         m_assembler.negl_r(srcDest);
@@ -277,24 +488,42 @@ public:
         else if (op1 == dest)
             or32(op2, dest);
         else {
-            move(op2, dest);
+            move32IfNeeded(op2, dest);
+            or32(op1, dest);
+        }
+    }
+
+    void or32(Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            or32(op1, dest);
+        else if (op1.base == dest) {
+            load32(op1, dest);
+            or32(op2, dest);
+        } else {
+            zeroExtend32ToPtr(op2, dest);
             or32(op1, dest);
         }
     }
 
+    void or32(RegisterID op1, Address op2, RegisterID dest)
+    {
+        or32(op2, op1, dest);
+    }
+
     void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
-        move(src, dest);
+        move32IfNeeded(src, dest);
         or32(imm, dest);
     }
 
     void rshift32(RegisterID shift_amount, RegisterID dest)
     {
-        ASSERT(shift_amount != dest);
-
         if (shift_amount == X86Registers::ecx)
             m_assembler.sarl_CLr(dest);
         else {
+            ASSERT(shift_amount != dest);
+            
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
@@ -308,8 +537,7 @@ public:
     {
         ASSERT(shift_amount != dest);
 
-        if (src != dest)
-            move(src, dest);
+        move32IfNeeded(src, dest);
         rshift32(shift_amount, dest);
     }
 
@@ -320,18 +548,17 @@ public:
     
     void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        if (src != dest)
-            move(src, dest);
+        move32IfNeeded(src, dest);
         rshift32(imm, dest);
     }
     
     void urshift32(RegisterID shift_amount, RegisterID dest)
     {
-        ASSERT(shift_amount != dest);
-
         if (shift_amount == X86Registers::ecx)
             m_assembler.shrl_CLr(dest);
         else {
+            ASSERT(shift_amount != dest);
+        
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
@@ -345,8 +572,7 @@ public:
     {
         ASSERT(shift_amount != dest);
 
-        if (src != dest)
-            move(src, dest);
+        move32IfNeeded(src, dest);
         urshift32(shift_amount, dest);
     }
 
@@ -357,16 +583,64 @@ public:
     
     void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        if (src != dest)
-            move(src, dest);
+        move32IfNeeded(src, dest);
         urshift32(imm, dest);
     }
-    
+
+    void rotateRight32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.rorl_i8r(imm.m_value, dest);
+    }
+
+    void rotateRight32(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.rorl_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only rotate by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.rorl_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void rotateLeft32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.roll_i8r(imm.m_value, dest);
+    }
+
+    void rotateLeft32(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.roll_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only rotate by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.roll_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
     void sub32(RegisterID src, RegisterID dest)
     {
         m_assembler.subl_rr(src, dest);
     }
-    
+
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        if (dest == right) {
+            neg32(dest);
+            add32(left, dest);
+            return;
+        }
+        move(left, dest);
+        sub32(right, dest);
+    }
+
     void sub32(TrustedImm32 imm, RegisterID dest)
     {
         if (imm.m_value == 1)
@@ -406,9 +680,9 @@ public:
     void xor32(TrustedImm32 imm, RegisterID dest)
     {
         if (imm.m_value == -1)
-        m_assembler.notl_r(dest);
+            m_assembler.notl_r(dest);
         else
-        m_assembler.xorl_ir(imm.m_value, dest);
+            m_assembler.xorl_ir(imm.m_value, dest);
     }
 
     void xor32(RegisterID src, Address dest)
@@ -428,27 +702,70 @@ public:
         else if (op1 == dest)
             xor32(op2, dest);
         else {
-            move(op2, dest);
+            move32IfNeeded(op2, dest);
+            xor32(op1, dest);
+        }
+    }
+
+    void xor32(Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            xor32(op1, dest);
+        else if (op1.base == dest) {
+            load32(op1, dest);
+            xor32(op2, dest);
+        } else {
+            zeroExtend32ToPtr(op2, dest);
             xor32(op1, dest);
         }
     }
 
+    void xor32(RegisterID op1, Address op2, RegisterID dest)
+    {
+        xor32(op2, op1, dest);
+    }
+
     void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
-        move(src, dest);
+        move32IfNeeded(src, dest);
         xor32(imm, dest);
     }
 
+    void not32(RegisterID srcDest)
+    {
+        m_assembler.notl_r(srcDest);
+    }
+
+    void not32(Address dest)
+    {
+        m_assembler.notl_m(dest.offset, dest.base);
+    }
+
     void sqrtDouble(FPRegisterID src, FPRegisterID dst)
     {
         m_assembler.sqrtsd_rr(src, dst);
     }
 
+    void sqrtDouble(Address src, FPRegisterID dst)
+    {
+        m_assembler.sqrtsd_mr(src.offset, src.base, dst);
+    }
+
+    void sqrtFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.sqrtss_rr(src, dst);
+    }
+
+    void sqrtFloat(Address src, FPRegisterID dst)
+    {
+        m_assembler.sqrtss_mr(src.offset, src.base, dst);
+    }
+
     void absDouble(FPRegisterID src, FPRegisterID dst)
     {
         ASSERT(src != dst);
         static const double negativeZeroConstant = -0.0;
-        loadDouble(&negativeZeroConstant, dst);
+        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
         m_assembler.andnpd_rr(src, dst);
     }
 
@@ -456,10 +773,79 @@ public:
     {
         ASSERT(src != dst);
         static const double negativeZeroConstant = -0.0;
-        loadDouble(&negativeZeroConstant, dst);
+        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
         m_assembler.xorpd_rr(src, dst);
     }
 
+    void ceilDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti);
+    }
+
+    void ceilDouble(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti);
+    }
+
+    void ceilFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti);
+    }
+
+    void ceilFloat(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti);
+    }
+
+    void floorDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
+    }
+
+    void floorDouble(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
+    }
+
+    void floorFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
+    }
+
+    void floorFloat(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
+    }
+
+    void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::ToNearestWithTiesToEven);
+    }
+
+    void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::ToNearestWithTiesToEven);
+    }
+
+    void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardZero);
+    }
+
+    void roundTowardZeroDouble(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardZero);
+    }
+
+    void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardZero);
+    }
+
+    void roundTowardZeroFloat(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardZero);
+    }
 
     // Memory access operations:
     //
@@ -525,15 +911,25 @@ public:
         m_assembler.movzbl_mr(address.offset, address.base, dest);
     }
     
-    void load8Signed(BaseIndex address, RegisterID dest)
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
     }
 
-    void load8Signed(ImplicitAddress address, RegisterID dest)
+    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
     {
         m_assembler.movsbl_mr(address.offset, address.base, dest);
     }
+
+    void zeroExtend8To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movzbl_rr(src, dest);
+    }
+    
+    void signExtend8To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movsbl_rr(src, dest);
+    }
     
     void load16(BaseIndex address, RegisterID dest)
     {
@@ -545,16 +941,26 @@ public:
         m_assembler.movzwl_mr(address.offset, address.base, dest);
     }
 
-    void load16Signed(BaseIndex address, RegisterID dest)
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
     }
     
-    void load16Signed(Address address, RegisterID dest)
+    void load16SignedExtendTo32(Address address, RegisterID dest)
     {
         m_assembler.movswl_mr(address.offset, address.base, dest);
     }
 
+    void zeroExtend16To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movzwl_rr(src, dest);
+    }
+    
+    void signExtend16To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movswl_rr(src, dest);
+    }
+    
     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
     {
         padBeforePatch();
@@ -582,16 +988,26 @@ public:
         m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
     }
 
+    void storeZero32(ImplicitAddress address)
+    {
+        store32(TrustedImm32(0), address);
+    }
+
+    void storeZero32(BaseIndex address)
+    {
+        store32(TrustedImm32(0), address);
+    }
+
     void store8(TrustedImm32 imm, Address address)
     {
-        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
-        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.movb_i8m(imm8.m_value, address.offset, address.base);
     }
 
     void store8(TrustedImm32 imm, BaseIndex address)
     {
-        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
-        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.movb_i8m(imm8.m_value, address.offset, address.base, address.index, address.scale);
     }
 
     static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
@@ -672,26 +1088,45 @@ public:
         m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
     }
 
-
-    // Floating-point operation:
-    //
-    // Presently only supports SSE, not x87 floating point.
-
-    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    void store16(RegisterID src, Address address)
     {
-        ASSERT(isSSE2Present());
+#if CPU(X86)
+        // On 32-bit x86 we can only store from the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (src >= 4) {
+            // Pick a temporary register.
+            RegisterID temp = getUnusedRegister(address);
+
+            // Swap to the temporary register to perform the store.
+            swap(src, temp);
+            m_assembler.movw_rm(temp, address.offset, address.base);
+            swap(src, temp);
+            return;
+        }
+#endif
+        m_assembler.movw_rm(src, address.offset, address.base);
+    }
+
+
+    // Floating-point operation:
+    //
+    // Presently only supports SSE, not x87 floating point.
+
+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
         if (src != dest)
-            m_assembler.movsd_rr(src, dest);
+            m_assembler.movaps_rr(src, dest);
     }
 
-    void loadDouble(const void* address, FPRegisterID dest)
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
     {
 #if CPU(X86)
         ASSERT(isSSE2Present());
-        m_assembler.movsd_mr(address, dest);
+        m_assembler.movsd_mr(address.m_value, dest);
 #else
-        move(TrustedImmPtr(address), scratchRegister);
-        loadDouble(scratchRegister, dest);
+        move(address, scratchRegister());
+        loadDouble(scratchRegister(), dest);
 #endif
     }
 
@@ -700,12 +1135,19 @@ public:
         ASSERT(isSSE2Present());
         m_assembler.movsd_mr(address.offset, address.base, dest);
     }
-    
+
     void loadDouble(BaseIndex address, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
         m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
     }
+
+    void loadFloat(ImplicitAddress address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movss_mr(address.offset, address.base, dest);
+    }
+
     void loadFloat(BaseIndex address, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
@@ -723,7 +1165,13 @@ public:
         ASSERT(isSSE2Present());
         m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
     }
-    
+
+    void storeFloat(FPRegisterID src, ImplicitAddress address)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movss_rm(src, address.offset, address.base);
+    }
+
     void storeFloat(FPRegisterID src, BaseIndex address)
     {
         ASSERT(isSSE2Present());
@@ -736,33 +1184,144 @@ public:
         m_assembler.cvtsd2ss_rr(src, dst);
     }
 
+    void convertDoubleToFloat(Address address, FPRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsd2ss_mr(address.offset, address.base, dst);
+    }
+
     void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
     {
         ASSERT(isSSE2Present());
         m_assembler.cvtss2sd_rr(src, dst);
     }
 
-    void addDouble(FPRegisterID src, FPRegisterID dest)
+    void convertFloatToDouble(Address address, FPRegisterID dst)
     {
         ASSERT(isSSE2Present());
-        m_assembler.addsd_rr(src, dest);
+        m_assembler.cvtss2sd_mr(address.offset, address.base, dst);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        addDouble(src, dest, dest);
     }
 
     void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
     {
-        ASSERT(isSSE2Present());
-        if (op1 == dest)
-            addDouble(op2, dest);
+        if (supportsAVX())
+            m_assembler.vaddsd_rr(op1, op2, dest);
         else {
-            moveDouble(op2, dest);
-            addDouble(op1, dest);
+            ASSERT(isSSE2Present());
+            if (op1 == dest)
+                m_assembler.addsd_rr(op2, dest);
+            else {
+                moveDouble(op2, dest);
+                m_assembler.addsd_rr(op1, dest);
+            }
         }
     }
 
     void addDouble(Address src, FPRegisterID dest)
     {
-        ASSERT(isSSE2Present());
-        m_assembler.addsd_mr(src.offset, src.base, dest);
+        addDouble(src, dest, dest);
+    }
+
+    void addDouble(Address op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddsd_mr(op1.offset, op1.base, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.addsd_mr(op1.offset, op1.base, dest);
+                return;
+            }
+
+            loadDouble(op1, dest);
+            addDouble(op2, dest);
+        }
+    }
+
+    void addDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        addDouble(op2, op1, dest);
+    }
+
+    void addDouble(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddsd_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.addsd_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
+                return;
+            }
+            loadDouble(op1, dest);
+            addDouble(op2, dest);
+        }
+    }
+
+    void addFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        addFloat(src, dest, dest);
+    }
+
+    void addFloat(Address src, FPRegisterID dest)
+    {
+        addFloat(src, dest, dest);
+    }
+
+    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddss_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op1 == dest)
+                m_assembler.addss_rr(op2, dest);
+            else {
+                moveDouble(op2, dest);
+                m_assembler.addss_rr(op1, dest);
+            }
+        }
+    }
+
+    void addFloat(Address op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddss_mr(op1.offset, op1.base, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.addss_mr(op1.offset, op1.base, dest);
+                return;
+            }
+
+            loadFloat(op1, dest);
+            addFloat(op2, dest);
+        }
+    }
+
+    void addFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        addFloat(op2, op1, dest);
+    }
+
+    void addFloat(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddss_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.addss_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
+                return;
+            }
+            loadFloat(op1, dest);
+            addFloat(op2, dest);
+        }
     }
 
     void divDouble(FPRegisterID src, FPRegisterID dest)
@@ -786,48 +1345,314 @@ public:
         m_assembler.divsd_mr(src.offset, src.base, dest);
     }
 
-    void subDouble(FPRegisterID src, FPRegisterID dest)
+    void divFloat(FPRegisterID src, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
-        m_assembler.subsd_rr(src, dest);
+        m_assembler.divss_rr(src, dest);
+    }
+
+    void divFloat(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.divss_mr(src.offset, src.base, dest);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        subDouble(dest, src, dest);
     }
 
     void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
     {
-        // B := A - B is invalid.
-        ASSERT(op1 == dest || op2 != dest);
+        if (supportsAVX())
+            m_assembler.vsubsd_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
 
-        moveDouble(op1, dest);
-        subDouble(op2, dest);
+            // B := A - B is invalid.
+            ASSERT(op1 == dest || op2 != dest);
+            moveDouble(op1, dest);
+            m_assembler.subsd_rr(op2, dest);
+        }
+    }
+
+    void subDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubsd_mr(op1, op2.offset, op2.base, dest);
+        else {
+            moveDouble(op1, dest);
+            m_assembler.subsd_mr(op2.offset, op2.base, dest);
+        }
+    }
+
+    void subDouble(FPRegisterID op1, BaseIndex op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubsd_mr(op1, op2.offset, op2.base, op2.index, op2.scale, dest);
+        else {
+            moveDouble(op1, dest);
+            m_assembler.subsd_mr(op2.offset, op2.base, op2.index, op2.scale, dest);
+        }
     }
 
     void subDouble(Address src, FPRegisterID dest)
     {
-        ASSERT(isSSE2Present());
-        m_assembler.subsd_mr(src.offset, src.base, dest);
+        subDouble(dest, src, dest);
+    }
+
+    void subFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        subFloat(dest, src, dest);
+    }
+
+    void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubss_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            // B := A - B is invalid.
+            ASSERT(op1 == dest || op2 != dest);
+            moveDouble(op1, dest);
+            m_assembler.subss_rr(op2, dest);
+        }
+    }
+
+    void subFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubss_mr(op1, op2.offset, op2.base, dest);
+        else {
+            moveDouble(op1, dest);
+            m_assembler.subss_mr(op2.offset, op2.base, dest);
+        }
+    }
+
+    void subFloat(FPRegisterID op1, BaseIndex op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubss_mr(op1, op2.offset, op2.base, op2.index, op2.scale, dest);
+        else {
+            moveDouble(op1, dest);
+            m_assembler.subss_mr(op2.offset, op2.base, op2.index, op2.scale, dest);
+        }
+    }
+
+    void subFloat(Address src, FPRegisterID dest)
+    {
+        subFloat(dest, src, dest);
     }
 
     void mulDouble(FPRegisterID src, FPRegisterID dest)
     {
-        ASSERT(isSSE2Present());
-        m_assembler.mulsd_rr(src, dest);
+        mulDouble(src, dest, dest);
     }
 
     void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
     {
-        ASSERT(isSSE2Present());
-        if (op1 == dest)
-            mulDouble(op2, dest);
+        if (supportsAVX())
+            m_assembler.vmulsd_rr(op1, op2, dest);
         else {
-            moveDouble(op2, dest);
-            mulDouble(op1, dest);
+            ASSERT(isSSE2Present());
+            if (op1 == dest)
+                m_assembler.mulsd_rr(op2, dest);
+            else {
+                moveDouble(op2, dest);
+                m_assembler.mulsd_rr(op1, dest);
+            }
         }
     }
 
     void mulDouble(Address src, FPRegisterID dest)
     {
-        ASSERT(isSSE2Present());
-        m_assembler.mulsd_mr(src.offset, src.base, dest);
+        mulDouble(src, dest, dest);
+    }
+
+    void mulDouble(Address op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulsd_mr(op1.offset, op1.base, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.mulsd_mr(op1.offset, op1.base, dest);
+                return;
+            }
+            loadDouble(op1, dest);
+            mulDouble(op2, dest);
+        }
+    }
+
+    void mulDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        return mulDouble(op2, op1, dest);
+    }
+
+    void mulDouble(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulsd_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.mulsd_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
+                return;
+            }
+            loadDouble(op1, dest);
+            mulDouble(op2, dest);
+        }
+    }
+
+    void mulFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        mulFloat(src, dest, dest);
+    }
+
+    void mulFloat(Address src, FPRegisterID dest)
+    {
+        mulFloat(src, dest, dest);
+    }
+
+    void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulss_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op1 == dest)
+                m_assembler.mulss_rr(op2, dest);
+            else {
+                moveDouble(op2, dest);
+                m_assembler.mulss_rr(op1, dest);
+            }
+        }
+    }
+
+    void mulFloat(Address op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulss_mr(op1.offset, op1.base, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.mulss_mr(op1.offset, op1.base, dest);
+                return;
+            }
+            loadFloat(op1, dest);
+            mulFloat(op2, dest);
+        }
+    }
+
+    void mulFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        mulFloat(op2, op1, dest);
+    }
+
+    void mulFloat(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulss_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.mulss_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
+                return;
+            }
+            loadFloat(op1, dest);
+            mulFloat(op2, dest);
+        }
+    }
+
+    void andDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        // ANDPS is defined on the full 128 bits and is shorter than ANDPD.
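+        // (ANDPD differs only by an extra 0x66 operand-size prefix; the upper
+        // lanes are don't-care bits here, so the shorter form is equivalent.)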
+        m_assembler.andps_rr(src, dst);
+    }
+
+    void andDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            andDouble(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            andDouble(src1, dst);
+        }
+    }
+
+    void andFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.andps_rr(src, dst);
+    }
+
+    void andFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            andFloat(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            andFloat(src1, dst);
+        }
+    }
+
+    void orDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.orps_rr(src, dst);
+    }
+
+    void orDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            orDouble(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            orDouble(src1, dst);
+        }
+    }
+
+    void orFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.orps_rr(src, dst);
+    }
+
+    void orFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            orFloat(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            orFloat(src1, dst);
+        }
+    }
+
+    void xorDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.xorps_rr(src, dst);
+    }
+
+    void xorDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            xorDouble(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            xorDouble(src1, dst);
+        }
+    }
+
+    void xorFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.xorps_rr(src, dst);
+    }
+
+    void xorFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            xorFloat(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            xorFloat(src1, dst);
+        }
     }
 
     void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
@@ -842,6 +1667,18 @@ public:
         m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
     }
 
+    void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsi2ss_rr(src, dest);
+    }
+
+    void convertInt32ToFloat(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsi2ss_mr(src.offset, src.base, dest);
+    }
+
     Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
     {
         ASSERT(isSSE2Present());
@@ -850,27 +1687,18 @@ public:
             m_assembler.ucomisd_rr(left, right);
         else
             m_assembler.ucomisd_rr(right, left);
+        return jumpAfterFloatingPointCompare(cond, left, right);
+    }
 
-        if (cond == DoubleEqual) {
-            if (left == right)
-                return Jump(m_assembler.jnp());
-            Jump isUnordered(m_assembler.jp());
-            Jump result = Jump(m_assembler.je());
-            isUnordered.link(this);
-            return result;
-        } else if (cond == DoubleNotEqualOrUnordered) {
-            if (left == right)
-                return Jump(m_assembler.jp());
-            Jump isUnordered(m_assembler.jp());
-            Jump isEqual(m_assembler.je());
-            isUnordered.link(this);
-            Jump result = jump();
-            isEqual.link(this);
-            return result;
-        }
+    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        ASSERT(isSSE2Present());
 
-        ASSERT(!(cond & DoubleConditionBitSpecial));
-        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomiss_rr(left, right);
+        else
+            m_assembler.ucomiss_rr(right, left);
+        return jumpAfterFloatingPointCompare(cond, left, right);
     }
 
     // Truncates 'src' to an integer, and places the result in 'dest'.
@@ -890,15 +1718,13 @@ public:
         ASSERT(isSSE2Present());
         m_assembler.cvttsd2si_rr(src, dest);
     }
-    
-#if CPU(X86_64)
-    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+
+    void truncateFloatToInt32(FPRegisterID src, RegisterID dest)
     {
         ASSERT(isSSE2Present());
-        m_assembler.cvttsd2siq_rr(src, dest);
+        m_assembler.cvttss2si_rr(src, dest);
     }
-#endif
-    
+
     // Converts 'src' to an integer, and places the result in 'dest'.
     // If the result is not representable as a 32 bit value, branch.
     // May also branch for some values that are representable in 32 bits
@@ -909,8 +1735,17 @@ public:
         m_assembler.cvttsd2si_rr(src, dest);
 
         // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+#if CPU(X86_64)
+        if (negZeroCheck) {
+            Jump valueIsNonZero = branchTest32(NonZero, dest);
+            m_assembler.movmskpd_rr(src, scratchRegister());
+            failureCases.append(branchTest32(NonZero, scratchRegister(), TrustedImm32(1)));
+            valueIsNonZero.link(this);
+        }
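+        // MOVMSKPD copies the sign bits of both double lanes into the low two
+        // bits of the scratch GPR; testing bit 0 distinguishes a truncated
+        // result of 0 that came from -0.0 (sign bit set) from one that came
+        // from +0.0.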
+#else
         if (negZeroCheck)
             failureCases.append(branchTest32(Zero, dest));
+#endif
 
         // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
         convertInt32ToDouble(dest, fpTemp);
@@ -919,6 +1754,11 @@ public:
         failureCases.append(m_assembler.jne());
     }
 
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        m_assembler.xorps_rr(reg, reg);
+    }
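+    // XORPS reg,reg is the canonical XMM zero idiom: it has a compact encoding
+    // and is recognized by the register renamer as dependency-breaking.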
+
     Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
     {
         ASSERT(isSSE2Present());
@@ -951,13 +1791,13 @@ public:
         m_assembler.por_rr(src, dst);
     }
 
-    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
+    void move32ToFloat(RegisterID src, XMMRegisterID dst)
     {
         ASSERT(isSSE2Present());
         m_assembler.movd_rr(src, dst);
     }
 
-    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
+    void moveFloatTo32(XMMRegisterID src, RegisterID dst)
     {
         ASSERT(isSSE2Present());
         m_assembler.movd_rr(src, dst);
@@ -976,99 +1816,411 @@ public:
         m_assembler.pop_r(dest);
     }
 
-    void push(RegisterID src)
-    {
-        m_assembler.push_r(src);
+    void push(RegisterID src)
+    {
+        m_assembler.push_r(src);
+    }
+
+    void push(Address address)
+    {
+        m_assembler.push_m(address.offset, address.base);
+    }
+
+    void push(TrustedImm32 imm)
+    {
+        m_assembler.push_i32(imm.m_value);
+    }
+
+
+    // Register move operations:
+    //
+    // Move values in registers.
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
+        // it may be useful to have a separate version that sign extends the value?
+        if (!imm.m_value)
+            m_assembler.xorl_rr(dest, dest);
+        else
+            m_assembler.movl_i32r(imm.m_value, dest);
+    }
+
+#if CPU(X86_64)
+    void move(RegisterID src, RegisterID dest)
+    {
+        // Note: on 64-bit this is a full register move; perhaps it would be
+        // useful to have separate move32 & movePtr, with move32 zero extending?
+        if (src != dest)
+            m_assembler.movq_rr(src, dest);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            m_assembler.xorq_rr(dest, dest);
+        else
+            m_assembler.movq_i64r(imm.asIntptr(), dest);
+    }
+
+    void move(TrustedImm64 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            m_assembler.xorq_rr(dest, dest);
+        else
+            m_assembler.movq_i64r(imm.m_value, dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        RegisterID src;
+        if (elseCase == dest)
+            src = thenCase;
+        else {
+            cond = invert(cond);
+            src = elseCase;
+        }
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
+    }
+
+    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomiss_rr(left, right);
+        else
+            m_assembler.ucomiss_rr(right, left);
+        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
+    }
+
+    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        RegisterID src;
+        if (elseCase == dest)
+            src = thenCase;
+        else {
+            cond = invert(cond);
+            src = elseCase;
+        }
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomiss_rr(left, right);
+        else
+            m_assembler.ucomiss_rr(right, left);
+        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
+    }
+    
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        if (reg1 != reg2)
+            m_assembler.xchgq_rr(reg1, reg2);
+    }
+
+    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            m_assembler.xorq_rr(dest, dest);
+        else
+            m_assembler.mov_i32r(imm.m_value, dest);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movsxd_rr(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movl_rr(src, dest);
+    }
+
+    void zeroExtend32ToPtr(TrustedImm32 src, RegisterID dest)
+    {
+        m_assembler.movl_i32r(src.m_value, dest);
+    }
+#else
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.movl_rr(src, dest);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            m_assembler.xorl_rr(dest, dest);
+        else
+            m_assembler.movl_i32r(imm.asIntptr(), dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+
+        if (cond == DoubleEqual) {
+            if (left == right) {
+                m_assembler.cmovnpl_rr(src, dest);
+                return;
+            }
+
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.cmovel_rr(src, dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right) {
+                m_assembler.cmovpl_rr(src, dest);
+                return;
+            }
+
+            m_assembler.cmovpl_rr(src, dest);
+            m_assembler.cmovnel_rr(src, dest);
+            return;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        m_assembler.cmovl_rr(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
     }
 
-    void push(Address address)
+    void swap(RegisterID reg1, RegisterID reg2)
     {
-        m_assembler.push_m(address.offset, address.base);
+        if (reg1 != reg2)
+            m_assembler.xchgl_rr(reg1, reg2);
     }
 
-    void push(TrustedImm32 imm)
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
     {
-        m_assembler.push_i32(imm.m_value);
+        move(src, dest);
     }
 
-
-    // Register move operations:
-    //
-    // Move values in registers.
-
-    void move(TrustedImm32 imm, RegisterID dest)
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
     {
-        // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
-        // may be useful to have a separate version that sign extends the value?
-        if (!imm.m_value)
-            m_assembler.xorl_rr(dest, dest);
-        else
-            m_assembler.movl_i32r(imm.m_value, dest);
+        move(src, dest);
     }
+#endif
 
-#if CPU(X86_64)
-    void move(RegisterID src, RegisterID dest)
+    void swap32(RegisterID src, RegisterID dest)
     {
-        // Note: on 64-bit this is is a full register move; perhaps it would be
-        // useful to have separate move32 & movePtr, with move32 zero extending?
-        if (src != dest)
-            m_assembler.movq_rr(src, dest);
+        m_assembler.xchgl_rr(src, dest);
     }
 
-    void move(TrustedImmPtr imm, RegisterID dest)
+    void swap32(RegisterID src, Address dest)
     {
-        m_assembler.movq_i64r(imm.asIntptr(), dest);
+        m_assembler.xchgl_rm(src, dest.offset, dest.base);
     }
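+    // (XCHG with a memory operand asserts LOCK implicitly, so the Address form
+    // above is also a full memory barrier.)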
 
-    void move(TrustedImm64 imm, RegisterID dest)
+    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
     {
-        m_assembler.movq_i64r(imm.m_value, dest);
+        m_assembler.cmpl_rr(right, left);
+        cmov(x86Condition(cond), src, dest);
     }
 
-    void swap(RegisterID reg1, RegisterID reg2)
+    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
     {
-        if (reg1 != reg2)
-            m_assembler.xchgq_rr(reg1, reg2);
+        m_assembler.cmpl_rr(right, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
     }
 
-    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
     {
-        m_assembler.movsxd_rr(src, dest);
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        m_assembler.cmpl_ir(right.m_value, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
     }
 
-    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
     {
-        m_assembler.movl_rr(src, dest);
+        m_assembler.testl_rr(testReg, mask);
+        cmov(x86Condition(cond), src, dest);
     }
-#else
-    void move(RegisterID src, RegisterID dest)
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
     {
-        if (src != dest)
-            m_assembler.movl_rr(src, dest);
+        ASSERT(isInvertible(cond));
+        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
+
+        m_assembler.testl_rr(right, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
     }
 
-    void move(TrustedImmPtr imm, RegisterID dest)
+    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest)
     {
-        m_assembler.movl_i32r(imm.asIntptr(), dest);
+        test32(testReg, mask);
+        cmov(x86Condition(cond), src, dest);
     }
 
-    void swap(RegisterID reg1, RegisterID reg2)
+    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
     {
-        if (reg1 != reg2)
-            m_assembler.xchgl_rr(reg1, reg2);
+        ASSERT(isInvertible(cond));
+        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
+
+        test32(testReg, mask);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
     }
 
-    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    template<typename LeftType, typename RightType>
+    void moveDoubleConditionally32(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
     {
-        move(src, dest);
+        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
+
+        if (thenCase != dest && elseCase != dest) {
+            moveDouble(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest) {
+            Jump falseCase = branch32(invert(cond), left, right);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else {
+            Jump trueCase = branch32(cond, left, right);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        }
     }
 
-    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    template<typename TestType, typename MaskType>
+    void moveDoubleConditionallyTest32(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
     {
-        move(src, dest);
+        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
+
+        if (elseCase == dest && isInvertible(cond)) {
+            Jump falseCase = branchTest32(invert(cond), test, mask);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else if (thenCase == dest) {
+            Jump trueCase = branchTest32(cond, test, mask);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        } else {
+            Jump trueCase = branchTest32(cond, test, mask);
+            moveDouble(elseCase, dest);
+            Jump falseCase = jump();
+            trueCase.link(this);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        }
+    }
+
+    void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (elseCase == dest) {
+            Jump falseCase = branchDouble(invert(cond), left, right);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else if (thenCase == dest) {
+            Jump trueCase = branchDouble(cond, left, right);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        } else {
+            Jump trueCase = branchDouble(cond, left, right);
+            moveDouble(elseCase, dest);
+            Jump falseCase = jump();
+            trueCase.link(this);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        }
     }
-#endif
 
+    void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (elseCase == dest) {
+            Jump falseCase = branchFloat(invert(cond), left, right);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else if (thenCase == dest) {
+            Jump trueCase = branchFloat(cond, left, right);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        } else {
+            Jump trueCase = branchFloat(cond, left, right);
+            moveDouble(elseCase, dest);
+            Jump falseCase = jump();
+            trueCase.link(this);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        }
+    }
 
     // Forwards / external control flow operations:
     //
@@ -1091,7 +2243,8 @@ public:
 public:
     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
     {
-        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
+        m_assembler.cmpb_im(right8.m_value, left.offset, left.base);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
@@ -1103,10 +2256,12 @@ public:
 
     Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
     {
-        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
-            m_assembler.testl_rr(left, left);
-        else
-            m_assembler.cmpl_ir(right.m_value, left);
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest32(*resultCondition, left, left);
+        }
+
+        m_assembler.cmpl_ir(right.m_value, left);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
     
@@ -1149,9 +2304,12 @@ public:
     {
         if (mask.m_value == -1)
             m_assembler.testl_rr(reg, reg);
-        else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
-            m_assembler.testb_i8r(mask.m_value, reg);
-        else
+        else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16-bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
+            if (mask.m_value == 0xff)
+                m_assembler.testb_rr(reg, reg);
+            else
+                m_assembler.testb_i8r(mask.m_value, reg);
+        } else
             m_assembler.testl_i32r(mask.m_value, reg);
     }
 
@@ -1183,31 +2341,28 @@ public:
     
     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        // Byte in TrustedImm32 is not well defined, so be a little permisive here, but don't accept nonsense values.
-        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
-        if (mask.m_value == -1)
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        if (mask8.m_value == -1)
             m_assembler.cmpb_im(0, address.offset, address.base);
         else
-            m_assembler.testb_im(mask.m_value, address.offset, address.base);
+            m_assembler.testb_im(mask8.m_value, address.offset, address.base);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
     
     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        // Byte in TrustedImm32 is not well defined, so be a little permisive here, but don't accept nonsense values.
-        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
-        if (mask.m_value == -1)
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        if (mask8.m_value == -1)
             m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
         else
-            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
+            m_assembler.testb_im(mask8.m_value, address.offset, address.base, address.index, address.scale);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
     {
-        ASSERT(!(right.m_value & 0xFFFFFF00));
-
-        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
+        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
+        m_assembler.cmpb_im(right8.m_value, left.offset, left.base, left.index, left.scale);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
@@ -1227,6 +2382,12 @@ public:
         m_assembler.jmp_m(address.offset, address.base);
     }
 
+    // Address is a memory location containing the address to jump to
+    void jump(BaseIndex address)
+    {
+        m_assembler.jmp_m(address.offset, address.base, address.index, address.scale);
+    }
+
 
     // Arithmetic control flow operations:
     //
@@ -1272,13 +2433,30 @@ public:
     {
         if (src1 == dest)
             return branchAdd32(cond, src2, dest);
-        move(src2, dest);
+        move32IfNeeded(src2, dest);
         return branchAdd32(cond, src1, dest);
     }
 
+    Jump branchAdd32(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            return branchAdd32(cond, op1, dest);
+        if (op1.base == dest) {
+            load32(op1, dest);
+            return branchAdd32(cond, op2, dest);
+        }
+        zeroExtend32ToPtr(op2, dest);
+        return branchAdd32(cond, op1, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest)
+    {
+        return branchAdd32(cond, src2, src1, dest);
+    }
+
     Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        move(src, dest);
+        move32IfNeeded(src, dest);
         return branchAdd32(cond, imm, dest);
     }
 
@@ -1298,7 +2476,7 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
     
-    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
         mul32(imm, src, dest);
         if (cond != Overflow)
@@ -1310,7 +2488,7 @@ public:
     {
         if (src1 == dest)
             return branchMul32(cond, src2, dest);
-        move(src2, dest);
+        move32IfNeeded(src2, dest);
         return branchMul32(cond, src1, dest);
     }
 
@@ -1349,13 +2527,13 @@ public:
         // B := A - B is invalid.
         ASSERT(src1 == dest || src2 != dest);
 
-        move(src1, dest);
+        move32IfNeeded(src1, dest);
         return branchSub32(cond, src2, dest);
     }
 
     Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
     {
-        move(src1, dest);
+        move32IfNeeded(src1, dest);
         return branchSub32(cond, src2, dest);
     }
 
@@ -1379,6 +2557,11 @@ public:
         m_assembler.int3();
     }
 
+    Call nearTailCall()
+    {
+        return Call(m_assembler.jmp(), Call::LinkableNearTail);
+    }
+
     Call nearCall()
     {
         return Call(m_assembler.call(), Call::LinkableNear);
@@ -1401,7 +2584,8 @@ public:
 
     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
     {
-        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
+        m_assembler.cmpb_im(right8.m_value, left.offset, left.base);
         set32(x86Condition(cond), dest);
     }
     
@@ -1413,10 +2597,14 @@ public:
 
     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
-        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
-            m_assembler.testl_rr(left, left);
-        else
-            m_assembler.cmpl_ir(right.m_value, left);
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                test32(*resultCondition, left, left, dest);
+                return;
+            }
+        }
+
+        m_assembler.cmpl_ir(right.m_value, left);
         set32(x86Condition(cond), dest);
     }
 
@@ -1427,10 +2615,11 @@ public:
 
     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
-        if (mask.m_value == -1)
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        if (mask8.m_value == -1)
             m_assembler.cmpb_im(0, address.offset, address.base);
         else
-            m_assembler.testb_im(mask.m_value, address.offset, address.base);
+            m_assembler.testb_im(mask8.m_value, address.offset, address.base);
         set32(x86Condition(cond), dest);
     }
 
@@ -1440,20 +2629,129 @@ public:
         set32(x86Condition(cond), dest);
     }
 
+    void test32(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+    {
+        m_assembler.testl_rr(reg, mask);
+        set32(x86Condition(cond), dest);
+    }
+
+    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+    {
+        test32(reg, mask);
+        set32(x86Condition(cond), dest);
+    }
+
+    void setCarry(RegisterID dest)
+    {
+        set32(X86Assembler::ConditionC, dest);
+    }
+
     // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
     static RelationalCondition invert(RelationalCondition cond)
     {
         return static_cast<RelationalCondition>(cond ^ 1);
     }
 
+    static DoubleCondition invert(DoubleCondition cond)
+    {
+        switch (cond) {
+        case DoubleEqual:
+            return DoubleNotEqualOrUnordered;
+        case DoubleNotEqual:
+            return DoubleEqualOrUnordered;
+        case DoubleGreaterThan:
+            return DoubleLessThanOrEqualOrUnordered;
+        case DoubleGreaterThanOrEqual:
+            return DoubleLessThanOrUnordered;
+        case DoubleLessThan:
+            return DoubleGreaterThanOrEqualOrUnordered;
+        case DoubleLessThanOrEqual:
+            return DoubleGreaterThanOrUnordered;
+        case DoubleEqualOrUnordered:
+            return DoubleNotEqual;
+        case DoubleNotEqualOrUnordered:
+            return DoubleEqual;
+        case DoubleGreaterThanOrUnordered:
+            return DoubleLessThanOrEqual;
+        case DoubleGreaterThanOrEqualOrUnordered:
+            return DoubleLessThan;
+        case DoubleLessThanOrUnordered:
+            return DoubleGreaterThanOrEqual;
+        case DoubleLessThanOrEqualOrUnordered:
+            return DoubleGreaterThan;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return DoubleEqual; // make compiler happy
+    }
+
+    static bool isInvertible(ResultCondition cond)
+    {
+        switch (cond) {
+        case Zero:
+        case NonZero:
+        case Signed:
+        case PositiveOrZero:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static ResultCondition invert(ResultCondition cond)
+    {
+        switch (cond) {
+        case Zero:
+            return NonZero;
+        case NonZero:
+            return Zero;
+        case Signed:
+            return PositiveOrZero;
+        case PositiveOrZero:
+            return Signed;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return Zero; // Make compiler happy for release builds.
+        }
+    }
+
+    static std::optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
+    {
+        switch (cond) {
+        case Equal:
+            return Zero;
+        case NotEqual:
+            return NonZero;
+        case LessThan:
+            return Signed;
+        case GreaterThanOrEqual:
+            return PositiveOrZero;
+        default:
+            return std::nullopt;
+        }
+    }
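+    // For example, branch32(LessThan, reg, TrustedImm32(0)) can be emitted as
+    // branchTest32(Signed, reg, reg): TEST sets SF from the value, and its
+    // reg,reg form is a shorter encoding than a CMP against immediate zero.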
+
     void nop()
     {
         m_assembler.nop();
     }
     
+    // We take memoryFence to mean acqrel. This has acqrel semantics on x86.
     void memoryFence()
     {
-        m_assembler.mfence();
+        // lock; orl $0, (%rsp)
+        m_assembler.lock();
+        m_assembler.orl_im(0, 0, X86Registers::esp);
+    }
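+    // (A LOCK-prefixed read-modify-write of the top of the stack drains the
+    // store buffer just as MFENCE does, but is typically cheaper and clobbers
+    // no register; or-ing 0 leaves the memory unchanged.)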
+
+    // We take this to mean that it prevents motion of normal stores. So, it's a no-op on x86.
+    void storeFence()
+    {
+    }
+
+    // We take this to mean that it prevents motion of normal loads. So, it's a no-op on x86.
+    void loadFence()
+    {
     }
 
     static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
@@ -1466,28 +2764,61 @@ public:
         return X86Assembler::maxJumpReplacementSize();
     }
 
-#if USE(MASM_PROBE)
-    struct CPUState {
-        #define DECLARE_REGISTER(_type, _regName) \
-            _type _regName;
-        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
-        #undef DECLARE_REGISTER
-    };
+    static ptrdiff_t patchableJumpSize()
+    {
+        return X86Assembler::patchableJumpSize();
+    }
+
+    static bool supportsFloatingPointRounding()
+    {
+        if (s_sse4_1CheckState == CPUIDCheckState::NotChecked)
+            updateEax1EcxFlags();
+        return s_sse4_1CheckState == CPUIDCheckState::Set;
+    }
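+    // (Rounding is emitted with the SSE4.1 ROUNDSD/ROUNDSS instructions, which
+    // is what the SSE4.1 capability check above is for.)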
 
-    struct ProbeContext;
-    typedef void (*ProbeFunction)(struct ProbeContext*);
+    static bool supportsAVX()
+    {
+        // AVX still causes mysterious regressions and those regressions can be massive.
+        return false;
+    }
 
-    struct ProbeContext {
-        ProbeFunction probeFunction;
-        void* arg1;
-        void* arg2;
-        CPUState cpu;
+    static void updateEax1EcxFlags()
+    {
+        int flags = 0;
+#if COMPILER(MSVC)
+        int cpuInfo[4];
+        __cpuid(cpuInfo, 0x1);
+        flags = cpuInfo[2];
+#elif COMPILER(GCC_OR_CLANG)
+#if CPU(X86_64)
+        asm (
+            "movl $0x1, %%eax;"
+            "cpuid;"
+            "movl %%ecx, %0;"
+            : "=g" (flags)
+            :
+            : "%eax", "%ebx", "%ecx", "%edx"
+            );
+#else
+        asm (
+            "movl $0x1, %%eax;"
+            "pushl %%ebx;"
+            "cpuid;"
+            "popl %%ebx;"
+            "movl %%ecx, %0;"
+            : "=g" (flags)
+            :
+            : "%eax", "%ecx", "%edx"
+            );
+#endif
+#endif // COMPILER(GCC_OR_CLANG)
+        s_sse4_1CheckState = (flags & (1 << 19)) ? CPUIDCheckState::Set : CPUIDCheckState::Clear;
+        s_avxCheckState = (flags & (1 << 28)) ? CPUIDCheckState::Set : CPUIDCheckState::Clear;
+    }
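+    // (In CPUID.01H:ECX, bit 19 reports SSE4.1 and bit 28 reports AVX; those
+    // are the two masks tested above.)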
 
-        void dump(const char* indentation = 0);
-    private:
-        void dumpCPURegisters(const char* indentation);
-    };
-#endif // USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
 
 protected:
     X86Assembler::Condition x86Condition(RelationalCondition cond)
@@ -1517,6 +2848,84 @@ protected:
         m_assembler.movzbl_rr(dest, dest);
     }
 
+    void cmov(X86Assembler::Condition cond, RegisterID src, RegisterID dest)
+    {
+#if CPU(X86_64)
+        m_assembler.cmovq_rr(cond, src, dest);
+#else
+        m_assembler.cmovl_rr(cond, src, dest);
+#endif
+    }
+
+    static bool supportsLZCNT()
+    {
+        if (s_lzcntCheckState == CPUIDCheckState::NotChecked) {
+            int flags = 0;
+#if COMPILER(MSVC)
+            int cpuInfo[4];
+            __cpuid(cpuInfo, 0x80000001);
+            flags = cpuInfo[2];
+#elif COMPILER(GCC_OR_CLANG)
+#if CPU(X86_64)
+            asm (
+                "movl $0x80000001, %%eax;"
+                "cpuid;"
+                "movl %%ecx, %0;"
+                : "=g" (flags)
+                :
+                : "%eax", "%ebx", "%ecx", "%edx"
+                );
+#else
+            asm (
+                "movl $0x80000001, %%eax;"
+                "pushl %%ebx;"
+                "cpuid;"
+                "popl %%ebx;"
+                "movl %%ecx, %0;"
+                : "=g" (flags)
+                :
+                : "%eax", "%ecx", "%edx"
+                );
+#endif
+#endif // COMPILER(GCC_OR_CLANG)
+            s_lzcntCheckState = (flags & 0x20) ? CPUIDCheckState::Set : CPUIDCheckState::Clear;
+        }
+        return s_lzcntCheckState == CPUIDCheckState::Set;
+    }
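+    // (0x20 is bit 5 of CPUID.80000001H:ECX, the LZCNT/ABM capability flag.)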
+
+    static bool supportsBMI1()
+    {
+        if (s_bmi1CheckState == CPUIDCheckState::NotChecked) {
+            int flags = 0;
+#if COMPILER(MSVC)
+            int cpuInfo[4];
+            // BMI1 is reported in CPUID.(EAX=07H,ECX=0):EBX, as in the GCC/Clang path below.
+            __cpuidex(cpuInfo, 0x7, 0x0);
+            flags = cpuInfo[1];
+#elif COMPILER(GCC_OR_CLANG)
+            asm (
+                 "movl $0x7, %%eax;"
+                 "movl $0x0, %%ecx;"
+                 "cpuid;"
+                 "movl %%ebx, %0;"
+                 : "=g" (flags)
+                 :
+                 : "%eax", "%ebx", "%ecx", "%edx"
+                 );
+#endif // COMPILER(GCC_OR_CLANG)
+            static int BMI1FeatureBit = 1 << 3;
+            s_bmi1CheckState = (flags & BMI1FeatureBit) ? CPUIDCheckState::Set : CPUIDCheckState::Clear;
+        }
+        return s_bmi1CheckState == CPUIDCheckState::Set;
+    }
+
+    template<int sizeOfRegister>
+    void ctzAfterBsf(RegisterID dst)
+    {
+        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
+        move(TrustedImm32(sizeOfRegister), dst);
+        srcIsNonZero.link(this);
+    }
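+    // BSF sets ZF and leaves the destination undefined when the source is
+    // zero, so the fallback above materializes the operand width, matching the
+    // result TZCNT would have produced.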
+
 private:
     // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
     // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
@@ -1538,6 +2947,84 @@ private:
             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
     }
 
+    // If lzcnt is not available, use this after BSR
+    // to count the leading zeros.
+    void clz32AfterBsr(RegisterID dst)
+    {
+        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
+        move(TrustedImm32(32), dst);
+
+        Jump skipNonZeroCase = jump();
+        srcIsNonZero.link(this);
+        xor32(TrustedImm32(0x1f), dst);
+        skipNonZeroCase.link(this);
+    }
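+    // BSR yields the index of the highest set bit, so the leading-zero count
+    // is 31 - index; for an index in [0, 31] that subtraction equals the
+    // xor32 with 0x1f above. For example, src = 0x8000 gives index 15, and
+    // 15 ^ 0x1f == 16 leading zeros.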
+
+    Jump jumpAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        if (cond == DoubleEqual) {
+            if (left == right)
+                return Jump(m_assembler.jnp());
+            Jump isUnordered(m_assembler.jp());
+            Jump result = Jump(m_assembler.je());
+            isUnordered.link(this);
+            return result;
+        }
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right)
+                return Jump(m_assembler.jp());
+            Jump isUnordered(m_assembler.jp());
+            Jump isEqual(m_assembler.je());
+            isUnordered.link(this);
+            Jump result = jump();
+            isEqual.link(this);
+            return result;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
+    }
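+    // UCOMISD/UCOMISS set ZF, PF and CF all to 1 on an unordered (NaN)
+    // comparison, so DoubleEqual must fail when PF is set and
+    // DoubleNotEqualOrUnordered must succeed; the jp()/jnp() pairs above
+    // handle exactly those cases.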
+
+    // The 32-bit move does not need the REX byte for low registers, making it shorter.
+    // Use this if the top bits are irrelevant because they will be reset by the next instruction.
+    void move32IfNeeded(RegisterID src, RegisterID dest)
+    {
+        if (src == dest)
+            return;
+        m_assembler.movl_rr(src, dest);
+    }
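+    // For example, movl %eax, %ecx encodes in 2 bytes where movq %rax, %rcx
+    // needs a REX prefix (3 bytes); the 32-bit write also clears the upper
+    // 32 bits of the destination.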
+
+#if CPU(X86_64)
+    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        if (cond == DoubleEqual) {
+            if (left == right) {
+                m_assembler.cmovnpq_rr(src, dest);
+                return;
+            }
+
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.cmoveq_rr(src, dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right) {
+                m_assembler.cmovpq_rr(src, dest);
+                return;
+            }
+
+            m_assembler.cmovpq_rr(src, dest);
+            m_assembler.cmovneq_rr(src, dest);
+            return;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        cmov(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
+    }
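+    // For DoubleNotEqualOrUnordered, the cmovp/cmovne pair covers both arms of
+    // the condition: CMOVP moves on an unordered result (PF = 1) and CMOVNE
+    // moves when the operands compared not-equal (ZF = 0).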
+#endif
+
 #if CPU(X86)
 #if OS(MAC_OS_X)
 
@@ -1567,7 +3054,7 @@ private:
                 cpuid;
                 mov flags, edx;
             }
-#elif COMPILER(GCC)
+#elif COMPILER(GCC_OR_CLANG)
             asm (
                  "movl $0x1, %%eax;"
                  "pushl %%ebx;"
@@ -1588,7 +3075,7 @@ private:
         return s_sse2CheckState == HasSSE2;
     }
     
-    static SSE2CheckState s_sse2CheckState;
+    JS_EXPORTDATA static SSE2CheckState s_sse2CheckState;
 
 #endif // OS(MAC_OS_X)
 #elif !defined(NDEBUG) // CPU(X86)
@@ -1601,10 +3088,18 @@ private:
     }
 
 #endif
+
+    enum class CPUIDCheckState {
+        NotChecked,
+        Clear,
+        Set
+    };
+    JS_EXPORT_PRIVATE static CPUIDCheckState s_sse4_1CheckState;
+    JS_EXPORT_PRIVATE static CPUIDCheckState s_avxCheckState;
+    static CPUIDCheckState s_bmi1CheckState;
+    static CPUIDCheckState s_lzcntCheckState;
 };
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // MacroAssemblerX86Common_h
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 4fbc5a3dd..7e1841270 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,27 +23,28 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef MacroAssemblerX86_64_h
-#define MacroAssemblerX86_64_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && CPU(X86_64)
 
 #include "MacroAssemblerX86Common.h"
 
-#if USE(MASM_PROBE)
-#include 
-#endif
+#define REPATCH_OFFSET_CALL_R11 3
 
-#define REPTACH_OFFSET_CALL_R11 3
+inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }
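+// For example, -1 and 0x7fffffff round-trip through int32_t and pass, while
+// 0x80000000 (2147483648) sign-extends back to -2147483648 and fails.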
 
 namespace JSC {
 
 class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
 public:
+    static const unsigned numGPRs = 16;
+    static const unsigned numFPRs = 16;
+    
     static const Scale ScalePtr = TimesEight;
 
     using MacroAssemblerX86Common::add32;
     using MacroAssemblerX86Common::and32;
+    using MacroAssemblerX86Common::branch32;
     using MacroAssemblerX86Common::branchAdd32;
     using MacroAssemblerX86Common::or32;
     using MacroAssemblerX86Common::sub32;
@@ -59,38 +60,38 @@ public:
 
     void add32(TrustedImm32 imm, AbsoluteAddress address)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        add32(imm, Address(scratchRegister));
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        add32(imm, Address(scratchRegister()));
     }
     
     void and32(TrustedImm32 imm, AbsoluteAddress address)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        and32(imm, Address(scratchRegister));
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        and32(imm, Address(scratchRegister()));
     }
     
     void add32(AbsoluteAddress address, RegisterID dest)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        add32(Address(scratchRegister), dest);
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        add32(Address(scratchRegister()), dest);
     }
     
     void or32(TrustedImm32 imm, AbsoluteAddress address)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        or32(imm, Address(scratchRegister));
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        or32(imm, Address(scratchRegister()));
     }
 
     void or32(RegisterID reg, AbsoluteAddress address)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        or32(reg, Address(scratchRegister));
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        or32(reg, Address(scratchRegister()));
     }
 
     void sub32(TrustedImm32 imm, AbsoluteAddress address)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        sub32(imm, Address(scratchRegister));
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        sub32(imm, Address(scratchRegister()));
     }
     
     void load8(const void* address, RegisterID dest)
@@ -111,70 +112,145 @@ public:
 
     void addDouble(AbsoluteAddress address, FPRegisterID dest)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        m_assembler.addsd_mr(0, scratchRegister, dest);
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        m_assembler.addsd_mr(0, scratchRegister(), dest);
     }
 
     void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
     {
-        move(imm, scratchRegister);
-        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
+        move(imm, scratchRegister());
+        m_assembler.cvtsi2sd_rr(scratchRegister(), dest);
     }
 
     void store32(TrustedImm32 imm, void* address)
     {
-        move(TrustedImmPtr(address), scratchRegister);
-        store32(imm, scratchRegister);
+        move(TrustedImmPtr(address), scratchRegister());
+        store32(imm, scratchRegister());
+    }
+
+    void store32(RegisterID source, void* address)
+    {
+        if (source == X86Registers::eax)
+            m_assembler.movl_EAXm(address);
+        else {
+            move(TrustedImmPtr(address), scratchRegister());
+            store32(source, scratchRegister());
+        }
     }
     
     void store8(TrustedImm32 imm, void* address)
     {
-        move(TrustedImmPtr(address), scratchRegister);
-        store8(imm, Address(scratchRegister));
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(TrustedImmPtr(address), scratchRegister());
+        store8(imm8, Address(scratchRegister()));
     }
 
     void store8(RegisterID reg, void* address)
     {
-        move(TrustedImmPtr(address), scratchRegister);
-        store8(reg, Address(scratchRegister));
+        move(TrustedImmPtr(address), scratchRegister());
+        store8(reg, Address(scratchRegister()));
     }
 
+#if OS(WINDOWS)
+    Call callWithSlowPathReturnType()
+    {
+        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
+        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right,
+        // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument.
+        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
+        // We then need to copy the 16 byte return value into rax and rdx, since JIT expects the return value to be split between the two.
+        // It is assumed that the parameters are already shifted to the right when entering this method.
+        // Note: this implementation supports up to 3 parameters.
+
+        // JIT relies on the CallerFrame (frame pointer) being put on the stack.
+        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
+        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
+        store64(X86Registers::ebp, Address(X86Registers::esp, -16));
+
+        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
+        // In addition, we need to allocate 16 bytes for the return value.
+        // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated).
+        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+
+        // The first parameter register should contain a pointer to the stack allocated space for the return value.
+        move(X86Registers::esp, X86Registers::ecx);
+        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);
+
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
+        Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable);
+
+        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+
+        // Copy the return value into rax and rdx.
+        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
+        load64(Address(X86Registers::eax), X86Registers::eax);
+
+        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
+        return result;
+    }
+#endif
+
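As an aside for readers without the Win64 ABI docs at hand: aggregates larger than 8 bytes are returned through a caller-allocated buffer whose address travels as a hidden first argument. A minimal stand-alone C++ sketch of the shape the helper above emits by hand (SlowPathReturnType and slowPath are invented names, not JSC API):

    #include <cstdint>
    #include <cstdio>

    // A 16-byte aggregate: too large to come back in RAX alone under Win64.
    struct SlowPathReturnType { int64_t a; int64_t b; };

    SlowPathReturnType slowPath(int64_t x, int64_t y, int64_t z)
    {
        return { x + y, z };
    }

    int main()
    {
        // On Win64 the compiler lowers this call as:
        //   RCX <- hidden pointer to a caller-allocated 16-byte slot
        //   RDX, R8, R9 <- x, y, z (each shifted one register to the right)
        //   on return, RAX points at the slot
        // callWithSlowPathReturnType() emits that shape by hand, then loads
        // slot[0] into RAX and slot[1] into RDX, the pair the JIT expects.
        SlowPathReturnType r = slowPath(1, 2, 3);
        std::printf("%lld %lld\n", (long long)r.a, (long long)r.b);
        return 0;
    }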
     Call call()
     {
-        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
-        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
-        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
+#if OS(WINDOWS)
+        // JIT relies on the CallerFrame (frame pointer) being put on the stack,
+        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
+        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
+        store64(X86Registers::ebp, Address(X86Registers::esp, -16));
+
+        // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them.
+        // We don't know the number of arguments at this point, so the arguments (5, 6, ...) should always be copied.
+
+        // Copy argument 5
+        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister());
+        store64(scratchRegister(), Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t))));
+
+        // Copy argument 6
+        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister());
+        store64(scratchRegister(), Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t))));
+
+        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
+        // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated).
+        // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters.
+        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+#endif
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
+        Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable);
+#if OS(WINDOWS)
+        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+#endif
+        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
         return result;
     }
 
     // Address is a memory location containing the address to jump to
     void jump(AbsoluteAddress address)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        jump(Address(scratchRegister));
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        jump(Address(scratchRegister()));
     }
 
     Call tailRecursiveCall()
     {
-        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
-        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
-        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
+        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister()));
+        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
         return Call::fromTailJump(newJump);
     }
 
     Call makeTailRecursiveCall(Jump oldJump)
     {
         oldJump.link(this);
-        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
-        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
-        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
+        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister()));
+        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
         return Call::fromTailJump(newJump);
     }
 
     Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
     {
-        move(TrustedImmPtr(dest.m_ptr), scratchRegister);
-        add32(src, Address(scratchRegister));
+        move(TrustedImmPtr(dest.m_ptr), scratchRegister());
+        add32(src, Address(scratchRegister()));
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
@@ -188,10 +264,15 @@ public:
         m_assembler.addq_mr(src.offset, src.base, dest);
     }
 
+    void add64(RegisterID src, Address dest)
+    {
+        m_assembler.addq_rm(src, dest.offset, dest.base);
+    }
+
     void add64(AbsoluteAddress src, RegisterID dest)
     {
-        move(TrustedImmPtr(src.m_ptr), scratchRegister);
-        add64(Address(scratchRegister), dest);
+        move(TrustedImmPtr(src.m_ptr), scratchRegister());
+        add64(Address(scratchRegister()), dest);
     }
 
     void add64(TrustedImm32 imm, RegisterID srcDest)
@@ -207,8 +288,8 @@ public:
         if (imm.m_value == 1)
             m_assembler.incq_r(dest);
         else {
-            move(imm, scratchRegister);
-            add64(scratchRegister, dest);
+            move(imm, scratchRegister());
+            add64(scratchRegister(), dest);
         }
     }
 
@@ -219,13 +300,36 @@ public:
 
     void add64(TrustedImm32 imm, Address address)
     {
-        m_assembler.addq_im(imm.m_value, address.offset, address.base);
+        if (imm.m_value == 1)
+            m_assembler.incq_m(address.offset, address.base);
+        else
+            m_assembler.addq_im(imm.m_value, address.offset, address.base);
     }
 
     void add64(TrustedImm32 imm, AbsoluteAddress address)
     {
-        move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        add64(imm, Address(scratchRegister));
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        add64(imm, Address(scratchRegister()));
+    }
+
+    void add64(RegisterID a, RegisterID b, RegisterID dest)
+    {
+        x86Lea64(BaseIndex(a, b, TimesOne), dest);
+    }
+
+    void x86Lea64(BaseIndex index, RegisterID dest)
+    {
+        if (!index.scale && !index.offset) {
+            if (index.base == dest) {
+                add64(index.index, dest);
+                return;
+            }
+            if (index.index == dest) {
+                add64(index.base, dest);
+                return;
+            }
+        }
+        m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest);
     }
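x86Lea64() above leans on leaq being a flag-preserving, three-operand add. A small model of the address arithmetic it encodes (note that BaseIndex scales are stored as log2 shifts, so TimesOne is a shift of 0):

    #include <cstdint>

    // leaq offset(base, index, 2^scale), dest computes
    //     dest = base + (index << scale) + offset
    // without touching EFLAGS, which is why add64(a, b, dest) can be
    // lowered to a single leaq when no flags are needed.
    inline uint64_t lea64(uint64_t base, uint64_t index, unsigned scale, int32_t offset)
    {
        return base + (index << scale) + offset;
    }

    int main()
    {
        uint64_t a = 40, b = 2;
        return lea64(a, b, 0, 0) == 42 ? 0 : 1; // the add64(a, b, dest) case
    }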
 
     void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
@@ -245,8 +349,56 @@ public:
 
     void and64(TrustedImmPtr imm, RegisterID srcDest)
     {
-        move(imm, scratchRegister);
-        and64(scratchRegister, srcDest);
+        intptr_t intValue = imm.asIntptr();
+        if (intValue <= std::numeric_limits<int32_t>::max()
+            && intValue >= std::numeric_limits<int32_t>::min()) {
+            and64(TrustedImm32(static_cast<int32_t>(intValue)), srcDest);
+            return;
+        }
+        move(imm, scratchRegister());
+        and64(scratchRegister(), srcDest);
+    }
+
+    void and64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2 && op1 != dest && op2 != dest)
+            move(op1, dest);
+        else if (op1 == dest)
+            and64(op2, dest);
+        else {
+            move(op2, dest);
+            and64(op1, dest);
+        }
+    }
+
+    void countLeadingZeros64(RegisterID src, RegisterID dst)
+    {
+        if (supportsLZCNT()) {
+            m_assembler.lzcntq_rr(src, dst);
+            return;
+        }
+        m_assembler.bsrq_rr(src, dst);
+        clz64AfterBsr(dst);
+    }
+
+    void countLeadingZeros64(Address src, RegisterID dst)
+    {
+        if (supportsLZCNT()) {
+            m_assembler.lzcntq_mr(src.offset, src.base, dst);
+            return;
+        }
+        m_assembler.bsrq_mr(src.offset, src.base, dst);
+        clz64AfterBsr(dst);
+    }
+
+    void countTrailingZeros64(RegisterID src, RegisterID dst)
+    {
+        if (supportsBMI1()) {
+            m_assembler.tzcntq_rr(src, dst);
+            return;
+        }
+        m_assembler.bsfq_rr(src, dst);
+        ctzAfterBsf<64>(dst);
     }
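The LZCNT fallbacks depend on one identity: BSR yields the index of the highest set bit, and for any index in 0..63, 63 - index == index ^ 0x3f, which is the XOR emitted by clz64AfterBsr() near the end of this class. A portable model, assuming only the documented BSR behavior:

    #include <cstdint>

    // Models countLeadingZeros64() on CPUs without LZCNT: find the highest
    // set bit the way BSR does, then flip the index with XOR 0x3f, which
    // equals 63 - index for any index in 0..63.
    inline unsigned clz64(uint64_t value)
    {
        if (!value)
            return 64;          // the "BSR left ZF set" path: move 64 into dst
        unsigned index = 63;
        while (!(value >> index))
            --index;            // BSR: index of the most significant 1 bit
        return index ^ 0x3f;    // what clz64AfterBsr() emits
    }

    int main()
    {
        return (clz64(1) == 63 && clz64(UINT64_C(1) << 63) == 0 && clz64(0) == 64) ? 0 : 1;
    }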
 
     void lshift64(TrustedImm32 imm, RegisterID dest)
@@ -254,16 +406,147 @@ public:
         m_assembler.shlq_i8r(imm.m_value, dest);
     }
     
+    void lshift64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.shlq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only shift by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.shlq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+    
     void rshift64(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.sarq_i8r(imm.m_value, dest);
     }
-    
+
+    void rshift64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.sarq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+            
+            // Can only shift by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.sarq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void urshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shrq_i8r(imm.m_value, dest);
+    }
+
+    void urshift64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.shrq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+            
+            // Can only shift by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.shrq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void rotateRight64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.rorq_i8r(imm.m_value, dest);
+    }
+
+    void rotateRight64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.rorq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only rotate by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.rorq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void rotateLeft64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.rolq_i8r(imm.m_value, dest);
+    }
+
+    void rotateLeft64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.rolq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only rotate by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.rolq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
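All the variable-count shift and rotate helpers above work around the same ISA restriction: the count operand of SHL/SHR/SAR/ROL/ROR must live in CL. A sketch of the swap dance in plain C++ (the Regs struct is a made-up stand-in for machine state):

    #include <cstdint>
    #include <utility>

    struct Regs { uint64_t rax, rcx, rdx; };

    // When the count is not already in rcx, the assembler swaps it in,
    // shifts, and swaps back. If dest itself was rcx, the value to shift
    // now sits in the old count register, which is what
    // "dest == ecx ? src : dest" selects in the helpers above.
    inline void shlRaxByRdx(Regs& r)
    {
        std::swap(r.rdx, r.rcx);   // swap(src, X86Registers::ecx)
        r.rax <<= (r.rcx & 63);    // m_assembler.shlq_CLr(dest)
        std::swap(r.rdx, r.rcx);   // restore the swapped registers
    }

    int main()
    {
        Regs r { 1, 0, 5 };
        shlRaxByRdx(r);
        return (r.rax == 32 && r.rdx == 5) ? 0 : 1;
    }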
     void mul64(RegisterID src, RegisterID dest)
     {
         m_assembler.imulq_rr(src, dest);
     }
+
+    void mul64(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src2 == dest) {
+            m_assembler.imulq_rr(src1, dest);
+            return;
+        }
+        move(src1, dest);
+        m_assembler.imulq_rr(src2, dest);
+    }
     
+    void x86ConvertToQuadWord64()
+    {
+        m_assembler.cqo();
+    }
+
+    void x86ConvertToQuadWord64(RegisterID rax, RegisterID rdx)
+    {
+        ASSERT_UNUSED(rax, rax == X86Registers::eax);
+        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
+        x86ConvertToQuadWord64();
+    }
+
+    void x86Div64(RegisterID denominator)
+    {
+        m_assembler.idivq_r(denominator);
+    }
+
+    void x86Div64(RegisterID rax, RegisterID rdx, RegisterID denominator)
+    {
+        ASSERT_UNUSED(rax, rax == X86Registers::eax);
+        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
+        x86Div64(denominator);
+    }
+
+    void x86UDiv64(RegisterID denominator)
+    {
+        m_assembler.divq_r(denominator);
+    }
+
+    void x86UDiv64(RegisterID rax, RegisterID rdx, RegisterID denominator)
+    {
+        ASSERT_UNUSED(rax, rax == X86Registers::eax);
+        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
+        x86UDiv64(denominator);
+    }
+
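x86Div64() and x86ConvertToQuadWord64() are intended as a pair: IDIV divides the 128-bit RDX:RAX by its operand, so CQO must first sign-extend RAX into RDX. A model of that contract, with C++'s native division standing in for the widened divide:

    #include <cstdint>

    struct DivResult { int64_t quotient, remainder; };

    // idivq leaves the quotient in RAX and the remainder in RDX; both
    // follow C++ semantics (truncation toward zero, remainder taking
    // the sign of the dividend).
    inline DivResult idiv64(int64_t rax, int64_t divisor)
    {
        // x86ConvertToQuadWord64(): rdx = (rax < 0) ? -1 : 0, widening
        // the dividend to rdx:rax. Native 64-bit division subsumes it here.
        return { rax / divisor, rax % divisor };
    }

    int main()
    {
        DivResult r = idiv64(-7, 2);
        return (r.quotient == -3 && r.remainder == -1) ? 0 : 1;
    }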
     void neg64(RegisterID dest)
     {
         m_assembler.negq_r(dest);
@@ -274,10 +557,15 @@ public:
         m_assembler.orq_rr(src, dest);
     }
 
-    void or64(TrustedImm64 imm, RegisterID dest)
+    void or64(TrustedImm64 imm, RegisterID srcDest)
     {
-        move(imm, scratchRegister);
-        or64(scratchRegister, dest);
+        if (imm.m_value <= std::numeric_limits<int32_t>::max()
+            && imm.m_value >= std::numeric_limits<int32_t>::min()) {
+            or64(TrustedImm32(static_cast<int32_t>(imm.m_value)), srcDest);
+            return;
+        }
+        move(imm, scratchRegister());
+        or64(scratchRegister(), srcDest);
     }
 
     void or64(TrustedImm32 imm, RegisterID dest)
@@ -302,11 +590,6 @@ public:
         move(src, dest);
         or64(imm, dest);
     }
-    
-    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
-    {
-        m_assembler.rorq_i8r(imm.m_value, srcDst);
-    }
 
     void sub64(RegisterID src, RegisterID dest)
     {
@@ -326,15 +609,42 @@ public:
         if (imm.m_value == 1)
             m_assembler.decq_r(dest);
         else {
-            move(imm, scratchRegister);
-            sub64(scratchRegister, dest);
+            move(imm, scratchRegister());
+            sub64(scratchRegister(), dest);
         }
     }
 
+    void sub64(TrustedImm32 imm, Address address)
+    {
+        m_assembler.subq_im(imm.m_value, address.offset, address.base);
+    }
+
+    void sub64(Address src, RegisterID dest)
+    {
+        m_assembler.subq_mr(src.offset, src.base, dest);
+    }
+
+    void sub64(RegisterID src, Address dest)
+    {
+        m_assembler.subq_rm(src, dest.offset, dest.base);
+    }
+
     void xor64(RegisterID src, RegisterID dest)
     {
         m_assembler.xorq_rr(src, dest);
     }
+
+    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            move(TrustedImm32(0), dest);
+        else if (op1 == dest)
+            xor64(op2, dest);
+        else {
+            move(op2, dest);
+            xor64(op1, dest);
+        }
+    }
     
     void xor64(RegisterID src, Address dest)
     {
@@ -346,6 +656,16 @@ public:
         m_assembler.xorq_ir(imm.m_value, srcDest);
     }
 
+    void not64(RegisterID srcDest)
+    {
+        m_assembler.notq_r(srcDest);
+    }
+
+    void not64(Address dest)
+    {
+        m_assembler.notq_m(dest.offset, dest.base);
+    }
+
     void load64(ImplicitAddress address, RegisterID dest)
     {
         m_assembler.movq_mr(address.offset, address.base, dest);
@@ -395,21 +715,31 @@ public:
         if (src == X86Registers::eax)
             m_assembler.movq_EAXm(address);
         else {
-            move(TrustedImmPtr(address), scratchRegister);
-            store64(src, scratchRegister);
+            move(TrustedImmPtr(address), scratchRegister());
+            store64(src, scratchRegister());
         }
     }
 
+    void store64(TrustedImm32 imm, ImplicitAddress address)
+    {
+        m_assembler.movq_i32m(imm.m_value, address.offset, address.base);
+    }
+
     void store64(TrustedImm64 imm, ImplicitAddress address)
     {
-        move(imm, scratchRegister);
-        store64(scratchRegister, address);
+        if (CAN_SIGN_EXTEND_32_64(imm.m_value)) {
+            store64(TrustedImm32(static_cast<int32_t>(imm.m_value)), address);
+            return;
+        }
+
+        move(imm, scratchRegister());
+        store64(scratchRegister(), address);
     }
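The CAN_SIGN_EXTEND_32_64 fast path above works because movq sign-extends a 32-bit immediate to 64 bits, so a single instruction suffices whenever the value fits in int32_t. A minimal model of the predicate:

    #include <cstdint>
    #include <limits>

    // Mirrors the store64(TrustedImm64, ...) fast path: values representable
    // as int32_t can be stored with one sign-extending movq $imm32, mem;
    // anything wider must go through the scratch register.
    inline bool canSignExtend32To64(int64_t value)
    {
        return value >= std::numeric_limits<int32_t>::min()
            && value <= std::numeric_limits<int32_t>::max();
    }

    int main()
    {
        return (canSignExtend32To64(-1)                  // one movq $-1, (mem)
            && !canSignExtend32To64(INT64_C(1) << 33))   // move-to-scratch + store
            ? 0 : 1;
    }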
 
     void store64(TrustedImm64 imm, BaseIndex address)
     {
-        move(imm, scratchRegister);
-        m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
+        move(imm, scratchRegister());
+        m_assembler.movq_rm(scratchRegister(), address.offset, address.base, address.index, address.scale);
     }
     
     DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
@@ -419,6 +749,16 @@ public:
         return DataLabel32(this);
     }
 
+    void swap64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.xchgq_rr(src, dest);
+    }
+
+    void swap64(RegisterID src, Address dest)
+    {
+        m_assembler.xchgq_rm(src, dest.offset, dest.base);
+    }
+
     void move64ToDouble(RegisterID src, FPRegisterID dest)
     {
         m_assembler.movq_rr(src, dest);
@@ -431,35 +771,81 @@ public:
 
     void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
-        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
-            m_assembler.testq_rr(left, left);
-        else
-            m_assembler.cmpq_ir(right.m_value, left);
-        m_assembler.setCC_r(x86Condition(cond), dest);
-        m_assembler.movzbl_rr(dest, dest);
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                test64(*resultCondition, left, left, dest);
+                return;
+            }
+        }
+
+        m_assembler.cmpq_ir(right.m_value, left);
+        set32(x86Condition(cond), dest);
     }
     
     void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
     {
         m_assembler.cmpq_rr(right, left);
-        m_assembler.setCC_r(x86Condition(cond), dest);
-        m_assembler.movzbl_rr(dest, dest);
+        set32(x86Condition(cond), dest);
     }
-    
+
+    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
+    {
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+
+        if (cond == DoubleEqual) {
+            if (left == right) {
+                m_assembler.setnp_r(dest);
+                return;
+            }
+
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.sete_r(dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right) {
+                m_assembler.setp_r(dest);
+                return;
+            }
+
+            m_assembler.setp_r(dest);
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.setne_r(dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        m_assembler.setCC_r(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), dest);
+    }
+
     Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
     {
         m_assembler.cmpq_rr(right, left);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest64(*resultCondition, left, left);
+        }
+        m_assembler.cmpq_ir(right.m_value, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
     Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
     {
         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
             m_assembler.testq_rr(left, left);
             return Jump(m_assembler.jCC(x86Condition(cond)));
         }
-        move(right, scratchRegister);
-        return branch64(cond, left, scratchRegister);
+        move(right, scratchRegister());
+        return branch64(cond, left, scratchRegister());
     }
 
     Jump branch64(RelationalCondition cond, RegisterID left, Address right)
@@ -470,8 +856,8 @@ public:
 
     Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
     {
-        move(TrustedImmPtr(left.m_ptr), scratchRegister);
-        return branch64(cond, Address(scratchRegister), right);
+        move(TrustedImmPtr(left.m_ptr), scratchRegister());
+        return branch64(cond, Address(scratchRegister()), right);
     }
 
     Jump branch64(RelationalCondition cond, Address left, RegisterID right)
@@ -480,10 +866,16 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branch64(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        m_assembler.cmpq_im(right.m_value, left.offset, left.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
     Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
     {
-        move(right, scratchRegister);
-        return branch64(cond, left, scratchRegister);
+        move(right, scratchRegister());
+        return branch64(cond, left, scratchRegister());
     }
 
     Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
@@ -491,6 +883,12 @@ public:
         m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
+    
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, scratchRegister());
+        return branch32(cond, scratchRegister(), right);
+    }
 
     Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
     {
@@ -499,8 +897,8 @@ public:
 
     Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
     {
-        move(right, scratchRegister);
-        return branchPtr(cond, left, scratchRegister);
+        move(right, scratchRegister());
+        return branchPtr(cond, left, scratchRegister());
     }
 
     Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
@@ -521,6 +919,12 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
+    {
+        move(mask, scratchRegister());
+        return branchTest64(cond, reg, scratchRegister());
+    }
+
     void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
     {
         if (mask.m_value == -1)
@@ -540,8 +944,8 @@ public:
 
     Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load64(address.m_ptr, scratchRegister);
-        return branchTest64(cond, scratchRegister, mask);
+        load64(address.m_ptr, scratchRegister());
+        return branchTest64(cond, scratchRegister(), mask);
     }
 
     Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
@@ -575,12 +979,43 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branchAdd64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            return branchAdd64(cond, src2, dest);
+        move(src2, dest);
+        return branchAdd64(cond, src1, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            return branchAdd64(cond, op1, dest);
+        if (op1.base == dest) {
+            load64(op1, dest);
+            return branchAdd64(cond, op2, dest);
+        }
+        move(op2, dest);
+        return branchAdd64(cond, op1, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest)
+    {
+        return branchAdd64(cond, src2, src1, dest);
+    }
+
     Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
     {
         add64(src, dest);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branchAdd64(ResultCondition cond, Address src, RegisterID dest)
+    {
+        add64(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
     Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
     {
         mul64(src, dest);
@@ -589,6 +1024,14 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            return branchMul64(cond, src2, dest);
+        move(src2, dest);
+        return branchMul64(cond, src1, dest);
+    }
+
     Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
     {
         sub64(imm, dest);
@@ -613,6 +1056,164 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmpq_rr(right, left);
+        cmov(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.cmpq_rr(right, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        m_assembler.cmpq_ir(right.m_value, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.testq_rr(testReg, mask);
+        cmov(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isInvertible(cond));
+        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
+
+        m_assembler.testq_rr(right, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+    
+    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest)
+    {
+        // if we are only interested in the low seven bits, this can be tested with a testb
+        if (mask.m_value == -1)
+            m_assembler.testq_rr(testReg, testReg);
+        else if ((mask.m_value & ~0x7f) == 0)
+            m_assembler.testb_i8r(mask.m_value, testReg);
+        else
+            m_assembler.testq_i32r(mask.m_value, testReg);
+        cmov(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isInvertible(cond));
+        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
+
+        if (mask.m_value == -1)
+            m_assembler.testq_rr(testReg, testReg);
+        else if (!(mask.m_value & ~0x7f))
+            m_assembler.testb_i8r(mask.m_value, testReg);
+        else
+            m_assembler.testq_i32r(mask.m_value, testReg);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
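Each two-result moveConditionally* helper above (and the templates that follow) performs the same normalization before emitting a single CMOV: copy the else-value into dest unless an input already aliases it, then select with the condition or its inverse. The data flow, stripped of register allocation:

    #include <cstdint>

    // After normalization, exactly one of then/else lives in dest, and a
    // single conditional move (with the condition possibly inverted)
    // finishes the select.
    inline uint64_t select(bool cond, uint64_t thenCase, uint64_t elseCase)
    {
        uint64_t dest = elseCase; // move(elseCase, dest); elseCase = dest
        if (cond)                 // cmov(x86Condition(cond), thenCase, dest)
            dest = thenCase;
        return dest;
    }

    int main()
    {
        return (select(true, 1, 2) == 1 && select(false, 1, 2) == 2) ? 0 : 1;
    }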
+    template<typename LeftType, typename RightType>
+    void moveDoubleConditionally64(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
+
+        if (thenCase != dest && elseCase != dest) {
+            moveDouble(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest) {
+            Jump falseCase = branch64(invert(cond), left, right);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else {
+            Jump trueCase = branch64(cond, left, right);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        }
+    }
+
+    template<typename TestType, typename MaskType>
+    void moveDoubleConditionallyTest64(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
+
+        if (elseCase == dest && isInvertible(cond)) {
+            Jump falseCase = branchTest64(invert(cond), test, mask);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else if (thenCase == dest) {
+            Jump trueCase = branchTest64(cond, test, mask);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        } else {
+            Jump trueCase = branchTest64(cond, test, mask);
+            moveDouble(elseCase, dest);
+            Jump falseCase = jump();
+            trueCase.link(this);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        }
+    }
+    
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), X86Registers::r11);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm64(misc), X86Registers::r10);
+        abortWithReason(reason);
+    }
+
     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
     {
         ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
@@ -627,44 +1228,137 @@ public:
         return DataLabelPtr(this);
     }
 
+    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movq_i64r(initialValue.m_value, dest);
+        return DataLabelPtr(this);
+    }
+
     Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
-        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
-        return branch64(cond, left, scratchRegister);
+        dataLabel = moveWithPatch(initialRightValue, scratchRegister());
+        return branch64(cond, left, scratchRegister());
     }
 
     Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
-        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
-        return branch64(cond, left, scratchRegister);
+        dataLabel = moveWithPatch(initialRightValue, scratchRegister());
+        return branch64(cond, left, scratchRegister());
+    }
+
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        padBeforePatch();
+        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister());
+        dataLabel = DataLabel32(this);
+        return branch32(cond, left, scratchRegister());
     }
 
     DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
     {
-        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
-        store64(scratchRegister, address);
+        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister());
+        store64(scratchRegister(), address);
         return label;
     }
+
+    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
+    {
+        return PatchableJump(branch64(cond, reg, imm));
+    }
+
+    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        return PatchableJump(branch64(cond, left, right));
+    }
     
     using MacroAssemblerX86Common::branch8;
     Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
     {
-        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister);
-        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister), right);
+        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister());
+        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister()), right);
     }
     
     using MacroAssemblerX86Common::branchTest8;
     Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
         TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
-        MacroAssemblerX86Common::move(addr, scratchRegister);
-        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
+        MacroAssemblerX86Common::move(addr, scratchRegister());
+        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister(), address.base, TimesOne), mask8);
     }
     
     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
-        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister()), mask8);
+    }
+
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.cvttsd2siq_rr(src, dest);
+    }
+
+    void truncateDoubleToInt64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.cvttsd2siq_rr(src, dest);
+    }
+
+    // int64Min should contain exactly 0x43E0000000000000 == 2^63 (the magnitude of int64_t::min() as a double). scratch may
+    // be the same FPR as src.
+    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min)
+    {
+        ASSERT(scratch != int64Min);
+
+        // Since X86 does not have a floating point to unsigned integer instruction, we need to use the signed
+        // integer conversion instruction. If src is less than int64Min (2^63), the results of the two
+        // instructions are the same. Otherwise we subtract int64Min first, truncate the now in-range
+        // double, and then OR the 2^63 bit back into the destination gpr.
+
+        Jump large = branchDouble(DoubleGreaterThanOrEqual, src, int64Min);
+        m_assembler.cvttsd2siq_rr(src, dest);
+        Jump done = jump();
+        large.link(this);
+        moveDouble(src, scratch);
+        m_assembler.subsd_rr(int64Min, scratch);
+        m_assembler.movq_i64r(0x8000000000000000, scratchRegister());
+        m_assembler.cvttsd2siq_rr(scratch, dest);
+        m_assembler.orq_rr(scratchRegister(), dest);
+        done.link(this);
+    }
+
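cvttsd2si is the only scalar double-truncation instruction and it is signed, so the sequence above splits the unsigned range at 2^63. The arithmetic written out in portable C++ (9223372036854775808.0 is the double whose bit pattern is 0x43E0000000000000):

    #include <cstdint>

    inline uint64_t truncateToUint64(double src)
    {
        const double twoTo63 = 9223372036854775808.0; // bits 0x43E0000000000000
        if (src < twoTo63)                            // the branchDouble() fast path
            return static_cast<uint64_t>(static_cast<int64_t>(src));
        // Large path: bias into signed range, truncate, then OR the top bit
        // (the movq_i64r(0x8000000000000000) / orq_rr pair) back in.
        int64_t biased = static_cast<int64_t>(src - twoTo63);
        return static_cast<uint64_t>(biased) | (UINT64_C(1) << 63);
    }

    int main()
    {
        return (truncateToUint64(3.5) == 3
            && truncateToUint64(9223372036854775808.0 + 4096.0) == (UINT64_C(1) << 63) + 4096)
            ? 0 : 1;
    }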
+    void truncateFloatToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.cvttss2siq_rr(src, dest);
+    }
+
+    void truncateFloatToInt64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.cvttss2siq_rr(src, dest);
+    }
+
+    // int64Min should contain exactly 0x5f000000 == 2^63 (the magnitude of int64_t::min() as a float). scratch may be the
+    // same FPR as src.
+    void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min)
+    {
+        ASSERT(scratch != int64Min);
+
+        // Since X86 does not have a floating point to unsigned integer instruction, we need to use the signed
+        // integer conversion instruction. If src is less than int64Min (2^63), the results of the two
+        // instructions are the same. Otherwise we subtract int64Min first, truncate the now in-range
+        // float, and then OR the 2^63 bit back into the destination gpr.
+
+        Jump large = branchFloat(DoubleGreaterThanOrEqual, src, int64Min);
+        m_assembler.cvttss2siq_rr(src, dest);
+        Jump done = jump();
+        large.link(this);
+        moveDouble(src, scratch);
+        m_assembler.subss_rr(int64Min, scratch);
+        m_assembler.movq_i64r(0x8000000000000000, scratchRegister());
+        m_assembler.cvttss2siq_rr(scratch, dest);
+        m_assembler.orq_rr(scratchRegister(), dest);
+        done.link(this);
     }
 
     void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
@@ -672,21 +1366,77 @@ public:
         m_assembler.cvtsi2sdq_rr(src, dest);
     }
 
+    void convertInt64ToDouble(Address src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2sdq_mr(src.offset, src.base, dest);
+    }
+
+    void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2ssq_rr(src, dest);
+    }
+
+    void convertInt64ToFloat(Address src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2ssq_mr(src.offset, src.base, dest);
+    }
+
+    // One of scratch or scratch2 may be the same as src
+    void convertUInt64ToDouble(RegisterID src, FPRegisterID dest, RegisterID scratch)
+    {
+        RegisterID scratch2 = scratchRegister();
+
+        m_assembler.testq_rr(src, src);
+        AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed));
+        m_assembler.cvtsi2sdq_rr(src, dest);
+        AssemblerLabel done = m_assembler.jmp();
+        m_assembler.linkJump(signBitSet, m_assembler.label());
+        if (scratch != src)
+            m_assembler.movq_rr(src, scratch);
+        m_assembler.movq_rr(src, scratch2);
+        m_assembler.shrq_i8r(1, scratch);
+        m_assembler.andq_ir(1, scratch2);
+        m_assembler.orq_rr(scratch, scratch2);
+        m_assembler.cvtsi2sdq_rr(scratch2, dest);
+        m_assembler.addsd_rr(dest, dest);
+        m_assembler.linkJump(done, m_assembler.label());
+    }
+
+    // One of scratch or scratch2 may be the same as src
+    void convertUInt64ToFloat(RegisterID src, FPRegisterID dest, RegisterID scratch)
+    {
+        RegisterID scratch2 = scratchRegister();
+        m_assembler.testq_rr(src, src);
+        AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed));
+        m_assembler.cvtsi2ssq_rr(src, dest);
+        AssemblerLabel done = m_assembler.jmp();
+        m_assembler.linkJump(signBitSet, m_assembler.label());
+        if (scratch != src)
+            m_assembler.movq_rr(src, scratch);
+        m_assembler.movq_rr(src, scratch2);
+        m_assembler.shrq_i8r(1, scratch);
+        m_assembler.andq_ir(1, scratch2);
+        m_assembler.orq_rr(scratch, scratch2);
+        m_assembler.cvtsi2ssq_rr(scratch2, dest);
+        m_assembler.addss_rr(dest, dest);
+        m_assembler.linkJump(done, m_assembler.label());
+    }
+
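Both unsigned converters above handle the sign-bit-set case with a round-to-odd halving trick: (v >> 1) | (v & 1) keeps the discarded low bit sticky, so converting the half and doubling still rounds correctly. In portable terms:

    #include <cstdint>

    inline double uint64ToDouble(uint64_t v)
    {
        if (static_cast<int64_t>(v) >= 0)        // sign bit clear: direct cvtsi2sdq
            return static_cast<double>(static_cast<int64_t>(v));
        uint64_t half = (v >> 1) | (v & 1);      // the shrq / andq / orq triple
        double d = static_cast<double>(static_cast<int64_t>(half));
        return d + d;                            // addsd dest, dest
    }

    int main()
    {
        uint64_t v = UINT64_C(0xFFFFFFFFFFFFFFFF);
        return uint64ToDouble(v) == 18446744073709551616.0 ? 0 : 1; // rounds to 2^64
    }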
     static bool supportsFloatingPoint() { return true; }
-    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
     static bool supportsFloatingPointTruncate() { return true; }
     static bool supportsFloatingPointSqrt() { return true; }
     static bool supportsFloatingPointAbs() { return true; }
     
     static FunctionPtr readCallTarget(CodeLocationCall call)
     {
-        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
+        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
     }
 
-    static bool haveScratchRegisterForBlinding() { return true; }
-    static RegisterID scratchRegisterForBlinding() { return scratchRegister; }
+    bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; }
+    RegisterID scratchRegisterForBlinding() { return scratchRegister(); }
 
     static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
     
     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
     {
@@ -698,127 +1448,78 @@ public:
         return label.labelAtOffset(-totalBytes);
     }
     
+    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
+    {
+        const int rexBytes = 1;
+        const int opcodeBytes = 1;
+        const int immediateBytes = 4;
+        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+    
     static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
     {
         return startOfBranchPtrWithPatchOnRegister(label);
     }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
+    {
+        return startOfBranch32WithPatchOnRegister(label);
+    }
     
     static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
     {
-        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast(initialValue), s_scratchRegister);
     }
 
-    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
     {
-        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
-    }
-
-#if USE(MASM_PROBE)
-    // This function emits code to preserve the CPUState (e.g. registers),
-    // call a user supplied probe function, and restore the CPUState before
-    // continuing with other JIT generated code.
-    //
-    // The user supplied probe function will be called with a single pointer to
-    // a ProbeContext struct (defined above) which contains, among other things,
-    // the preserved CPUState. This allows the user probe function to inspect
-    // the CPUState at that point in the JIT generated code.
-    //
-    // If the user probe function alters the register values in the ProbeContext,
-    // the altered values will be loaded into the CPU registers when the probe
-    // returns.
-    //
-    // The ProbeContext is stack allocated and is only valid for the duration
-    // of the call to the user probe function.
-
-    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-#endif // USE(MASM_PROBE)
-
-private:
-    friend class LinkBuffer;
-    friend class RepatchBuffer;
+        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, s_scratchRegister);
+    }
 
-    static void linkCall(void* code, Call call, FunctionPtr function)
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
     {
-        if (!call.isFlagSet(Call::Near))
-            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value());
-        else
-            X86Assembler::linkCall(code, call.m_label, function.value());
+        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
     }
 
     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
     {
-        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
     }
 
     static void repatchCall(CodeLocationCall call, FunctionPtr destination)
     {
-        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
     }
 
-#if USE(MASM_PROBE)
-    inline TrustedImm64 trustedImm64FromPtr(void* ptr)
+private:
+    // If lzcnt is not available, use this after BSR
+    // to count the leading zeros.
+    void clz64AfterBsr(RegisterID dst)
     {
-        return TrustedImm64(TrustedImmPtr(ptr));
-    }
+        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
+        move(TrustedImm32(64), dst);
 
-    inline TrustedImm64 trustedImm64FromPtr(ProbeFunction function)
-    {
-        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
+        Jump skipNonZeroCase = jump();
+        srcIsNonZero.link(this);
+        xor64(TrustedImm32(0x3f), dst);
+        skipNonZeroCase.link(this);
     }
 
-    inline TrustedImm64 trustedImm64FromPtr(void (*function)())
+    friend class LinkBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
+        if (!call.isFlagSet(Call::Near))
+            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value());
+        else if (call.isFlagSet(Call::Tail))
+            X86Assembler::linkJump(code, call.m_label, function.value());
+        else
+            X86Assembler::linkCall(code, call.m_label, function.value());
     }
-#endif
 };
 
-#if USE(MASM_PROBE)
-
-extern "C" void ctiMasmProbeTrampoline();
-
-// What code is emitted for the probe?
-// ==================================
-// We want to keep the size of the emitted probe invocation code as compact as
-// possible to minimize the perturbation to the JIT generated code. However,
-// we also need to preserve the CPU registers and set up the ProbeContext to be
-// passed to the user probe function.
-//
-// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
-// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
-// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation
-// work i.e. saving the CPUState (and setting up the ProbeContext), calling the
-// user probe function, and restoring the CPUState before returning to JIT
-// generated code.
-//
-// What values are in the saved registers?
-// ======================================
-// Conceptually, the saved registers should contain values as if the probe
-// is not present in the JIT generated code. Hence, they should contain values
-// that are expected at the start of the instruction immediately following the
-// probe.
-//
-// Specifically, the saved stack pointer register will point to the stack
-// position before we push the ProbeContext frame. The saved rip will point to
-// the address of the instruction immediately following the probe. 
-
-inline void MacroAssemblerX86_64::probe(MacroAssemblerX86_64::ProbeFunction function, void* arg1, void* arg2)
-{
-    push(RegisterID::esp);
-    push(RegisterID::eax);
-    move(trustedImm64FromPtr(arg2), RegisterID::eax);
-    push(RegisterID::eax);
-    move(trustedImm64FromPtr(arg1), RegisterID::eax);
-    push(RegisterID::eax);
-    move(trustedImm64FromPtr(function), RegisterID::eax);
-    push(RegisterID::eax);
-    move(trustedImm64FromPtr(ctiMasmProbeTrampoline), RegisterID::eax);
-    call(RegisterID::eax);
-}
-#endif // USE(MASM_PROBE)
-
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
-
-#endif // MacroAssemblerX86_64_h
diff --git a/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h
new file mode 100644
index 000000000..c6f53b347
--- /dev/null
+++ b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "Register.h"
+#include "StackAlignment.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+// The maxFrameExtentForSlowPathCall is the max amount of stack space (in bytes)
+// that can be used for outgoing args when calling a slow path C function
+// from JS code.
+
+#if !ENABLE(JIT)
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(X86_64) && OS(WINDOWS)
+// 4 args in registers, but stack space needs to be allocated for all args.
+static const size_t maxFrameExtentForSlowPathCall = 64;
+
+#elif CPU(X86_64)
+// All args in registers.
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(X86)
+// 7 args on stack (28 bytes).
+static const size_t maxFrameExtentForSlowPathCall = 40;
+
+#elif CPU(ARM64)
+// All args in registers.
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(ARM)
+// First four args in registers, remaining 4 args on stack.
+static const size_t maxFrameExtentForSlowPathCall = 24;
+
+#elif CPU(MIPS)
+// Though args are in registers, there need to be space on the stack for all args.
+static const size_t maxFrameExtentForSlowPathCall = 40;
+
+#else
+#error "Unsupported CPU: need value for maxFrameExtentForSlowPathCall"
+
+#endif
+
+COMPILE_ASSERT(!(maxFrameExtentForSlowPathCall % sizeof(Register)), extent_must_be_in_multiples_of_registers);
+
+#if ENABLE(JIT)
+// Make sure that cfr - maxFrameExtentForSlowPathCall bytes will make the stack pointer aligned
+COMPILE_ASSERT((maxFrameExtentForSlowPathCall % 16) == 16 - sizeof(CallerFrameAndPC), extent_must_align_stack_from_callframe_pointer);
+#endif
+
+static const size_t maxFrameExtentForSlowPathCallInRegisters = maxFrameExtentForSlowPathCall / sizeof(Register);
+
+} // namespace JSC
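The alignment assertion above encodes that the call frame register is 16-byte aligned just past the pushed frame pointer and return address, so subtracting the extent must land the stack pointer back on a 16-byte boundary. A stand-alone check of that arithmetic, assuming (as on JSC's 64-bit targets) that CallerFrameAndPC is two machine words; CallerFrameAndPC64 here is a hypothetical stand-in:

    // Hypothetical stand-in for JSC's CallerFrameAndPC on a 64-bit target:
    // the saved frame pointer plus the return address.
    struct CallerFrameAndPC64 { void* callerFrame; void* returnPC; };

    static_assert(sizeof(CallerFrameAndPC64) == 16, "two 8-byte words");

    // X86_64/Windows case: extent 64 -> 64 % 16 == 0 == 16 - 16.
    static_assert(64 % 16 == 16 - sizeof(CallerFrameAndPC64),
        "cfr - maxFrameExtentForSlowPathCall stays 16-byte aligned");

    int main() { return 0; }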
diff --git a/Source/JavaScriptCore/assembler/RepatchBuffer.h b/Source/JavaScriptCore/assembler/RepatchBuffer.h
deleted file mode 100644
index 41e950ad8..000000000
--- a/Source/JavaScriptCore/assembler/RepatchBuffer.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef RepatchBuffer_h
-#define RepatchBuffer_h
-
-#if ENABLE(JIT)
-
-#include "CodeBlock.h"
-#include <MacroAssembler.h>
-#include <wtf/Noncopyable.h>
-
-namespace JSC {
-
-// RepatchBuffer:
-//
-// This class is used to modify code after code generation has been completed,
-// and after the code has potentially already been executed.  This mechanism is
-// used to apply optimizations to the code.
-//
-class RepatchBuffer {
-    typedef MacroAssemblerCodePtr CodePtr;
-
-public:
-    RepatchBuffer(CodeBlock* codeBlock)
-        : m_codeBlock(codeBlock)
-    {
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-        RefPtr<JITCode> code = codeBlock->jitCode();
-        m_start = code->start();
-        m_size = code->size();
-
-        ExecutableAllocator::makeWritable(m_start, m_size);
-#endif
-    }
-
-    ~RepatchBuffer()
-    {
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-        ExecutableAllocator::makeExecutable(m_start, m_size);
-#endif
-    }
-    
-    CodeBlock* codeBlock() const { return m_codeBlock; }
-
-    void relink(CodeLocationJump jump, CodeLocationLabel destination)
-    {
-        MacroAssembler::repatchJump(jump, destination);
-    }
-
-    void relink(CodeLocationCall call, CodeLocationLabel destination)
-    {
-        MacroAssembler::repatchCall(call, destination);
-    }
-
-    void relink(CodeLocationCall call, FunctionPtr destination)
-    {
-        MacroAssembler::repatchCall(call, destination);
-    }
-
-    void relink(CodeLocationNearCall nearCall, CodePtr destination)
-    {
-        MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination));
-    }
-
-    void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
-    {
-        MacroAssembler::repatchNearCall(nearCall, destination);
-    }
-
-    void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
-    {
-        MacroAssembler::repatchInt32(dataLabel32, value);
-    }
-
-    void repatch(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
-    {
-        MacroAssembler::repatchCompact(dataLabelCompact, value);
-    }
-
-    void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
-    {
-        MacroAssembler::repatchPointer(dataLabelPtr, value);
-    }
-
-    void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
-    {
-        relink(CodeLocationCall(CodePtr(returnAddress)), label);
-    }
-    
-    void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
-    {
-        relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
-    }
-
-    void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
-    {
-        relink(CodeLocationCall(CodePtr(returnAddress)), function);
-    }
-    
-    void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
-    {
-        relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
-    }
-    
-    void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
-    {
-        relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
-    }
-    
-    void replaceWithLoad(CodeLocationConvertibleLoad label)
-    {
-        MacroAssembler::replaceWithLoad(label);
-    }
-    
-    void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
-    {
-        MacroAssembler::replaceWithAddressComputation(label);
-    }
-    
-    void setLoadInstructionIsActive(CodeLocationConvertibleLoad label, bool isActive)
-    {
-        if (isActive)
-            replaceWithLoad(label);
-        else
-            replaceWithAddressComputation(label);
-    }
-
-    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
-    {
-        return MacroAssembler::startOfBranchPtrWithPatchOnRegister(label);
-    }
-    
-    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
-    {
-        return MacroAssembler::startOfPatchableBranchPtrWithPatchOnAddress(label);
-    }
-    
-    void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
-    {
-        MacroAssembler::replaceWithJump(instructionStart, destination);
-    }
-    
-    // This is a *bit* of a silly API, since we currently always also repatch the
-    // immediate after calling this. But I'm fine with that, since this just feels
-    // less yucky.
-    void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::RegisterID reg, void* value)
-    {
-        MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart, reg, value);
-    }
-
-    void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::Address address, void* value)
-    {
-        MacroAssembler::revertJumpReplacementToPatchableBranchPtrWithPatch(instructionStart, address, value);
-    }
-
-private:
-    CodeBlock* m_codeBlock;
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-    void* m_start;
-    size_t m_size;
-#endif
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // RepatchBuffer_h
diff --git a/Source/JavaScriptCore/assembler/SH4Assembler.h b/Source/JavaScriptCore/assembler/SH4Assembler.h
deleted file mode 100644
index d326279c5..000000000
--- a/Source/JavaScriptCore/assembler/SH4Assembler.h
+++ /dev/null
@@ -1,2225 +0,0 @@
-/*
- * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
- * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef SH4Assembler_h
-#define SH4Assembler_h
-
-#if ENABLE(ASSEMBLER) && CPU(SH4)
-
-#include "AssemblerBuffer.h"
-#include "AssemblerBufferWithConstantPool.h"
-#include "JITCompilationEffort.h"
-#include <limits.h>
-#include <stdarg.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <wtf/Assertions.h>
-#include <wtf/DataLog.h>
-#include <wtf/Vector.h>
-
-#ifndef NDEBUG
-#define SH4_ASSEMBLER_TRACING
-#endif
-
-namespace JSC {
-typedef uint16_t SH4Word;
-
-enum {
-    INVALID_OPCODE = 0xffff,
-    ADD_OPCODE = 0x300c,
-    ADDIMM_OPCODE = 0x7000,
-    ADDC_OPCODE = 0x300e,
-    ADDV_OPCODE = 0x300f,
-    AND_OPCODE = 0x2009,
-    ANDIMM_OPCODE = 0xc900,
-    DIV0_OPCODE = 0x2007,
-    DIV1_OPCODE = 0x3004,
-    BF_OPCODE = 0x8b00,
-    BFS_OPCODE = 0x8f00,
-    BRA_OPCODE = 0xa000,
-    BRAF_OPCODE = 0x0023,
-    NOP_OPCODE = 0x0009,
-    BSR_OPCODE = 0xb000,
-    RTS_OPCODE = 0x000b,
-    BT_OPCODE = 0x8900,
-    BTS_OPCODE = 0x8d00,
-    BSRF_OPCODE = 0x0003,
-    BRK_OPCODE = 0x003b,
-    FTRC_OPCODE = 0xf03d,
-    CMPEQ_OPCODE = 0x3000,
-    CMPEQIMM_OPCODE = 0x8800,
-    CMPGE_OPCODE = 0x3003,
-    CMPGT_OPCODE = 0x3007,
-    CMPHI_OPCODE = 0x3006,
-    CMPHS_OPCODE = 0x3002,
-    CMPPL_OPCODE = 0x4015,
-    CMPPZ_OPCODE = 0x4011,
-    CMPSTR_OPCODE = 0x200c,
-    DT_OPCODE = 0x4010,
-    FCMPEQ_OPCODE = 0xf004,
-    FCMPGT_OPCODE = 0xf005,
-    FMOV_OPCODE = 0xf00c,
-    FADD_OPCODE = 0xf000,
-    FMUL_OPCODE = 0xf002,
-    FSUB_OPCODE = 0xf001,
-    FDIV_OPCODE = 0xf003,
-    FNEG_OPCODE = 0xf04d,
-    JMP_OPCODE = 0x402b,
-    JSR_OPCODE = 0x400b,
-    LDSPR_OPCODE = 0x402a,
-    LDSLPR_OPCODE = 0x4026,
-    MOV_OPCODE = 0x6003,
-    MOVIMM_OPCODE = 0xe000,
-    MOVB_WRITE_RN_OPCODE = 0x2000,
-    MOVB_WRITE_RNDEC_OPCODE = 0x2004,
-    MOVB_WRITE_R0RN_OPCODE = 0x0004,
-    MOVB_WRITE_OFFGBR_OPCODE = 0xc000,
-    MOVB_WRITE_OFFRN_OPCODE = 0x8000,
-    MOVB_READ_RM_OPCODE = 0x6000,
-    MOVB_READ_RMINC_OPCODE = 0x6004,
-    MOVB_READ_R0RM_OPCODE = 0x000c,
-    MOVB_READ_OFFGBR_OPCODE = 0xc400,
-    MOVB_READ_OFFRM_OPCODE = 0x8400,
-    MOVL_WRITE_RN_OPCODE = 0x2002,
-    MOVL_WRITE_RNDEC_OPCODE = 0x2006,
-    MOVL_WRITE_R0RN_OPCODE = 0x0006,
-    MOVL_WRITE_OFFGBR_OPCODE = 0xc200,
-    MOVL_WRITE_OFFRN_OPCODE = 0x1000,
-    MOVL_READ_RM_OPCODE = 0x6002,
-    MOVL_READ_RMINC_OPCODE = 0x6006,
-    MOVL_READ_R0RM_OPCODE = 0x000e,
-    MOVL_READ_OFFGBR_OPCODE = 0xc600,
-    MOVL_READ_OFFPC_OPCODE = 0xd000,
-    MOVL_READ_OFFRM_OPCODE = 0x5000,
-    MOVW_WRITE_RN_OPCODE = 0x2001,
-    MOVW_WRITE_R0RN_OPCODE = 0x0005,
-    MOVW_READ_RM_OPCODE = 0x6001,
-    MOVW_READ_RMINC_OPCODE = 0x6005,
-    MOVW_READ_R0RM_OPCODE = 0x000d,
-    MOVW_READ_OFFRM_OPCODE = 0x8500,
-    MOVW_READ_OFFPC_OPCODE = 0x9000,
-    MOVA_READ_OFFPC_OPCODE = 0xc700,
-    MOVT_OPCODE = 0x0029,
-    MULL_OPCODE = 0x0007,
-    DMULL_L_OPCODE = 0x3005,
-    STSMACL_OPCODE = 0x001a,
-    STSMACH_OPCODE = 0x000a,
-    DMULSL_OPCODE = 0x300d,
-    NEG_OPCODE = 0x600b,
-    NEGC_OPCODE = 0x600a,
-    NOT_OPCODE = 0x6007,
-    OR_OPCODE = 0x200b,
-    ORIMM_OPCODE = 0xcb00,
-    ORBIMM_OPCODE = 0xcf00,
-    SETS_OPCODE = 0x0058,
-    SETT_OPCODE = 0x0018,
-    SHAD_OPCODE = 0x400c,
-    SHAL_OPCODE = 0x4020,
-    SHAR_OPCODE = 0x4021,
-    SHLD_OPCODE = 0x400d,
-    SHLL_OPCODE = 0x4000,
-    SHLL2_OPCODE = 0x4008,
-    SHLL8_OPCODE = 0x4018,
-    SHLL16_OPCODE = 0x4028,
-    SHLR_OPCODE = 0x4001,
-    SHLR2_OPCODE = 0x4009,
-    SHLR8_OPCODE = 0x4019,
-    SHLR16_OPCODE = 0x4029,
-    STSPR_OPCODE = 0x002a,
-    STSLPR_OPCODE = 0x4022,
-    FLOAT_OPCODE = 0xf02d,
-    SUB_OPCODE = 0x3008,
-    SUBC_OPCODE = 0x300a,
-    SUBV_OPCODE = 0x300b,
-    TST_OPCODE = 0x2008,
-    TSTIMM_OPCODE = 0xc800,
-    TSTB_OPCODE = 0xcc00,
-    EXTUB_OPCODE = 0x600c,
-    EXTUW_OPCODE = 0x600d,
-    XOR_OPCODE = 0x200a,
-    XORIMM_OPCODE = 0xca00,
-    XORB_OPCODE = 0xce00,
-    FMOVS_READ_RM_INC_OPCODE = 0xf009,
-    FMOVS_READ_RM_OPCODE = 0xf008,
-    FMOVS_READ_R0RM_OPCODE = 0xf006,
-    FMOVS_WRITE_RN_OPCODE = 0xf00a,
-    FMOVS_WRITE_RN_DEC_OPCODE = 0xf00b,
-    FMOVS_WRITE_R0RN_OPCODE = 0xf007,
-    FCNVDS_DRM_FPUL_OPCODE = 0xf0bd,
-    FCNVSD_FPUL_DRN_OPCODE = 0xf0ad,
-    LDS_RM_FPUL_OPCODE = 0x405a,
-    FLDS_FRM_FPUL_OPCODE = 0xf01d,
-    STS_FPUL_RN_OPCODE = 0x005a,
-    FSTS_FPUL_FRN_OPCODE = 0xf00d,
-    LDSFPSCR_OPCODE = 0x406a,
-    STSFPSCR_OPCODE = 0x006a,
-    LDSRMFPUL_OPCODE = 0x405a,
-    FSTSFPULFRN_OPCODE = 0xf00d,
-    FABS_OPCODE = 0xf05d,
-    FSQRT_OPCODE = 0xf06d,
-    FSCHG_OPCODE = 0xf3fd,
-    CLRT_OPCODE = 8,
-    SYNCO_OPCODE = 0x00ab,
-};
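-
-// These values are base patterns for SH-4's fixed-width 16-bit instructions;
-// the getOpcodeGroup*() helpers below OR register numbers and immediates into
-// the free nibbles. Illustrative example: MOVIMM_OPCODE (0xe000) encodes
-// "MOV #imm8, Rn" as 1110nnnniiiiiiii, so "MOV #1, R4" assembles to 0xe401.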
-
-namespace SH4Registers {
-typedef enum {
-    r0,
-    r1,
-    r2,
-    r3,
-    r4,
-    r5,
-    r6,
-    r7,
-    r8,
-    r9,
-    r10,
-    r11,
-    r12,
-    r13,
-    r14, fp = r14,
-    r15, sp = r15,
-    pc,
-    pr,
-} RegisterID;
-
-typedef enum {
-    fr0, dr0 = fr0,
-    fr1,
-    fr2, dr2 = fr2,
-    fr3,
-    fr4, dr4 = fr4,
-    fr5,
-    fr6, dr6 = fr6,
-    fr7,
-    fr8, dr8 = fr8,
-    fr9,
-    fr10, dr10 = fr10,
-    fr11,
-    fr12, dr12 = fr12,
-    fr13,
-    fr14, dr14 = fr14,
-    fr15,
-} FPRegisterID;
-}
-
-inline uint16_t getOpcodeGroup1(uint16_t opc, int rm, int rn)
-{
-    return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4));
-}
-
-inline uint16_t getOpcodeGroup2(uint16_t opc, int rm)
-{
-    return (opc | ((rm & 0xf) << 8));
-}
-
-inline uint16_t getOpcodeGroup3(uint16_t opc, int rm, int rn)
-{
-    return (opc | ((rm & 0xf) << 8) | (rn & 0xff));
-}
-
-inline uint16_t getOpcodeGroup4(uint16_t opc, int rm, int rn, int offset)
-{
-    return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4) | (offset & 0xf));
-}
-
-inline uint16_t getOpcodeGroup5(uint16_t opc, int rm)
-{
-    return (opc | (rm & 0xff));
-}
-
-inline uint16_t getOpcodeGroup6(uint16_t opc, int rm)
-{
-    return (opc | (rm & 0xfff));
-}
-
-inline uint16_t getOpcodeGroup7(uint16_t opc, int rm)
-{
-    return (opc | ((rm & 0x7) << 9));
-}
-
-inline uint16_t getOpcodeGroup8(uint16_t opc, int rm, int rn)
-{
-    return (opc | ((rm & 0x7) << 9) | ((rn & 0x7) << 5));
-}
-
-inline uint16_t getOpcodeGroup9(uint16_t opc, int rm, int rn)
-{
-    return (opc | ((rm & 0xf) << 8) | ((rn & 0x7) << 5));
-}
-
-inline uint16_t getOpcodeGroup10(uint16_t opc, int rm, int rn)
-{
-    return (opc | ((rm & 0x7) << 9) | ((rn & 0xf) << 4));
-}
-
-inline uint16_t getOpcodeGroup11(uint16_t opc, int rm, int rn)
-{
-    return (opc | ((rm & 0xf) << 4) | (rn & 0xf));
-}
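-
-// Worked example (illustrative): addlRegReg(r1, r2) below calls
-// getOpcodeGroup1(ADD_OPCODE, r2, r1), which packs
-//     0x300c | (2 << 8) | (1 << 4) == 0x321c,
-// the SH-4 encoding of "ADD R1, R2" (0011nnnnmmmm1100).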
-
-inline uint16_t getRn(uint16_t x)
-{
-    return ((x & 0xf00) >> 8);
-}
-
-inline uint16_t getRm(uint16_t x)
-{
-    return ((x & 0xf0) >> 4);
-}
-
-inline uint16_t getDisp(uint16_t x)
-{
-    return (x & 0xf);
-}
-
-inline uint16_t getImm8(uint16_t x)
-{
-    return (x & 0xff);
-}
-
-inline uint16_t getImm12(uint16_t x)
-{
-    return (x & 0xfff);
-}
-
-inline uint16_t getDRn(uint16_t x)
-{
-    return ((x & 0xe00) >> 9);
-}
-
-inline uint16_t getDRm(uint16_t x)
-{
-    return ((x & 0xe0) >> 5);
-}
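-
-// The get*() field accessors invert that packing when patching or tracing
-// emitted code: for the 0x321c example above, getRn(0x321c) == 2 and
-// getRm(0x321c) == 1.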
-
-class SH4Assembler {
-public:
-    typedef SH4Registers::RegisterID RegisterID;
-    typedef SH4Registers::FPRegisterID FPRegisterID;
-    typedef AssemblerBufferWithConstantPool<512, 4, 2, SH4Assembler> SH4Buffer;
-    static const RegisterID scratchReg1 = SH4Registers::r3;
-    static const RegisterID scratchReg2 = SH4Registers::r11;
-    static const uint32_t maxInstructionSize = 16;
-
-    static RegisterID firstRegister() { return SH4Registers::r0; }
-    static RegisterID lastRegister() { return SH4Registers::r15; }
-
-    static FPRegisterID firstFPRegister() { return SH4Registers::dr0; }
-    static FPRegisterID lastFPRegister() { return SH4Registers::dr14; }
-
-    enum {
-        padForAlign8 = 0x00,
-        padForAlign16 = 0x0009,
-        padForAlign32 = 0x00090009,
-    };
-
-    enum JumpType {
-        JumpFar,
-        JumpNear
-    };
-
-    SH4Assembler()
-        : m_claimscratchReg(0x0)
-        , m_indexOfLastWatchpoint(INT_MIN)
-        , m_indexOfTailOfLastWatchpoint(INT_MIN)
-    {
-    }
-
-    SH4Buffer& buffer() { return m_buffer; }
-
-    // SH4 condition codes
-    typedef enum {
-        EQ = 0x0, // Equal
-        NE = 0x1, // Not Equal
-        HS = 0x2, // Unsigned Greater Than or Equal
-        HI = 0x3, // Unsigned Greater Than
-        LS = 0x4, // Unsigned Lower or Same
-        LI = 0x5, // Unsigned Lower
-        GE = 0x6, // Greater or Equal
-        LT = 0x7, // Less Than
-        GT = 0x8, // Greater Than
-        LE = 0x9, // Less or Equal
-        OF = 0xa, // OverFlow
-        SI = 0xb, // Signed
-        NS = 0xc, // Not Signed
-        EQU= 0xd, // Equal or unordered(NaN)
-        NEU= 0xe, // Not Equal or unordered(NaN)
-        GTU= 0xf, // Greater Than or unordered(NaN)
-        GEU= 0x10, // Greater or Equal or unordered(NaN)
-        LTU= 0x11, // Less Than or unordered(NaN)
-        LEU= 0x12, // Less or Equal or unordered(NaN)
-    } Condition;
-
-    // Opaque label types
-public:
-    bool isImmediate(int constant)
-    {
-        return ((constant <= 127) && (constant >= -128));
-    }
-
-    RegisterID claimScratch()
-    {
-        ASSERT((m_claimscratchReg != 0x3));
-
-        if (!(m_claimscratchReg & 0x1)) {
-            m_claimscratchReg = (m_claimscratchReg | 0x1);
-            return scratchReg1;
-        }
-
-        m_claimscratchReg = (m_claimscratchReg | 0x2);
-        return scratchReg2;
-    }
-
-    void releaseScratch(RegisterID scratchR)
-    {
-        if (scratchR == scratchReg1)
-            m_claimscratchReg = (m_claimscratchReg & 0x2);
-        else
-            m_claimscratchReg = (m_claimscratchReg & 0x1);
-    }
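-
-    // Illustrative discipline (editorial sketch): only two scratch registers
-    // (r3 and r11) exist, so claims must be short-lived and always paired
-    // with a release:
-    //
-    //     RegisterID scr = claimScratch();
-    //     loadConstant(0xdeadbeef, scr);
-    //     branch(JMP_OPCODE, scr);
-    //     releaseScratch(scr);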
-
-    // Stack operations
-
-    void pushReg(RegisterID reg)
-    {
-        if (reg == SH4Registers::pr) {
-            oneShortOp(getOpcodeGroup2(STSLPR_OPCODE, SH4Registers::sp));
-            return;
-        }
-
-        oneShortOp(getOpcodeGroup1(MOVL_WRITE_RNDEC_OPCODE, SH4Registers::sp, reg));
-    }
-
-    void popReg(RegisterID reg)
-    {
-        if (reg == SH4Registers::pr) {
-            oneShortOp(getOpcodeGroup2(LDSLPR_OPCODE, SH4Registers::sp));
-            return;
-        }
-
-        oneShortOp(getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, reg, SH4Registers::sp));
-    }
-
-    void movt(RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup2(MOVT_OPCODE, dst);
-        oneShortOp(opc);
-    }
-
-    // Arithmetic operations
-
-    void addlRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(ADD_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void addclRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(ADDC_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void addvlRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(ADDV_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void addlImm8r(int imm8, RegisterID dst)
-    {
-        ASSERT((imm8 <= 127) && (imm8 >= -128));
-
-        uint16_t opc = getOpcodeGroup3(ADDIMM_OPCODE, dst, imm8);
-        oneShortOp(opc);
-    }
-
-    void andlRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(AND_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void andlImm8r(int imm8, RegisterID dst)
-    {
-        ASSERT((imm8 <= 255) && (imm8 >= 0));
-        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
-
-        uint16_t opc = getOpcodeGroup5(ANDIMM_OPCODE, imm8);
-        oneShortOp(opc);
-    }
-
-    void div1lRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(DIV1_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void div0lRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(DIV0_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void notlReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(NOT_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void orlRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(OR_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void orlImm8r(int imm8, RegisterID dst)
-    {
-        ASSERT((imm8 <= 255) && (imm8 >= 0));
-        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
-
-        uint16_t opc = getOpcodeGroup5(ORIMM_OPCODE, imm8);
-        oneShortOp(opc);
-    }
-
-    void sublRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(SUB_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void subvlRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(SUBV_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void xorlRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(XOR_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void xorlImm8r(int imm8, RegisterID dst)
-    {
-        ASSERT((imm8 <= 255) && (imm8 >= 0));
-        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
-
-        uint16_t opc = getOpcodeGroup5(XORIMM_OPCODE, imm8);
-        oneShortOp(opc);
-    }
-
-    void shllImm8r(int imm, RegisterID dst)
-    {
-        switch (imm) {
-        case 1:
-            oneShortOp(getOpcodeGroup2(SHLL_OPCODE, dst));
-            break;
-        case 2:
-            oneShortOp(getOpcodeGroup2(SHLL2_OPCODE, dst));
-            break;
-        case 8:
-            oneShortOp(getOpcodeGroup2(SHLL8_OPCODE, dst));
-            break;
-        case 16:
-            oneShortOp(getOpcodeGroup2(SHLL16_OPCODE, dst));
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
-
-    void neg(RegisterID dst, RegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup1(NEG_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void shldRegReg(RegisterID dst, RegisterID rShift)
-    {
-        oneShortOp(getOpcodeGroup1(SHLD_OPCODE, dst, rShift));
-    }
-
-    void shadRegReg(RegisterID dst, RegisterID rShift)
-    {
-        oneShortOp(getOpcodeGroup1(SHAD_OPCODE, dst, rShift));
-    }
-
-    void shlrImm8r(int imm, RegisterID dst)
-    {
-        switch (imm) {
-        case 1:
-            oneShortOp(getOpcodeGroup2(SHLR_OPCODE, dst));
-            break;
-        case 2:
-            oneShortOp(getOpcodeGroup2(SHLR2_OPCODE, dst));
-            break;
-        case 8:
-            oneShortOp(getOpcodeGroup2(SHLR8_OPCODE, dst));
-            break;
-        case 16:
-            oneShortOp(getOpcodeGroup2(SHLR16_OPCODE, dst));
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
-
-    void shalImm8r(int imm, RegisterID dst)
-    {
-        switch (imm) {
-        case 1:
-            oneShortOp(getOpcodeGroup2(SHAL_OPCODE, dst));
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
-
-    void sharImm8r(int imm, RegisterID dst)
-    {
-        switch (imm) {
-        case 1:
-            oneShortOp(getOpcodeGroup2(SHAR_OPCODE, dst));
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
-
-    void imullRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MULL_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void dmullRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(DMULL_L_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void dmulslRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(DMULSL_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void stsmacl(RegisterID reg)
-    {
-        uint16_t opc = getOpcodeGroup2(STSMACL_OPCODE, reg);
-        oneShortOp(opc);
-    }
-
-    void stsmach(RegisterID reg)
-    {
-        uint16_t opc = getOpcodeGroup2(STSMACH_OPCODE, reg);
-        oneShortOp(opc);
-    }
-
-    // Comparisons
-
-    void cmplRegReg(RegisterID left, RegisterID right, Condition cond)
-    {
-        switch (cond) {
-        case NE:
-            oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
-            break;
-        case GT:
-            oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, right, left));
-            break;
-        case EQ:
-            oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
-            break;
-        case GE:
-            oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, right, left));
-            break;
-        case HS:
-            oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, right, left));
-            break;
-        case HI:
-            oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, right, left));
-            break;
-        case LI:
-            oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, left, right));
-            break;
-        case LS:
-            oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, left, right));
-            break;
-        case LE:
-            oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, left, right));
-            break;
-        case LT:
-            oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, left, right));
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
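-
-    // Note that SH-4 only provides the "greater" comparisons (CMP/GT, CMP/GE,
-    // CMP/HI, CMP/HS) plus CMP/EQ, so the LT/LE/LI/LS cases above synthesize
-    // "less" tests by swapping the operands, and NE reuses CMP/EQ with the
-    // consumer branching on BF (T clear) rather than BT (T set).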
-
-    void cmppl(RegisterID reg)
-    {
-        uint16_t opc = getOpcodeGroup2(CMPPL_OPCODE, reg);
-        oneShortOp(opc);
-    }
-
-    void cmppz(RegisterID reg)
-    {
-        uint16_t opc = getOpcodeGroup2(CMPPZ_OPCODE, reg);
-        oneShortOp(opc);
-    }
-
-    void cmpEqImmR0(int imm, RegisterID dst)
-    {
-        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
-        uint16_t opc = getOpcodeGroup5(CMPEQIMM_OPCODE, imm);
-        oneShortOp(opc);
-    }
-
-    void testlRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(TST_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void testlImm8r(int imm, RegisterID dst)
-    {
-        ASSERT((imm <= 255) && (imm >= 0));
-        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
-
-        uint16_t opc = getOpcodeGroup5(TSTIMM_OPCODE, imm);
-        oneShortOp(opc);
-    }
-
-    void nop()
-    {
-        oneShortOp(NOP_OPCODE, false);
-    }
-
-    void synco()
-    {
-        oneShortOp(SYNCO_OPCODE);
-    }
-
-    void sett()
-    {
-        oneShortOp(SETT_OPCODE);
-    }
-
-    void clrt()
-    {
-        oneShortOp(CLRT_OPCODE);
-    }
-
-    void fschg()
-    {
-        oneShortOp(FSCHG_OPCODE);
-    }
-
-    void bkpt()
-    {
-        oneShortOp(BRK_OPCODE, false);
-    }
-
-    void branch(uint16_t opc, int label)
-    {
-        switch (opc) {
-        case BT_OPCODE:
-            ASSERT((label <= 127) && (label >= -128));
-            oneShortOp(getOpcodeGroup5(BT_OPCODE, label));
-            break;
-        case BRA_OPCODE:
-            ASSERT((label <= 2047) && (label >= -2048));
-            oneShortOp(getOpcodeGroup6(BRA_OPCODE, label));
-            break;
-        case BF_OPCODE:
-            ASSERT((label <= 127) && (label >= -128));
-            oneShortOp(getOpcodeGroup5(BF_OPCODE, label));
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
-
-    void branch(uint16_t opc, RegisterID reg)
-    {
-        switch (opc) {
-        case BRAF_OPCODE:
-            oneShortOp(getOpcodeGroup2(BRAF_OPCODE, reg));
-            break;
-        case JMP_OPCODE:
-            oneShortOp(getOpcodeGroup2(JMP_OPCODE, reg));
-            break;
-        case JSR_OPCODE:
-            oneShortOp(getOpcodeGroup2(JSR_OPCODE, reg));
-            break;
-        case BSRF_OPCODE:
-            oneShortOp(getOpcodeGroup2(BSRF_OPCODE, reg));
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
-
-    void ldspr(RegisterID reg)
-    {
-        uint16_t opc = getOpcodeGroup2(LDSPR_OPCODE, reg);
-        oneShortOp(opc);
-    }
-
-    void stspr(RegisterID reg)
-    {
-        uint16_t opc = getOpcodeGroup2(STSPR_OPCODE, reg);
-        oneShortOp(opc);
-    }
-
-    void extub(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(EXTUB_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-    
-    void extuw(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(EXTUW_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    // float operations
-
-    void ldsrmfpul(RegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup2(LDS_RM_FPUL_OPCODE, src);
-        oneShortOp(opc);
-    }
-
-    void fneg(FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup2(FNEG_OPCODE, dst);
-        oneShortOp(opc, true, false);
-    }
-
-    void fsqrt(FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup2(FSQRT_OPCODE, dst);
-        oneShortOp(opc, true, false);
-    }
-
-    void stsfpulReg(RegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup2(STS_FPUL_RN_OPCODE, src);
-        oneShortOp(opc);
-    }
-
-    void floatfpulfrn(FPRegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup2(FLOAT_OPCODE, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fmull(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(FMUL_OPCODE, dst, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fmovsRegReg(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(FMOV_OPCODE, dst, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fmovsReadrm(RegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_OPCODE, dst, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fmovsWriterm(FPRegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_OPCODE, dst, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fmovsWriter0r(FPRegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_R0RN_OPCODE, dst, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fmovsReadr0r(RegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(FMOVS_READ_R0RM_OPCODE, dst, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fmovsReadrminc(RegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_INC_OPCODE, dst, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fmovsWriterndec(FPRegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_DEC_OPCODE, dst, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void ftrcRegfpul(FPRegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup2(FTRC_OPCODE, src);
-        oneShortOp(opc, true, false);
-    }
-
-    void fldsfpul(FPRegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup2(FLDS_FRM_FPUL_OPCODE, src);
-        oneShortOp(opc);
-    }
-
-    void fstsfpul(FPRegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup2(FSTS_FPUL_FRN_OPCODE, src);
-        oneShortOp(opc);
-    }
-
-    void ldsfpscr(RegisterID reg)
-    {
-        uint16_t opc = getOpcodeGroup2(LDSFPSCR_OPCODE, reg);
-        oneShortOp(opc);
-    }
-
-    void stsfpscr(RegisterID reg)
-    {
-        uint16_t opc = getOpcodeGroup2(STSFPSCR_OPCODE, reg);
-        oneShortOp(opc);
-    }
-
-    // double operations
-
-    void dcnvds(FPRegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup7(FCNVDS_DRM_FPUL_OPCODE, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void dcnvsd(FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup7(FCNVSD_FPUL_DRN_OPCODE, dst >> 1);
-        oneShortOp(opc);
-    }
-
-    void dcmppeq(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup8(FCMPEQ_OPCODE, dst >> 1, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void dcmppgt(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup8(FCMPGT_OPCODE, dst >> 1, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void dmulRegReg(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup8(FMUL_OPCODE, dst >> 1, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void dsubRegReg(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup8(FSUB_OPCODE, dst >> 1, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void daddRegReg(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup8(FADD_OPCODE, dst >> 1, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void dmovRegReg(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup8(FMOV_OPCODE, dst >> 1, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void ddivRegReg(FPRegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup8(FDIV_OPCODE, dst >> 1, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void dabs(FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup7(FABS_OPCODE, dst >> 1);
-        oneShortOp(opc);
-    }
-
-    void dsqrt(FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup7(FSQRT_OPCODE, dst >> 1);
-        oneShortOp(opc);
-    }
-
-    void dneg(FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup7(FNEG_OPCODE, dst >> 1);
-        oneShortOp(opc);
-    }
-
-    void fmovReadrm(RegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_OPCODE, dst >> 1, src);
-        oneShortOp(opc);
-    }
-
-    void fmovWriterm(FPRegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_OPCODE, dst, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void fmovWriter0r(FPRegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_R0RN_OPCODE, dst, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void fmovReadr0r(RegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup10(FMOVS_READ_R0RM_OPCODE, dst >> 1, src);
-        oneShortOp(opc);
-    }
-
-    void fmovReadrminc(RegisterID src, FPRegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_INC_OPCODE, dst >> 1, src);
-        oneShortOp(opc);
-    }
-
-    void fmovWriterndec(FPRegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_DEC_OPCODE, dst, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void floatfpulDreg(FPRegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup7(FLOAT_OPCODE, src >> 1);
-        oneShortOp(opc);
-    }
-
-    void ftrcdrmfpul(FPRegisterID src)
-    {
-        uint16_t opc = getOpcodeGroup7(FTRC_OPCODE, src >> 1);
-        oneShortOp(opc);
-    }
-
-    // Various move ops
-
-    void movImm8(int imm8, RegisterID dst)
-    {
-        ASSERT((imm8 <= 127) && (imm8 >= -128));
-
-        uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, imm8);
-        oneShortOp(opc);
-    }
-
-    void movlRegReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOV_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movwRegMem(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVW_WRITE_RN_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movwMemReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVW_READ_RM_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movwMemRegIn(RegisterID base, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVW_READ_RMINC_OPCODE, dst, base);
-        oneShortOp(opc);
-    }
-
-    void movwPCReg(int offset, RegisterID base, RegisterID dst)
-    {
-        ASSERT_UNUSED(base, base == SH4Registers::pc);
-        ASSERT((offset <= 255) && (offset >= 0));
-
-        uint16_t opc = getOpcodeGroup3(MOVW_READ_OFFPC_OPCODE, dst, offset);
-        oneShortOp(opc);
-    }
-
-    void movwMemReg(int offset, RegisterID base, RegisterID dst)
-    {
-        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
-
-        uint16_t opc = getOpcodeGroup11(MOVW_READ_OFFRM_OPCODE, base, offset);
-        oneShortOp(opc);
-    }
-
-    void movwR0mr(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVW_READ_R0RM_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movwRegMemr0(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVW_WRITE_R0RN_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movlRegMem(RegisterID src, int offset, RegisterID base)
-    {
-        ASSERT((offset <= 15) && (offset >= 0));
-
-        if (!offset) {
-            oneShortOp(getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src));
-            return;
-        }
-
-        oneShortOp(getOpcodeGroup4(MOVL_WRITE_OFFRN_OPCODE, base, src, offset));
-    }
-
-    void movlRegMem(RegisterID src, RegisterID base)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src);
-        oneShortOp(opc);
-    }
-
-    void movlMemReg(int offset, RegisterID base, RegisterID dst)
-    {
-        if (base == SH4Registers::pc) {
-            ASSERT((offset <= 255) && (offset >= 0));
-            oneShortOp(getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, dst, offset));
-            return;
-        }
-
-        ASSERT((offset <= 15) && (offset >= 0));
-        if (!offset) {
-            oneShortOp(getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base));
-            return;
-        }
-
-        oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
-    }
-
-    void movlMemRegCompact(int offset, RegisterID base, RegisterID dst)
-    {
-        oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
-    }
-
-    void movbRegMem(RegisterID src, RegisterID base)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVB_WRITE_RN_OPCODE, base, src);
-        oneShortOp(opc);
-    }
-
-    void movbMemReg(int offset, RegisterID base, RegisterID dst)
-    {
-        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
-
-        uint16_t opc = getOpcodeGroup11(MOVB_READ_OFFRM_OPCODE, base, offset);
-        oneShortOp(opc);
-    }
-
-    void movbR0mr(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVB_READ_R0RM_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movbMemReg(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVB_READ_RM_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movbMemRegIn(RegisterID base, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVB_READ_RMINC_OPCODE, dst, base);
-        oneShortOp(opc);
-    }
-
-    void movbRegMemr0(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVB_WRITE_R0RN_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movlMemReg(RegisterID base, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base);
-        oneShortOp(opc);
-    }
-
-    void movlMemRegIn(RegisterID base, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, dst, base);
-        oneShortOp(opc);
-    }
-
-    void movlR0mr(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVL_READ_R0RM_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void movlRegMemr0(RegisterID src, RegisterID dst)
-    {
-        uint16_t opc = getOpcodeGroup1(MOVL_WRITE_R0RN_OPCODE, dst, src);
-        oneShortOp(opc);
-    }
-
-    void loadConstant(uint32_t constant, RegisterID dst)
-    {
-        if (((int)constant <= 0x7f) && ((int)constant >= -0x80)) {
-            movImm8(constant, dst);
-            return;
-        }
-
-        uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
-
-        m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
-        printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
-        m_buffer.putShortWithConstantInt(opc, constant, true);
-    }
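-
-    // Illustrative behaviour (editorial note): loadConstant(0x12345678, r4)
-    // does not fit the 8-bit MOV #imm form, so it emits a PC-relative
-    // "MOV.L @(disp, PC), R4" and parks 0x12345678 in the constant pool
-    // managed by AssemblerBufferWithConstantPool.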
-
-    void loadConstantUnReusable(uint32_t constant, RegisterID dst, bool ensureSpace = false)
-    {
-        uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
-
-        if (ensureSpace)
-            m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
-
-        printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
-        m_buffer.putShortWithConstantInt(opc, constant);
-    }
-
-    // Flow control
-
-    AssemblerLabel call()
-    {
-        RegisterID scr = claimScratch();
-        m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
-        loadConstantUnReusable(0x0, scr);
-        branch(JSR_OPCODE, scr);
-        nop();
-        releaseScratch(scr);
-        return m_buffer.label();
-    }
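-
-    // The far call/jump sequences emitted above always occupy three 16-bit
-    // slots, "mov.l @(disp, PC), scr; jsr/braf @scr; nop", with the returned
-    // label pointing just past the nop; linkCall(), relinkCall() and
-    // relinkJump() rely on that shape when they step back three instructions.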
-
-    AssemblerLabel call(RegisterID dst)
-    {
-        m_buffer.ensureSpace(maxInstructionSize + 2);
-        branch(JSR_OPCODE, dst);
-        nop();
-        return m_buffer.label();
-    }
-
-    AssemblerLabel jmp()
-    {
-        RegisterID scr = claimScratch();
-        m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
-        loadConstantUnReusable(0x0, scr);
-        branch(BRAF_OPCODE, scr);
-        nop();
-        releaseScratch(scr);
-        return m_buffer.label();
-    }
-
-    AssemblerLabel extraInstrForBranch(RegisterID dst)
-    {
-        loadConstantUnReusable(0x0, dst);
-        branch(BRAF_OPCODE, dst);
-        nop();
-        return m_buffer.label();
-    }
-
-    AssemblerLabel jmp(RegisterID dst)
-    {
-        jmpReg(dst);
-        return m_buffer.label();
-    }
-
-    void jmpReg(RegisterID dst)
-    {
-        m_buffer.ensureSpace(maxInstructionSize + 2);
-        branch(JMP_OPCODE, dst);
-        nop();
-    }
-
-    AssemblerLabel jne()
-    {
-        branch(BF_OPCODE, 0);
-        return m_buffer.label();
-    }
-
-    AssemblerLabel je()
-    {
-        branch(BT_OPCODE, 0);
-        return m_buffer.label();
-    }
-
-    AssemblerLabel bra()
-    {
-        branch(BRA_OPCODE, 0);
-        return m_buffer.label();
-    }
-
-    void ret()
-    {
-        m_buffer.ensureSpace(maxInstructionSize + 2);
-        oneShortOp(RTS_OPCODE, false);
-    }
-
-    AssemblerLabel labelIgnoringWatchpoints()
-    {
-        m_buffer.ensureSpaceForAnyInstruction();
-        return m_buffer.label();
-    }
-
-    AssemblerLabel labelForWatchpoint()
-    {
-        m_buffer.ensureSpaceForAnyInstruction();
-        AssemblerLabel result = m_buffer.label();
-        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
-            result = label();
-        m_indexOfLastWatchpoint = result.m_offset;
-        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
-        return result;
-    }
-
-    AssemblerLabel label()
-    {
-        AssemblerLabel result = labelIgnoringWatchpoints();
-        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
-            nop();
-            result = labelIgnoringWatchpoints();
-        }
-        return result;
-    }
-
-    int sizeOfConstantPool()
-    {
-        return m_buffer.sizeOfConstantPool();
-    }
-
-    AssemblerLabel align(int alignment)
-    {
-        m_buffer.ensureSpace(maxInstructionSize + 2);
-        while (!m_buffer.isAligned(alignment)) {
-            nop();
-            m_buffer.ensureSpace(maxInstructionSize + 2);
-        }
-        return label();
-    }
-
-    static void changePCrelativeAddress(int offset, uint16_t* instructionPtr, uint32_t newAddress)
-    {
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) & (~0x3));
-        *reinterpret_cast<uint32_t*>(address) = newAddress;
-    }
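-
-    // Illustrative reading of the math above: SH-4's "MOV.L @(disp, PC), Rn"
-    // loads from (disp * 4) + ((instruction address + 4) & ~3), so the helper
-    // recomputes that literal slot's address from the instruction pointer and
-    // rewrites the 32-bit constant stored there.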
-
-    static uint32_t readPCrelativeAddress(int offset, uint16_t* instructionPtr)
-    {
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) & (~0x3));
-        return *reinterpret_cast<uint32_t*>(address);
-    }
-
-    static uint16_t* getInstructionPtr(void* code, int offset)
-    {
-        return reinterpret_cast<uint16_t*>(reinterpret_cast<uint32_t>(code) + offset);
-    }
-
-    static void linkJump(void* code, AssemblerLabel from, void* to)
-    {
-        ASSERT(from.isSet());
-
-        uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset) - 3;
-        int offsetBits = (reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(code)) - from.m_offset;
-
-        /* MOV #imm, reg => LDR reg
-           braf @reg        braf @reg
-           nop              nop
-        */
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
-        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
-        printInstr(*instructionPtr, from.m_offset + 2);
-    }
-
-    static void linkCall(void* code, AssemblerLabel from, void* to)
-    {
-        uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset);
-        instructionPtr -= 3;
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
-    }
-
-    static void linkPointer(void* code, AssemblerLabel where, void* value)
-    {
-        uint16_t* instructionPtr = getInstructionPtr(code, where.m_offset);
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(value));
-    }
-
-    static unsigned getCallReturnOffset(AssemblerLabel call)
-    {
-        ASSERT(call.isSet());
-        return call.m_offset;
-    }
-
-    static uint32_t* getLdrImmAddressOnPool(SH4Word* insn, uint32_t* constPool)
-    {
-        return (constPool + (*insn & 0xff));
-    }
-
-    static SH4Word patchConstantPoolLoad(SH4Word load, int value)
-    {
-        return ((load & ~0xff) | value);
-    }
-
-    static SH4Buffer::TwoShorts placeConstantPoolBarrier(int offset)
-    {
-        ASSERT(((offset >> 1) <= 2047) && ((offset >> 1) >= -2048));
-
-        SH4Buffer::TwoShorts m_barrier;
-        m_barrier.high = (BRA_OPCODE | (offset >> 1));
-        m_barrier.low = NOP_OPCODE;
-        printInstr(((BRA_OPCODE | (offset >> 1))), 0);
-        printInstr(NOP_OPCODE, 0);
-        return m_barrier;
-    }
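-
-    // Editorial note: the "barrier" is simply "BRA <past the pool>; NOP",
-    // emitted ahead of a flushed constant pool so execution jumps over the
-    // literal data; the byte offset is shifted right once to fit BRA's
-    // halfword-scaled 12-bit displacement.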
-
-    static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
-    {
-        SH4Word* instructionPtr = reinterpret_cast<SH4Word*>(loadAddr);
-        SH4Word instruction = *instructionPtr;
-        SH4Word index = instruction & 0xff;
-
-        if ((instruction & 0xf000) != MOVIMM_OPCODE)
-            return;
-
-        ASSERT((((reinterpret_cast<uint32_t>(constPoolAddr) - reinterpret_cast<uint32_t>(loadAddr)) + index * 4)) < 1024);
-
-        int offset = reinterpret_cast<uint32_t>(constPoolAddr) + (index * 4) - ((reinterpret_cast<uint32_t>(instructionPtr) & ~0x03) + 4);
-        instruction &= 0x0f00;
-        instruction |= 0xd000;
-        offset &= 0x03ff;
-        instruction |= (offset >> 2);
-        *instructionPtr = instruction;
-        printInstr(instruction, reinterpret_cast<unsigned>(loadAddr));
-    }
-
-    static void repatchPointer(void* where, void* value)
-    {
-        patchPointer(where, value);
-    }
-
-    static void* readPointer(void* code)
-    {
-        return reinterpret_cast<void*>(readInt32(code));
-    }
-
-    static void repatchInt32(void* where, int32_t value)
-    {
-        uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where);
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, value);
-    }
-
-    static void repatchCompact(void* where, int32_t value)
-    {
-        uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where);
-        ASSERT(value >= 0);
-        ASSERT(value <= 60);
-
-        // Handle the uncommon case where a flushConstantPool occurred in movlMemRegCompact.
-        if ((instructionPtr[0] & 0xf000) == BRA_OPCODE)
-            instructionPtr += (instructionPtr[0] & 0x0fff) + 2;
-
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFRM_OPCODE);
-        instructionPtr[0] = (instructionPtr[0] & 0xfff0) | (value >> 2);
-        cacheFlush(instructionPtr, sizeof(uint16_t));
-    }
-
-    static void relinkCall(void* from, void* to)
-    {
-        uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
-        instructionPtr -= 3;
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
-    }
-
-    static void relinkJump(void* from, void* to)
-    {
-        uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
-        instructionPtr -= 3;
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
-        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(from));
-    }
-
-    // Linking & patching
-
-    static ptrdiff_t maxJumpReplacementSize()
-    {
-        return sizeof(SH4Word) * 6;
-    }
-
-    static void replaceWithJump(void *instructionStart, void *to)
-    {
-        SH4Word* instruction = reinterpret_cast<SH4Word*>(instructionStart);
-        intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + 3 * sizeof(SH4Word));
-
-        if ((instruction[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE) {
-            // We have an entry in the constant pool and we potentially replace a branchPtrWithPatch, so back up what would be the
-            // condition (CMP/xx and Bx opcodes) for later use in revertJumpReplacementToBranchPtrWithPatch before putting the jump.
-            instruction[4] = instruction[1];
-            instruction[5] = instruction[2];
-            instruction[1] = (BRAF_OPCODE | (instruction[0] & 0x0f00));
-            instruction[2] = NOP_OPCODE;
-            cacheFlush(&instruction[1], 2 * sizeof(SH4Word));
-        } else {
-            instruction[0] = getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, SH4Registers::r13, 1);
-            instruction[1] = getOpcodeGroup2(BRAF_OPCODE, SH4Registers::r13);
-            instruction[2] = NOP_OPCODE;
-            cacheFlush(instruction, 3 * sizeof(SH4Word));
-        }
-
-        changePCrelativeAddress(instruction[0] & 0x00ff, instruction, difference);
-    }
-
-    static void revertJumpReplacementToBranchPtrWithPatch(void* instructionStart, RegisterID rd, int imm)
-    {
-        SH4Word* insn = reinterpret_cast<SH4Word*>(instructionStart);
-        ASSERT((insn[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        ASSERT((insn[0] & 0x00ff) != 1);
-
-        insn[0] = getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, SH4Registers::r13, insn[0] & 0x00ff);
-        if ((insn[1] & 0xf0ff) == BRAF_OPCODE) {
-            insn[1] = (insn[4] & 0xf00f) | (rd << 8) | (SH4Registers::r13 << 4); // Restore CMP/xx opcode.
-            insn[2] = insn[5];
-            ASSERT(((insn[2] & 0xff00) == BT_OPCODE) || ((insn[2] & 0xff00) == BF_OPCODE));
-            ASSERT((insn[3] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-            insn[4] = (BRAF_OPCODE | (insn[3] & 0x0f00));
-            insn[5] = NOP_OPCODE;
-            cacheFlush(insn, 6 * sizeof(SH4Word));
-        } else {
-            // The branchPtrWithPatch has already been restored, so we just patch the immediate value and ASSERT all is as expected.
-            ASSERT((insn[1] & 0xf000) == 0x3000);
-            insn[1] = (insn[1] & 0xf00f) | (rd << 8) | (SH4Registers::r13 << 4);
-            cacheFlush(insn, 2 * sizeof(SH4Word));
-            ASSERT(((insn[2] & 0xff00) == BT_OPCODE) || ((insn[2] & 0xff00) == BF_OPCODE));
-            ASSERT((insn[3] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-            ASSERT(insn[5] == NOP_OPCODE);
-        }
-
-        changePCrelativeAddress(insn[0] & 0x00ff, insn, imm);
-    }
-
-    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type = JumpFar)
-    {
-        ASSERT(to.isSet());
-        ASSERT(from.isSet());
-
-        uint16_t* instructionPtr = getInstructionPtr(data(), from.m_offset) - 1;
-        int offsetBits = (to.m_offset - from.m_offset);
-
-        if (type == JumpNear) {
-            uint16_t instruction = instructionPtr[0];
-            int offset = (offsetBits - 2);
-            ASSERT((((instruction == BT_OPCODE) || (instruction == BF_OPCODE)) && (offset >= -256) && (offset <= 254))
-                || ((instruction == BRA_OPCODE) && (offset >= -4096) && (offset <= 4094)));
-            *instructionPtr++ = instruction | (offset >> 1);
-            printInstr(*instructionPtr, from.m_offset + 2);
-            return;
-        }
-
-        /* MOV #imm, reg => LDR reg
-           braf @reg         braf @reg
-           nop               nop
-        */
-        instructionPtr -= 2;
-        ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
-
-        if ((instructionPtr[0] & 0xf000) == MOVIMM_OPCODE) {
-            uint32_t* addr = getLdrImmAddressOnPool(instructionPtr, m_buffer.poolAddress());
-            *addr = offsetBits;
-            printInstr(*instructionPtr, from.m_offset + 2);
-            return;
-        }
-
-        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
-        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
-        printInstr(*instructionPtr, from.m_offset + 2);
-    }
-
-    static void* getRelocatedAddress(void* code, AssemblerLabel label)
-    {
-        return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
-    }
-
-    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
-    {
-        return b.m_offset - a.m_offset;
-    }
-
-    static void patchPointer(void* code, AssemblerLabel where, void* value)
-    {
-        patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
-    }
-
-    static void patchPointer(void* code, void* value)
-    {
-        patchInt32(code, reinterpret_cast<uint32_t>(value));
-    }
-
-    static void patchInt32(void* code, uint32_t value)
-    {
-        changePCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code), value);
-    }
-
-    static uint32_t readInt32(void* code)
-    {
-        return readPCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code));
-    }
-
-    static void* readCallTarget(void* from)
-    {
-        uint16_t* instructionPtr = static_cast<uint16_t*>(from);
-        instructionPtr -= 3;
-        return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr));
-    }
-
-    static void cacheFlush(void* code, size_t size)
-    {
-#if OS(LINUX)
-        // Flush each page separately, otherwise the whole flush will fail if an uncommitted page is in the area.
-        unsigned currentPage = reinterpret_cast<unsigned>(code) & ~(pageSize() - 1);
-        unsigned lastPage = (reinterpret_cast<unsigned>(code) + size - 1) & ~(pageSize() - 1);
-        do {
-#if defined CACHEFLUSH_D_L2
-            syscall(__NR_cacheflush, currentPage, pageSize(), CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
-#else
-            syscall(__NR_cacheflush, currentPage, pageSize(), CACHEFLUSH_D_WB | CACHEFLUSH_I);
-#endif
-            currentPage += pageSize();
-        } while (lastPage >= currentPage);
-#else
-#error "The cacheFlush support is missing on this platform."
-#endif
-    }
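-
-    // Editorial note: on Linux/SH this relies on the architecture-specific
-    // cacheflush syscall (the CACHEFLUSH_* flags presumably coming from the
-    // kernel's asm/cachectl.h), writing back the data cache and invalidating
-    // the instruction cache one page at a time.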
-
-    void prefix(uint16_t pre)
-    {
-        m_buffer.putByte(pre);
-    }
-
-    void oneShortOp(uint16_t opcode, bool checksize = true, bool isDouble = true)
-    {
-        printInstr(opcode, m_buffer.codeSize(), isDouble);
-        if (checksize)
-            m_buffer.ensureSpace(maxInstructionSize);
-        m_buffer.putShortUnchecked(opcode);
-    }
-
-    void ensureSpace(int space)
-    {
-        m_buffer.ensureSpace(space);
-    }
-
-    void ensureSpace(int insnSpace, int constSpace)
-    {
-        m_buffer.ensureSpace(insnSpace, constSpace);
-    }
-
-    // Administrative methods
-
-    void* data() const { return m_buffer.data(); }
-    size_t codeSize() const { return m_buffer.codeSize(); }
-
-    unsigned debugOffset() { return m_buffer.debugOffset(); }
-
-#ifdef SH4_ASSEMBLER_TRACING
-    static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true)
-    {
-        if (!getenv("JavaScriptCoreDumpJIT"))
-            return;
-
-        const char *format = 0;
-        printfStdoutInstr("offset: 0x%8.8x\t", size);
-        switch (opc) {
-        case BRK_OPCODE:
-            format = "    BRK\n";
-            break;
-        case NOP_OPCODE:
-            format = "    NOP\n";
-            break;
-        case RTS_OPCODE:
-            format ="    *RTS\n";
-            break;
-        case SETS_OPCODE:
-            format = "    SETS\n";
-            break;
-        case SETT_OPCODE:
-            format = "    SETT\n";
-            break;
-        case CLRT_OPCODE:
-            format = "    CLRT\n";
-            break;
-        case FSCHG_OPCODE:
-            format = "    FSCHG\n";
-            break;
-        }
-        if (format) {
-            printfStdoutInstr(format);
-            return;
-        }
-        switch (opc & 0xf0ff) {
-        case BRAF_OPCODE:
-            format = "    *BRAF R%d\n";
-            break;
-        case DT_OPCODE:
-            format = "    DT R%d\n";
-            break;
-        case CMPPL_OPCODE:
-            format = "    CMP/PL R%d\n";
-            break;
-        case CMPPZ_OPCODE:
-            format = "    CMP/PZ R%d\n";
-            break;
-        case JMP_OPCODE:
-            format = "    *JMP @R%d\n";
-            break;
-        case JSR_OPCODE:
-            format = "    *JSR @R%d\n";
-            break;
-        case LDSPR_OPCODE:
-            format = "    LDS R%d, PR\n";
-            break;
-        case LDSLPR_OPCODE:
-            format = "    LDS.L @R%d+, PR\n";
-            break;
-        case MOVT_OPCODE:
-            format = "    MOVT R%d\n";
-            break;
-        case SHAL_OPCODE:
-            format = "    SHAL R%d\n";
-            break;
-        case SHAR_OPCODE:
-            format = "    SHAR R%d\n";
-            break;
-        case SHLL_OPCODE:
-            format = "    SHLL R%d\n";
-            break;
-        case SHLL2_OPCODE:
-            format = "    SHLL2 R%d\n";
-            break;
-        case SHLL8_OPCODE:
-            format = "    SHLL8 R%d\n";
-            break;
-        case SHLL16_OPCODE:
-            format = "    SHLL16 R%d\n";
-            break;
-        case SHLR_OPCODE:
-            format = "    SHLR R%d\n";
-            break;
-        case SHLR2_OPCODE:
-            format = "    SHLR2 R%d\n";
-            break;
-        case SHLR8_OPCODE:
-            format = "    SHLR8 R%d\n";
-            break;
-        case SHLR16_OPCODE:
-            format = "    SHLR16 R%d\n";
-            break;
-        case STSPR_OPCODE:
-            format = "    STS PR, R%d\n";
-            break;
-        case STSLPR_OPCODE:
-            format = "    STS.L PR, @-R%d\n";
-            break;
-        case LDS_RM_FPUL_OPCODE:
-            format = "    LDS R%d, FPUL\n";
-            break;
-        case STS_FPUL_RN_OPCODE:
-            format = "    STS FPUL, R%d \n";
-            break;
-        case FLDS_FRM_FPUL_OPCODE:
-            format = "    FLDS FR%d, FPUL\n";
-            break;
-        case FSTS_FPUL_FRN_OPCODE:
-            format = "    FSTS FPUL, R%d \n";
-            break;
-        case LDSFPSCR_OPCODE:
-            format = "    LDS R%d, FPSCR \n";
-            break;
-        case STSFPSCR_OPCODE:
-            format = "    STS FPSCR, R%d \n";
-            break;
-        case STSMACL_OPCODE:
-            format = "    STS MACL, R%d \n";
-            break;
-        case STSMACH_OPCODE:
-            format = "    STS MACH, R%d \n";
-            break;
-        case BSRF_OPCODE:
-            format = "    *BSRF R%d";
-            break;
-        case FTRC_OPCODE:
-            format = "    FTRC FR%d, FPUL\n";
-            break;
-        }
-        if (format) {
-            printfStdoutInstr(format, getRn(opc));
-            return;
-        }
-        switch (opc & 0xf0ff) {
-        case FNEG_OPCODE:
-            format = "    FNEG DR%d\n";
-            break;
-        case FLOAT_OPCODE:
-            format = "    FLOAT DR%d\n";
-            break;
-        case FTRC_OPCODE:
-            format = "    FTRC FR%d, FPUL\n";
-            break;
-        case FABS_OPCODE:
-            format = "    FABS FR%d\n";
-            break;
-        case FSQRT_OPCODE:
-            format = "    FSQRT FR%d\n";
-            break;
-        case FCNVDS_DRM_FPUL_OPCODE:
-            format = "    FCNVDS FR%d, FPUL\n";
-            break;
-        case FCNVSD_FPUL_DRN_OPCODE:
-            format = "    FCNVSD FPUL, FR%d\n";
-            break;
-        }
-        if (format) {
-            if (isdoubleInst)
-                printfStdoutInstr(format, getDRn(opc) << 1);
-            else
-                printfStdoutInstr(format, getRn(opc));
-            return;
-        }
-        switch (opc & 0xf00f) {
-        case ADD_OPCODE:
-            format = "    ADD R%d, R%d\n";
-            break;
-        case ADDC_OPCODE:
-            format = "    ADDC R%d, R%d\n";
-            break;
-        case ADDV_OPCODE:
-            format = "    ADDV R%d, R%d\n";
-            break;
-        case AND_OPCODE:
-            format = "    AND R%d, R%d\n";
-            break;
-        case DIV1_OPCODE:
-            format = "    DIV1 R%d, R%d\n";
-            break;
-        case CMPEQ_OPCODE:
-            format = "    CMP/EQ R%d, R%d\n";
-            break;
-        case CMPGE_OPCODE:
-            format = "    CMP/GE R%d, R%d\n";
-            break;
-        case CMPGT_OPCODE:
-            format = "    CMP/GT R%d, R%d\n";
-            break;
-        case CMPHI_OPCODE:
-            format = "    CMP/HI R%d, R%d\n";
-            break;
-        case CMPHS_OPCODE:
-            format = "    CMP/HS R%d, R%d\n";
-            break;
-        case MOV_OPCODE:
-            format = "    MOV R%d, R%d\n";
-            break;
-        case MOVB_WRITE_RN_OPCODE:
-            format = "    MOV.B R%d, @R%d\n";
-            break;
-        case MOVB_WRITE_RNDEC_OPCODE:
-            format = "    MOV.B R%d, @-R%d\n";
-            break;
-        case MOVB_WRITE_R0RN_OPCODE:
-            format = "    MOV.B R%d, @(R0, R%d)\n";
-            break;
-        case MOVB_READ_RM_OPCODE:
-            format = "    MOV.B @R%d, R%d\n";
-            break;
-        case MOVB_READ_RMINC_OPCODE:
-            format = "    MOV.B @R%d+, R%d\n";
-            break;
-        case MOVB_READ_R0RM_OPCODE:
-            format = "    MOV.B @(R0, R%d), R%d\n";
-            break;
-        case MOVL_WRITE_RN_OPCODE:
-            format = "    MOV.L R%d, @R%d\n";
-            break;
-        case MOVL_WRITE_RNDEC_OPCODE:
-            format = "    MOV.L R%d, @-R%d\n";
-            break;
-        case MOVL_WRITE_R0RN_OPCODE:
-            format = "    MOV.L R%d, @(R0, R%d)\n";
-            break;
-        case MOVL_READ_RM_OPCODE:
-            format = "    MOV.L @R%d, R%d\n";
-            break;
-        case MOVL_READ_RMINC_OPCODE:
-            format = "    MOV.L @R%d+, R%d\n";
-            break;
-        case MOVL_READ_R0RM_OPCODE:
-            format = "    MOV.L @(R0, R%d), R%d\n";
-            break;
-        case MULL_OPCODE:
-            format = "    MUL.L R%d, R%d\n";
-            break;
-        case DMULL_L_OPCODE:
-            format = "    DMULU.L R%d, R%d\n";
-            break;
-        case DMULSL_OPCODE:
-            format = "    DMULS.L R%d, R%d\n";
-            break;
-        case NEG_OPCODE:
-            format = "    NEG R%d, R%d\n";
-            break;
-        case NEGC_OPCODE:
-            format = "    NEGC R%d, R%d\n";
-            break;
-        case NOT_OPCODE:
-            format = "    NOT R%d, R%d\n";
-            break;
-        case OR_OPCODE:
-            format = "    OR R%d, R%d\n";
-            break;
-        case SHAD_OPCODE:
-            format = "    SHAD R%d, R%d\n";
-            break;
-        case SHLD_OPCODE:
-            format = "    SHLD R%d, R%d\n";
-            break;
-        case SUB_OPCODE:
-            format = "    SUB R%d, R%d\n";
-            break;
-        case SUBC_OPCODE:
-            format = "    SUBC R%d, R%d\n";
-            break;
-        case SUBV_OPCODE:
-            format = "    SUBV R%d, R%d\n";
-            break;
-        case TST_OPCODE:
-            format = "    TST R%d, R%d\n";
-            break;
-        case XOR_OPCODE:
-            format = "    XOR R%d, R%d\n";break;
-        case MOVW_WRITE_RN_OPCODE:
-            format = "    MOV.W R%d, @R%d\n";
-            break;
-        case MOVW_READ_RM_OPCODE:
-            format = "    MOV.W @R%d, R%d\n";
-            break;
-        case MOVW_READ_RMINC_OPCODE:
-            format = "    MOV.W @R%d+, R%d\n";
-            break;
-        case MOVW_READ_R0RM_OPCODE:
-            format = "    MOV.W @(R0, R%d), R%d\n";
-            break;
-        case MOVW_WRITE_R0RN_OPCODE:
-            format = "    MOV.W R%d, @(R0, R%d)\n";
-            break;
-        case EXTUB_OPCODE:
-            format = "    EXTU.B R%d, R%d\n";
-            break;
-        case EXTUW_OPCODE:
-            format = "    EXTU.W R%d, R%d\n";
-            break;
-        }
-        if (format) {
-            printfStdoutInstr(format, getRm(opc), getRn(opc));
-            return;
-        }
-        switch (opc & 0xf00f) {
-        case FSUB_OPCODE:
-            format = "    FSUB FR%d, FR%d\n";
-            break;
-        case FADD_OPCODE:
-            format = "    FADD FR%d, FR%d\n";
-            break;
-        case FDIV_OPCODE:
-            format = "    FDIV FR%d, FR%d\n";
-            break;
-        case FMUL_OPCODE:
-            format = "    DMULL FR%d, FR%d\n";
-            break;
-        case FMOV_OPCODE:
-            format = "    FMOV FR%d, FR%d\n";
-            break;
-        case FCMPEQ_OPCODE:
-            format = "    FCMP/EQ FR%d, FR%d\n";
-            break;
-        case FCMPGT_OPCODE:
-            format = "    FCMP/GT FR%d, FR%d\n";
-            break;
-        }
-        if (format) {
-            if (isdoubleInst)
-                printfStdoutInstr(format, getDRm(opc) << 1, getDRn(opc) << 1);
-            else
-                printfStdoutInstr(format, getRm(opc), getRn(opc));
-            return;
-        }
-        switch (opc & 0xf00f) {
-        case FMOVS_WRITE_RN_DEC_OPCODE:
-            format = "    %s FR%d, @-R%d\n";
-            break;
-        case FMOVS_WRITE_RN_OPCODE:
-            format = "    %s FR%d, @R%d\n";
-            break;
-        case FMOVS_WRITE_R0RN_OPCODE:
-            format = "    %s FR%d, @(R0, R%d)\n";
-            break;
-        }
-        if (format) {
-            if (isdoubleInst)
-                printfStdoutInstr(format, "FMOV", getDRm(opc) << 1, getDRn(opc));
-            else
-                printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
-            return;
-        }
-        switch (opc & 0xf00f) {
-        case FMOVS_READ_RM_OPCODE:
-            format = "    %s @R%d, FR%d\n";
-            break;
-        case FMOVS_READ_RM_INC_OPCODE:
-            format = "    %s @R%d+, FR%d\n";
-            break;
-        case FMOVS_READ_R0RM_OPCODE:
-            format = "    %s @(R0, R%d), FR%d\n";
-            break;
-        }
-        if (format) {
-            if (isdoubleInst)
-                printfStdoutInstr(format, "FMOV", getDRm(opc), getDRn(opc) << 1);
-            else
-                printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
-            return;
-        }
-        switch (opc & 0xff00) {
-        case BF_OPCODE:
-            format = "    BF %d\n";
-            break;
-        case BFS_OPCODE:
-            format = "    *BF/S %d\n";
-            break;
-        case ANDIMM_OPCODE:
-            format = "    AND #%d, R0\n";
-            break;
-        case BT_OPCODE:
-            format = "    BT %d\n";
-            break;
-        case BTS_OPCODE:
-            format = "    *BT/S %d\n";
-            break;
-        case CMPEQIMM_OPCODE:
-            format = "    CMP/EQ #%d, R0\n";
-            break;
-        case MOVB_WRITE_OFFGBR_OPCODE:
-            format = "    MOV.B R0, @(%d, GBR)\n";
-            break;
-        case MOVB_READ_OFFGBR_OPCODE:
-            format = "    MOV.B @(%d, GBR), R0\n";
-            break;
-        case MOVL_WRITE_OFFGBR_OPCODE:
-            format = "    MOV.L R0, @(%d, GBR)\n";
-            break;
-        case MOVL_READ_OFFGBR_OPCODE:
-            format = "    MOV.L @(%d, GBR), R0\n";
-            break;
-        case MOVA_READ_OFFPC_OPCODE:
-            format = "    MOVA @(%d, PC), R0\n";
-            break;
-        case ORIMM_OPCODE:
-            format = "    OR #%d, R0\n";
-            break;
-        case ORBIMM_OPCODE:
-            format = "    OR.B #%d, @(R0, GBR)\n";
-            break;
-        case TSTIMM_OPCODE:
-            format = "    TST #%d, R0\n";
-            break;
-        case TSTB_OPCODE:
-            format = "    TST.B %d, @(R0, GBR)\n";
-            break;
-        case XORIMM_OPCODE:
-            format = "    XOR #%d, R0\n";
-            break;
-        case XORB_OPCODE:
-            format = "    XOR.B %d, @(R0, GBR)\n";
-            break;
-        }
-        if (format) {
-            printfStdoutInstr(format, getImm8(opc));
-            return;
-        }
-        switch (opc & 0xff00) {
-        case MOVB_WRITE_OFFRN_OPCODE:
-            format = "    MOV.B R0, @(%d, R%d)\n";
-            break;
-        case MOVB_READ_OFFRM_OPCODE:
-            format = "    MOV.B @(%d, R%d), R0\n";
-            break;
-        }
-        if (format) {
-            printfStdoutInstr(format, getDisp(opc), getRm(opc));
-            return;
-        }
-        switch (opc & 0xf000) {
-        case BRA_OPCODE:
-            format = "    *BRA %d\n";
-            break;
-        case BSR_OPCODE:
-            format = "    *BSR %d\n";
-            break;
-        }
-        if (format) {
-            printfStdoutInstr(format, getImm12(opc));
-            return;
-        }
-        switch (opc & 0xf000) {
-        case MOVL_READ_OFFPC_OPCODE:
-            format = "    MOV.L @(%d, PC), R%d\n";
-            break;
-        case ADDIMM_OPCODE:
-            format = "    ADD #%d, R%d\n";
-            break;
-        case MOVIMM_OPCODE:
-            format = "    MOV #%d, R%d\n";
-            break;
-        case MOVW_READ_OFFPC_OPCODE:
-            format = "    MOV.W @(%d, PC), R%d\n";
-            break;
-        }
-        if (format) {
-            printfStdoutInstr(format, getImm8(opc), getRn(opc));
-            return;
-        }
-        switch (opc & 0xf000) {
-        case MOVL_WRITE_OFFRN_OPCODE:
-            format = "    MOV.L R%d, @(%d, R%d)\n";
-            printfStdoutInstr(format, getRm(opc), getDisp(opc), getRn(opc));
-            break;
-        case MOVL_READ_OFFRM_OPCODE:
-            format = "    MOV.L @(%d, R%d), R%d\n";
-            printfStdoutInstr(format, getDisp(opc), getRm(opc), getRn(opc));
-            break;
-        }
-    }
-
-    static void printfStdoutInstr(const char* format, ...)
-    {
-        if (getenv("JavaScriptCoreDumpJIT")) {
-            va_list args;
-            va_start(args, format);
-            vprintfStdoutInstr(format, args);
-            va_end(args);
-        }
-    }
-
-    static void vprintfStdoutInstr(const char* format, va_list args)
-    {
-        if (getenv("JavaScriptCoreDumpJIT"))
-            WTF::dataLogFV(format, args);
-    }
-
-    static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr)
-    {
-        printfStdoutInstr(">> repatch instructions after link\n");
-        for (int i = 0; i <= nbInstr; i++)
-            printInstr(*(first + i), offset + i);
-        printfStdoutInstr(">> end repatch\n");
-    }
-#else
-    static void printInstr(uint16_t, unsigned, bool = true) { };
-    static void printBlockInstr(uint16_t*, unsigned, int) { };
-#endif
-
-    static void replaceWithLoad(void* instructionStart)
-    {
-        SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
-
-        insPtr += 2; // skip MOV and ADD opcodes
-
-        if (((*insPtr) & 0xf00f) != MOVL_READ_RM_OPCODE) {
-            *insPtr = MOVL_READ_RM_OPCODE | (*insPtr & 0x0ff0);
-            cacheFlush(insPtr, sizeof(SH4Word));
-        }
-    }
-
-    static void replaceWithAddressComputation(void* instructionStart)
-    {
-        SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
-
-        insPtr += 2; // skip MOV and ADD opcodes
-
-        if (((*insPtr) & 0xf00f) != MOV_OPCODE) {
-            *insPtr = MOV_OPCODE | (*insPtr & 0x0ff0);
-            cacheFlush(insPtr, sizeof(SH4Word));
-        }
-    }
-
-private:
-    SH4Buffer m_buffer;
-    int m_claimscratchReg;
-    int m_indexOfLastWatchpoint;
-    int m_indexOfTailOfLastWatchpoint;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(ASSEMBLER) && CPU(SH4)
-
-#endif // SH4Assembler_h
diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h
index 1a43e206c..fb3a169a1 100644
--- a/Source/JavaScriptCore/assembler/X86Assembler.h
+++ b/Source/JavaScriptCore/assembler/X86Assembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,124 +23,108 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef X86Assembler_h
-#define X86Assembler_h
+#pragma once
 
 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
 
 #include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
 #include "JITCompilationEffort.h"
 #include <limits.h>
 #include <stdint.h>
 #include <wtf/Assertions.h>
 #include <wtf/Vector.h>
 
-#if USE(MASM_PROBE)
-#include <xmmintrin.h>
-#endif
-
 namespace JSC {
 
 inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
 
 namespace X86Registers {
-    typedef enum {
-        eax,
-        ecx,
-        edx,
-        ebx,
-        esp,
-        ebp,
-        esi,
-        edi,
 
-#if CPU(X86_64)
-        r8,
-        r9,
-        r10,
-        r11,
-        r12,
-        r13,
-        r14,
-        r15,
-#endif
-    } RegisterID;
+#define FOR_EACH_CPU_REGISTER(V) \
+    FOR_EACH_CPU_GPREGISTER(V) \
+    FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+    FOR_EACH_CPU_FPREGISTER(V)
+
+// The following are defined as pairs of the following values:
+// 1. the type of storage the JIT probe needs in order to save the register value.
+// 2. the name of the register.
+#define FOR_EACH_CPU_GPREGISTER(V) \
+    V(void*, eax) \
+    V(void*, ecx) \
+    V(void*, edx) \
+    V(void*, ebx) \
+    V(void*, esp) \
+    V(void*, ebp) \
+    V(void*, esi) \
+    V(void*, edi) \
+    FOR_EACH_X86_64_CPU_GPREGISTER(V)
+
+#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+    V(void*, eip) \
+    V(void*, eflags) \
+
+// Note: the JITs only store double values in the FP registers.
+#define FOR_EACH_CPU_FPREGISTER(V) \
+    V(double, xmm0) \
+    V(double, xmm1) \
+    V(double, xmm2) \
+    V(double, xmm3) \
+    V(double, xmm4) \
+    V(double, xmm5) \
+    V(double, xmm6) \
+    V(double, xmm7) \
+    FOR_EACH_X86_64_CPU_FPREGISTER(V)
 
-    typedef enum {
-        xmm0,
-        xmm1,
-        xmm2,
-        xmm3,
-        xmm4,
-        xmm5,
-        xmm6,
-        xmm7,
+#if CPU(X86)
 
-#if CPU(X86_64)
-        xmm8,
-        xmm9,
-        xmm10,
-        xmm11,
-        xmm12,
-        xmm13,
-        xmm14,
-        xmm15,
-#endif
-    } XMMRegisterID;
-
-#if USE(MASM_PROBE)
-    #define FOR_EACH_CPU_REGISTER(V) \
-        FOR_EACH_CPU_GPREGISTER(V) \
-        FOR_EACH_CPU_SPECIAL_REGISTER(V) \
-        FOR_EACH_CPU_FPREGISTER(V)
-
-    #define FOR_EACH_CPU_GPREGISTER(V) \
-        V(void*, eax) \
-        V(void*, ebx) \
-        V(void*, ecx) \
-        V(void*, edx) \
-        V(void*, esi) \
-        V(void*, edi) \
-        V(void*, ebp) \
-        V(void*, esp) \
-        FOR_EACH_X86_64_CPU_GPREGISTER(V)
-
-    #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
-        V(void*, eip) \
-        V(void*, eflags) \
-
-    #define FOR_EACH_CPU_FPREGISTER(V) \
-        V(__m128, xmm0) \
-        V(__m128, xmm1) \
-        V(__m128, xmm2) \
-        V(__m128, xmm3) \
-        V(__m128, xmm4) \
-        V(__m128, xmm5) \
-        V(__m128, xmm6) \
-        V(__m128, xmm7)
+#define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
+#define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add.
 
-#if CPU(X86)
-    #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
 #elif CPU(X86_64)
-    #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
-        V(void*, r8) \
-        V(void*, r9) \
-        V(void*, r10) \
-        V(void*, r11) \
-        V(void*, r12) \
-        V(void*, r13) \
-        V(void*, r14) \
-        V(void*, r15)
+
+#define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
+    V(void*, r8) \
+    V(void*, r9) \
+    V(void*, r10) \
+    V(void*, r11) \
+    V(void*, r12) \
+    V(void*, r13) \
+    V(void*, r14) \
+    V(void*, r15)
+
+#define FOR_EACH_X86_64_CPU_FPREGISTER(V) \
+    V(double, xmm8) \
+    V(double, xmm9) \
+    V(double, xmm10) \
+    V(double, xmm11) \
+    V(double, xmm12) \
+    V(double, xmm13) \
+    V(double, xmm14) \
+    V(double, xmm15)
+
 #endif // CPU(X86_64)
-#endif // USE(MASM_PROBE)
-}
+
+typedef enum {
+    #define DECLARE_REGISTER(_type, _regName) _regName,
+    FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+    #undef DECLARE_REGISTER
+} RegisterID;
+
+typedef enum {
+    #define DECLARE_REGISTER(_type, _regName) _regName,
+    FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+    #undef DECLARE_REGISTER
+} XMMRegisterID;
+
+} // namespace X86Registers
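
The register lists above use the X-macro pattern: one list of (storage type, name) pairs, expanded with different V macros to generate the enums here and the probe's storage layout elsewhere. A minimal standalone sketch of the same technique (the demo names are invented for illustration, not WebKit code):

    #include <cstdio>

    #define FOR_EACH_DEMO_GPREGISTER(V) \
        V(void*, eax) \
        V(void*, ecx) \
        V(void*, edx)

    // Expansion 1: an enum whose enumerators are the register names.
    enum DemoRegisterID {
    #define DECLARE_REGISTER(type, name) name,
        FOR_EACH_DEMO_GPREGISTER(DECLARE_REGISTER)
    #undef DECLARE_REGISTER
    };

    // Expansion 2: a parallel table of printable names that cannot drift out of sync.
    static const char* const demoRegisterNames[] = {
    #define DECLARE_REGISTER(type, name) #name,
        FOR_EACH_DEMO_GPREGISTER(DECLARE_REGISTER)
    #undef DECLARE_REGISTER
    };

    int main()
    {
        std::printf("%s = %d\n", demoRegisterNames[ecx], static_cast<int>(ecx)); // prints "ecx = 1"
    }
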
 
 class X86Assembler {
 public:
     typedef X86Registers::RegisterID RegisterID;
     
-    static RegisterID firstRegister() { return X86Registers::eax; }
-    static RegisterID lastRegister()
+    static constexpr RegisterID firstRegister() { return X86Registers::eax; }
+    static constexpr RegisterID lastRegister()
     {
 #if CPU(X86_64)
         return X86Registers::r15;
@@ -152,8 +136,8 @@ public:
     typedef X86Registers::XMMRegisterID XMMRegisterID;
     typedef XMMRegisterID FPRegisterID;
     
-    static FPRegisterID firstFPRegister() { return X86Registers::xmm0; }
-    static FPRegisterID lastFPRegister()
+    static constexpr FPRegisterID firstFPRegister() { return X86Registers::xmm0; }
+    static constexpr FPRegisterID lastFPRegister()
     {
 #if CPU(X86_64)
         return X86Registers::xmm15;
@@ -185,21 +169,43 @@ public:
     } Condition;
 
 private:
+    // OneByteOpcodeID defines the opcodes for one-byte instructions. It also contains the prefixes
+    // for two-byte instructions.
+    // TwoByteOpcodeID and ThreeByteOpcodeID define the opcodes for multibyte instructions.
+    //
+    // The encoding for each instruction can be found in the Intel Architecture Manual in the appendix
+    // "Opcode Map."
+    //
+    // Each opcode can have a suffix describing the type of argument. The full list of suffixes is
+    // in the "Key to Abbreviations" section of the "Opcode Map".
+    // The most common argument types are:
+    //     -E: The argument is either a GPR or a memory address.
+    //     -G: The argument is a GPR.
+    //     -I: The argument is an immediate.
+    // The most common sizes are:
+    //     -v: 32 or 64 bits, depending on the operand-size attribute.
+    //     -z: 32 bits in both 32-bit and 64-bit modes. Common for immediate values.
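
Reading that notation against the enum that follows: OP_ADD_EvGv (0x01) is ADD with an r/m destination (E) and a register source (G), while OP_ADD_GvEv (0x03) swaps the roles, so a register-to-register ADD can be emitted in either direction. A hand-assembled sketch (byte values from the Intel opcode map; modRM() is an illustrative helper, not the formatter's API):

    #include <cstdint>

    // ModRM byte for a register-direct operand: mod = 11b, then the reg and r/m fields.
    constexpr uint8_t modRM(uint8_t reg, uint8_t rm) { return 0xC0 | (reg << 3) | rm; }

    // "add ecx, edx" both ways (ecx = 1, edx = 2):
    const uint8_t addEvGv[] = { 0x01, modRM(2, 1) }; // 01 D1: destination in the r/m field
    const uint8_t addGvEv[] = { 0x03, modRM(1, 2) }; // 03 CA: destination in the reg field
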
     typedef enum {
+        OP_ADD_EbGb                     = 0x00,
         OP_ADD_EvGv                     = 0x01,
         OP_ADD_GvEv                     = 0x03,
+        OP_ADD_EAXIv                    = 0x05,
         OP_OR_EvGv                      = 0x09,
         OP_OR_GvEv                      = 0x0B,
+        OP_OR_EAXIv                     = 0x0D,
         OP_2BYTE_ESCAPE                 = 0x0F,
         OP_AND_EvGv                     = 0x21,
         OP_AND_GvEv                     = 0x23,
         OP_SUB_EvGv                     = 0x29,
         OP_SUB_GvEv                     = 0x2B,
+        OP_SUB_EAXIv                    = 0x2D,
         PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
         OP_XOR_EvGv                     = 0x31,
         OP_XOR_GvEv                     = 0x33,
+        OP_XOR_EAXIv                    = 0x35,
         OP_CMP_EvGv                     = 0x39,
         OP_CMP_GvEv                     = 0x3B,
+        OP_CMP_EAXIv                    = 0x3D,
 #if CPU(X86_64)
         PRE_REX                         = 0x40,
 #endif
@@ -224,9 +230,12 @@ private:
         OP_LEA                          = 0x8D,
         OP_GROUP1A_Ev                   = 0x8F,
         OP_NOP                          = 0x90,
+        OP_XCHG_EAX                     = 0x90,
         OP_CDQ                          = 0x99,
         OP_MOV_EAXOv                    = 0xA1,
         OP_MOV_OvEAX                    = 0xA3,
+        OP_TEST_ALIb                    = 0xA8,
+        OP_TEST_EAXIv                   = 0xA9,
         OP_MOV_EAXIv                    = 0xB8,
         OP_GROUP2_EvIb                  = 0xC1,
         OP_RET                          = 0xC3,
@@ -235,9 +244,11 @@ private:
         OP_INT3                         = 0xCC,
         OP_GROUP2_Ev1                   = 0xD1,
         OP_GROUP2_EvCL                  = 0xD3,
+        OP_ESCAPE_D9                    = 0xD9,
         OP_ESCAPE_DD                    = 0xDD,
         OP_CALL_rel32                   = 0xE8,
         OP_JMP_rel32                    = 0xE9,
+        PRE_LOCK                        = 0xF0,
         PRE_SSE_F2                      = 0xF2,
         PRE_SSE_F3                      = 0xF3,
         OP_HLT                          = 0xF4,
@@ -248,29 +259,42 @@ private:
     } OneByteOpcodeID;
 
     typedef enum {
+        OP2_UD2             = 0xB,
         OP2_MOVSD_VsdWsd    = 0x10,
         OP2_MOVSD_WsdVsd    = 0x11,
         OP2_MOVSS_VsdWsd    = 0x10,
         OP2_MOVSS_WsdVsd    = 0x11,
+        OP2_MOVAPD_VpdWpd   = 0x28,
+        OP2_MOVAPS_VpdWpd   = 0x28,
         OP2_CVTSI2SD_VsdEd  = 0x2A,
         OP2_CVTTSD2SI_GdWsd = 0x2C,
+        OP2_CVTTSS2SI_GdWsd = 0x2C,
         OP2_UCOMISD_VsdWsd  = 0x2E,
+        OP2_3BYTE_ESCAPE_3A = 0x3A,
+        OP2_CMOVCC          = 0x40,
         OP2_ADDSD_VsdWsd    = 0x58,
         OP2_MULSD_VsdWsd    = 0x59,
         OP2_CVTSD2SS_VsdWsd = 0x5A,
         OP2_CVTSS2SD_VsdWsd = 0x5A,
         OP2_SUBSD_VsdWsd    = 0x5C,
         OP2_DIVSD_VsdWsd    = 0x5E,
+        OP2_MOVMSKPD_VdEd   = 0x50,
         OP2_SQRTSD_VsdWsd   = 0x51,
+        OP2_ANDPS_VpdWpd    = 0x54,
         OP2_ANDNPD_VpdWpd   = 0x55,
+        OP2_ORPS_VpdWpd     = 0x56,
         OP2_XORPD_VpdWpd    = 0x57,
         OP2_MOVD_VdEd       = 0x6E,
         OP2_MOVD_EdVd       = 0x7E,
         OP2_JCC_rel32       = 0x80,
         OP_SETCC            = 0x90,
-        OP2_3BYTE_ESCAPE    = 0xAE,
+        OP2_3BYTE_ESCAPE_AE = 0xAE,
         OP2_IMUL_GvEv       = 0xAF,
         OP2_MOVZX_GvEb      = 0xB6,
+        OP2_BSF             = 0xBC,
+        OP2_TZCNT           = 0xBC,
+        OP2_BSR             = 0xBD,
+        OP2_LZCNT           = 0xBD,
         OP2_MOVSX_GvEb      = 0xBE,
         OP2_MOVZX_GvEw      = 0xB7,
         OP2_MOVSX_GvEw      = 0xBF,
@@ -281,9 +305,28 @@ private:
     } TwoByteOpcodeID;
     
     typedef enum {
-        OP3_MFENCE          = 0xF0,
+        OP3_ROUNDSS_VssWssIb = 0x0A,
+        OP3_ROUNDSD_VsdWsdIb = 0x0B,
+        OP3_MFENCE           = 0xF0,
     } ThreeByteOpcodeID;
 
+    struct VexPrefix {
+        enum : uint8_t {
+            TwoBytes = 0xC5,
+            ThreeBytes = 0xC4
+        };
+    };
+    enum class VexImpliedBytes : uint8_t {
+        TwoBytesOp = 1,
+        ThreeBytesOp38 = 2,
+        ThreeBytesOp3A = 3
+    };
+    
+    TwoByteOpcodeID cmovcc(Condition cond)
+    {
+        return (TwoByteOpcodeID)(OP2_CMOVCC + cond);
+    }
+
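
cmovcc() relies on the sixteen CMOVcc opcodes forming one contiguous block: 0x40 plus the hardware condition code, the same layout jccRel32 uses at 0x80 and setccOpcode at 0x90. An illustrative sketch, using the fact that the x86 condition code for equal/zero is 4:

    #include <cstdint>

    // Mirror of cmovcc(): the condition sits in the low nibble of the opcode.
    constexpr uint8_t cmovOpcode(uint8_t conditionCode) { return 0x40 + conditionCode; }

    static_assert(cmovOpcode(4) == 0x44, "0F 44 /r is CMOVE; the same trick at 0x80 gives JE");
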
     TwoByteOpcodeID jccRel32(Condition cond)
     {
         return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
@@ -317,6 +360,7 @@ private:
         GROUP3_OP_TEST = 0,
         GROUP3_OP_NOT  = 2,
         GROUP3_OP_NEG  = 3,
+        GROUP3_OP_DIV = 6,
         GROUP3_OP_IDIV = 7,
 
         GROUP5_OP_CALLN = 2,
@@ -328,6 +372,7 @@ private:
         GROUP14_OP_PSLLQ = 6,
         GROUP14_OP_PSRLQ = 2,
 
+        ESCAPE_D9_FSTP_singleReal = 3,
         ESCAPE_DD_FSTP_doubleReal = 3,
     } GroupOpcodeID;
     
@@ -407,13 +452,43 @@ public:
         m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
     }
 
+    void addl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, index, scale, offset);
+    }
+
+    void addb_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, offset);
+    }
+
+    void addb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, index, scale, offset);
+    }
+
+    void addw_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, offset);
+    }
+
+    void addw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, index, scale, offset);
+    }
+
     void addl_ir(int imm, RegisterID dst)
     {
         if (CAN_SIGN_EXTEND_8_32(imm)) {
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_ADD_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
             m_formatter.immediate32(imm);
         }
     }
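
The eax special case above saves a byte: the accumulator forms put the 32-bit immediate straight after the opcode and need no ModRM byte. For "add eax, 0x3E8" (an immediate too wide for the sign-extended 8-bit form), a byte-level sketch per the opcode map:

    #include <cstdint>

    const uint8_t viaEAXIv[]  = { 0x05, 0xE8, 0x03, 0x00, 0x00 };       // ADD EAX, imm32: 5 bytes
    const uint8_t viaGroup1[] = { 0x81, 0xC0, 0xE8, 0x03, 0x00, 0x00 }; // 81 /0 (ADD r/m32, imm32): 6 bytes
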
@@ -429,6 +504,53 @@ public:
         }
     }
 
+    void addl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void addb_im(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, offset);
+        m_formatter.immediate8(imm);
+    }
+
+    void addb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, index, scale, offset);
+        m_formatter.immediate8(imm);
+    }
+
+    void addw_im(int imm, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+            m_formatter.immediate16(imm);
+        }
+    }
+
+    void addw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset);
+            m_formatter.immediate16(imm);
+        }
+    }
+
 #if CPU(X86_64)
     void addq_rr(RegisterID src, RegisterID dst)
     {
@@ -440,13 +562,21 @@ public:
         m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
     }
 
+    void addq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_ADD_EvGv, src, base, offset);
+    }
+
     void addq_ir(int imm, RegisterID dst)
     {
         if (CAN_SIGN_EXTEND_8_32(imm)) {
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_ADD_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -552,6 +682,12 @@ public:
     }
 #endif // CPU(X86_64)
 
+    // Only used for testing purposes.
+    void illegalInstruction()
+    {
+        m_formatter.twoByteOp(OP2_UD2);
+    }
+
     void inc_r(RegisterID dst)
     {
         m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
@@ -562,6 +698,11 @@ public:
     {
         m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
     }
+
+    void incq_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset);
+    }
 #endif // CPU(X86_64)
 
     void negl_r(RegisterID dst)
@@ -591,6 +732,18 @@ public:
         m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
     }
 
+#if CPU(X86_64)
+    void notq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
+    }
+
+    void notq_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+    }
+#endif
+
     void orl_rr(RegisterID src, RegisterID dst)
     {
         m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
@@ -612,7 +765,10 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_OR_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -640,7 +796,10 @@ public:
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_OR_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -683,7 +842,10 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_SUB_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -705,13 +867,37 @@ public:
         m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
     }
 
+    void subq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_SUB_GvEv, dst, base, offset);
+    }
+
+    void subq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_SUB_EvGv, src, base, offset);
+    }
+
     void subq_ir(int imm, RegisterID dst)
     {
         if (CAN_SIGN_EXTEND_8_32(imm)) {
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_SUB_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void subq_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
             m_formatter.immediate32(imm);
         }
     }
@@ -760,7 +946,10 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_XOR_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -777,7 +966,10 @@ public:
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_XOR_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -786,28 +978,100 @@ public:
     {
         m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
     }
-    
-    void rorq_i8r(int imm, RegisterID dst)
+
+#endif
+
+    void lzcnt_rr(RegisterID src, RegisterID dst)
     {
-        if (imm == 1)
-            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
-        else {
-            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
-            m_formatter.immediate8(imm);
-        }
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_LZCNT, dst, src);
+    }
+
+    void lzcnt_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_LZCNT, dst, base, offset);
+    }
+
+#if CPU(X86_64)
+    void lzcntq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_LZCNT, dst, src);
     }
 
+    void lzcntq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_LZCNT, dst, base, offset);
+    }
 #endif
 
-    void sarl_i8r(int imm, RegisterID dst)
+    void bsr_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_BSR, dst, src);
+    }
+
+    void bsr_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_BSR, dst, base, offset);
+    }
+
+#if CPU(X86_64)
+    void bsrq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(OP2_BSR, dst, src);
+    }
+
+    void bsrq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(OP2_BSR, dst, base, offset);
+    }
+#endif
+
+    void tzcnt_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_TZCNT, dst, src);
+    }
+
+#if CPU(X86_64)
+    void tzcntq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_TZCNT, dst, src);
+    }
+#endif
+
+    void bsf_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_BSF, dst, src);
+    }
+
+#if CPU(X86_64)
+    void bsfq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(OP2_BSF, dst, src);
+    }
+#endif
+
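
OP2_TZCNT and OP2_BSF share the value 0xBC (and OP2_LZCNT/OP2_BSR share 0xBD) because TZCNT is literally BSF with an F3 prefix: a CPU without BMI1 ignores the prefix and executes plain BSF, which returns the same bit index for any nonzero input. The two differ only on zero input and in flag behavior, which is why the assembler exposes both. A portable model of the 32-bit count:

    #include <cstdint>

    static int tzcnt32(uint32_t x)
    {
        if (!x)
            return 32;      // TZCNT defines the zero case; BSF leaves the destination undefined.
        int n = 0;
        while (!(x & 1)) {
            x >>= 1;
            ++n;
        }
        return n;           // For nonzero x this is also what BSF returns.
    }
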
+private:
+    template<GroupOpcodeID op>
+    void shiftInstruction32(int imm, RegisterID dst)
     {
         if (imm == 1)
-            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+            m_formatter.oneByteOp(OP_GROUP2_Ev1, op, dst);
         else {
-            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+            m_formatter.oneByteOp(OP_GROUP2_EvIb, op, dst);
             m_formatter.immediate8(imm);
         }
     }
+public:
+
+    void sarl_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction32<GROUP2_OP_SAR>(imm, dst);
+    }
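
The shiftInstruction32 template captures what the old per-instruction bodies duplicated: Group 2 has one opcode per immediate form (0xD1 for shift-by-one, 0xC1 plus an imm8 otherwise, 0xD3 for shift-by-CL), and the reg field of the ModRM byte selects the actual operation. A sketch with the GroupOpcodeID values declared above (modRM() is an illustrative helper):

    #include <cstdint>

    constexpr uint8_t modRM(uint8_t reg, uint8_t rm) { return 0xC0 | (reg << 3) | rm; }

    const uint8_t sarEdx1[] = { 0xD1, modRM(7, 2) };    // D1 /7: SAR edx, 1
    const uint8_t sarEdx5[] = { 0xC1, modRM(7, 2), 5 }; // C1 /7 ib: SAR edx, 5
    const uint8_t rorEdx5[] = { 0xC1, modRM(1, 2), 5 }; // same opcode; reg = GROUP2_OP_ROR selects ROR
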
 
     void sarl_CLr(RegisterID dst)
     {
@@ -816,12 +1080,7 @@ public:
     
     void shrl_i8r(int imm, RegisterID dst)
     {
-        if (imm == 1)
-            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
-        else {
-            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
-            m_formatter.immediate8(imm);
-        }
+        shiftInstruction32<GROUP2_OP_SHR>(imm, dst);
     }
     
     void shrl_CLr(RegisterID dst)
@@ -831,12 +1090,7 @@ public:
 
     void shll_i8r(int imm, RegisterID dst)
     {
-        if (imm == 1)
-            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
-        else {
-            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
-            m_formatter.immediate8(imm);
-        }
+        shiftInstruction32<GROUP2_OP_SHL>(imm, dst);
     }
 
     void shll_CLr(RegisterID dst)
@@ -844,30 +1098,87 @@ public:
         m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
     }
 
-#if CPU(X86_64)
-    void sarq_CLr(RegisterID dst)
+    void rorl_i8r(int imm, RegisterID dst)
     {
-        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+        shiftInstruction32<GROUP2_OP_ROR>(imm, dst);
     }
 
-    void sarq_i8r(int imm, RegisterID dst)
+    void rorl_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_ROR, dst);
+    }
+
+    void roll_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction32<GROUP2_OP_ROL>(imm, dst);
+    }
+
+    void roll_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_ROL, dst);
+    }
+
+#if CPU(X86_64)
+private:
+    template<GroupOpcodeID op>
+    void shiftInstruction64(int imm, RegisterID dst)
     {
         if (imm == 1)
-            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+            m_formatter.oneByteOp64(OP_GROUP2_Ev1, op, dst);
         else {
-            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+            m_formatter.oneByteOp64(OP_GROUP2_EvIb, op, dst);
             m_formatter.immediate8(imm);
         }
     }
+public:
+    void sarq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+    }
+
+    void sarq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_SAR>(imm, dst);
+    }
+
+    void shrq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_SHR>(imm, dst);
+    }
+
+    void shrq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
+    }
 
     void shlq_i8r(int imm, RegisterID dst)
     {
-        if (imm == 1)
-            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
-        else {
-            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
-            m_formatter.immediate8(imm);
-        }
+        shiftInstruction64<GROUP2_OP_SHL>(imm, dst);
+    }
+
+    void shlq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
+    }
+
+    void rorq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_ROR>(imm, dst);
+    }
+
+    void rorq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_ROR, dst);
+    }
+
+    void rolq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_ROL>(imm, dst);
+    }
+
+    void rolq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_ROL, dst);
     }
 #endif // CPU(X86_64)
 
@@ -894,11 +1205,28 @@ public:
         m_formatter.immediate32(value);
     }
 
+    void divl_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_DIV, dst);
+    }
+
     void idivl_r(RegisterID dst)
     {
         m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
     }
 
+#if CPU(X86_64)
+    void divq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_DIV, dst);
+    }
+
+    void idivq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+    }
+#endif // CPU(X86_64)
+
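
divl_r and the new 64-bit forms fill out the Group 3 family alongside idiv: all share opcode 0xF7, with /6 selecting unsigned and /7 signed division, and all divide the double-width edx:eax (or rdx:rax) pair by the operand, leaving the quotient in eax and the remainder in edx. A C-level model of what divl_r's instruction computes:

    #include <cstdint>

    struct DivResult { uint32_t quotient /* -> eax */; uint32_t remainder /* -> edx */; };

    // DIV r/m32 (F7 /6): divide the 64-bit value edx:eax by the operand.
    // Hardware raises #DE if the divisor is zero or the quotient overflows 32 bits.
    static DivResult div32(uint32_t edx, uint32_t eax, uint32_t divisor)
    {
        uint64_t dividend = (uint64_t(edx) << 32) | eax;
        return { uint32_t(dividend / divisor), uint32_t(dividend % divisor) };
    }
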
     // Comparisons:
 
     void cmpl_rr(RegisterID src, RegisterID dst)
@@ -922,7 +1250,10 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_CMP_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -1008,7 +1339,10 @@ public:
             m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
             m_formatter.immediate8(imm);
         } else {
-            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_CMP_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
             m_formatter.immediate32(imm);
         }
     }
@@ -1091,7 +1425,10 @@ public:
     
     void testl_i32r(int imm, RegisterID dst)
     {
-        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+        if (dst == X86Registers::eax)
+            m_formatter.oneByteOp(OP_TEST_EAXIv);
+        else
+            m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
         m_formatter.immediate32(imm);
     }
 
@@ -1145,7 +1482,10 @@ public:
 
     void testq_i32r(int imm, RegisterID dst)
     {
-        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+        if (dst == X86Registers::eax)
+            m_formatter.oneByteOp64(OP_TEST_EAXIv);
+        else
+            m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
         m_formatter.immediate32(imm);
     }
 
@@ -1170,7 +1510,10 @@ public:
     
     void testb_i8r(int imm, RegisterID dst)
     {
-        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+        if (dst == X86Registers::eax)
+            m_formatter.oneByteOp(OP_TEST_ALIb);
+        else
+            m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
         m_formatter.immediate8(imm);
     }
 
@@ -1199,35 +1542,77 @@ public:
         setne_r(dst);
     }
 
-    // Various move ops:
-
-    void cdq()
+    void setnp_r(RegisterID dst)
     {
-        m_formatter.oneByteOp(OP_CDQ);
+        m_formatter.twoByteOp8(setccOpcode(ConditionNP), (GroupOpcodeID)0, dst);
     }
 
-    void fstpl(int offset, RegisterID base)
+    void setp_r(RegisterID dst)
     {
-        m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
+        m_formatter.twoByteOp8(setccOpcode(ConditionP), (GroupOpcodeID)0, dst);
     }
 
-    void xchgl_rr(RegisterID src, RegisterID dst)
+    // Various move ops:
+
+    void cdq()
     {
-        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+        m_formatter.oneByteOp(OP_CDQ);
     }
 
 #if CPU(X86_64)
-    void xchgq_rr(RegisterID src, RegisterID dst)
+    void cqo()
     {
-        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+        m_formatter.oneByteOp64(OP_CDQ);
     }
 #endif
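
cqo is the 64-bit counterpart of cdq, encoded as the same 0x99 opcode under a REX.W prefix (hence the reuse of OP_CDQ through oneByteOp64): it sign-extends RAX into RDX:RAX, exactly the setup idivq_r expects. In C terms, what cdq leaves in edx:

    #include <cstdint>

    // cdq: edx becomes the sign fill of eax (0 or 0xFFFFFFFF).
    static uint32_t cdqHighHalf(int32_t eax) { return uint32_t(int64_t(eax) >> 32); }
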
 
-    void movl_rr(RegisterID src, RegisterID dst)
+    void fstps(int offset, RegisterID base)
     {
-        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
+        m_formatter.oneByteOp(OP_ESCAPE_D9, ESCAPE_D9_FSTP_singleReal, base, offset);
     }
-    
+
+    void fstpl(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
+    }
+
+    void xchgl_rr(RegisterID src, RegisterID dst)
+    {
+        if (src == X86Registers::eax)
+            m_formatter.oneByteOp(OP_XCHG_EAX, dst);
+        else if (dst == X86Registers::eax)
+            m_formatter.oneByteOp(OP_XCHG_EAX, src);
+        else
+            m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+    }
+
+    void xchgl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_XCHG_EvGv, src, base, offset);
+    }
+
+#if CPU(X86_64)
+    void xchgq_rr(RegisterID src, RegisterID dst)
+    {
+        if (src == X86Registers::eax)
+            m_formatter.oneByteOp64(OP_XCHG_EAX, dst);
+        else if (dst == X86Registers::eax)
+            m_formatter.oneByteOp64(OP_XCHG_EAX, src);
+        else
+            m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+    }
+
+    void xchgq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, base, offset);
+    }
+#endif
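
OP_XCHG_EAX deliberately aliases OP_NOP at 0x90: 0x90+r is the one-byte exchange-with-accumulator family, and exchanging eax with itself is how NOP was originally defined. The special case above therefore shrinks any xchg involving eax from two bytes to one:

    #include <cstdint>

    const uint8_t xchgEaxEdx[] = { 0x90 + 2 };   // 92: XCHG EAX, EDX, one byte
    const uint8_t xchgEcxEdx[] = { 0x87, 0xCA }; // 87 /r with ModRM reg=ecx, rm=edx, two bytes
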
+
+    void movl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
+    }
+    
     void movl_rm(RegisterID src, int offset, RegisterID base)
     {
         m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
@@ -1330,7 +1715,16 @@ public:
     {
         m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
     }
-    
+
+    void movw_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+
+        // FIXME: We often use oneByteOp8 for 16-bit operations. It's not clear that this is
+        // necessary. https://bugs.webkit.org/show_bug.cgi?id=153433
+        m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, offset);
+    }
+
     void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
         m_formatter.prefix(PRE_OPERAND_SIZE);
@@ -1411,6 +1805,12 @@ public:
         m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
         m_formatter.immediate64(imm);
     }
+
+    void mov_i32r(int32_t imm, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, dst);
+        m_formatter.immediate32(imm);
+    }
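
mov_i32r complements movq_i64r: REX.W C7 /0 sign-extends a 32-bit immediate into the full register in 7 bytes, against 10 bytes for the B8+r imm64 form, so it wins whenever the constant fits in signed 32 bits. Loading -1 into rcx both ways (bytes per the opcode map):

    #include <cstdint>

    const uint8_t viaImm32[] = { 0x48, 0xC7, 0xC1, 0xFF, 0xFF, 0xFF, 0xFF };                   // 7 bytes
    const uint8_t viaImm64[] = { 0x48, 0xB9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; // 10 bytes
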
     
     void movsxd_rr(RegisterID src, RegisterID dst)
     {
@@ -1497,15 +1897,118 @@ public:
         m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
     }
 
+    void movsbl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp8(OP2_MOVSX_GvEb, dst, src);
+    }
+
+    void movzwl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp8(OP2_MOVZX_GvEw, dst, src);
+    }
+
+    void movswl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp8(OP2_MOVSX_GvEw, dst, src);
+    }
+
+    void cmovl_rr(Condition cond, RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, src);
+    }
+
+    void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, base, offset);
+    }
+
+    void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, base, index, scale, offset);
+    }
+
+    void cmovel_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionE), dst, src);
+    }
+    
+    void cmovnel_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionNE), dst, src);
+    }
+    
+    void cmovpl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionP), dst, src);
+    }
+    
+    void cmovnpl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionNP), dst, src);
+    }
+
+#if CPU(X86_64)
+    void cmovq_rr(Condition cond, RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, src);
+    }
+
+    void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, base, offset);
+    }
+
+    void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, base, index, scale, offset);
+    }
+
+    void cmoveq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionE), dst, src);
+    }
+
+    void cmovneq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionNE), dst, src);
+    }
+
+    void cmovpq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionP), dst, src);
+    }
+
+    void cmovnpq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionNP), dst, src);
+    }
+#else
+    void cmovl_mr(Condition cond, const void* addr, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, addr);
+    }
+#endif
+
     void leal_mr(int offset, RegisterID base, RegisterID dst)
     {
         m_formatter.oneByteOp(OP_LEA, dst, base, offset);
     }
+
+    void leal_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_LEA, dst, base, index, scale, offset);
+    }
+
 #if CPU(X86_64)
     void leaq_mr(int offset, RegisterID base, RegisterID dst)
     {
         m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
     }
+
+    void leaq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_LEA, dst, base, index, scale, offset);
+    }
 #endif
 
     // Flow control:
@@ -1547,6 +2050,11 @@ public:
         m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
     }
     
+    void jmp_m(int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, index, scale, offset);
+    }
+    
 #if !CPU(X86_64)
     void jmp_m(const void* address)
     {
@@ -1662,12 +2170,66 @@ public:
         m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
     }
 
+    void vaddsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
     void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
     }
 
+    void addsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vaddsd_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vaddsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void addss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vaddss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void addss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void addss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vaddss_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vaddss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
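
The vaddsd/vaddss variants use the AVX three-operand encoding through the VEX prefix declared further up: in the two-byte C5 form, the second byte packs an inverted REX.R bit, the inverted extra source register (vvvv), a vector-length bit, and a two-bit code standing in for the legacy 66/F3/F2 prefixes. A sketch of that byte (field layout per the Intel manual; vexByte() is illustrative, not the formatter's real code):

    #include <cstdint>

    // pp: 0 = no legacy prefix, 1 = 66, 2 = F3, 3 = F2.
    constexpr uint8_t vexByte(bool rexR, uint8_t vvvv, bool l256, uint8_t pp)
    {
        return (uint8_t(!rexR) << 7) | ((~vvvv & 0xF) << 3) | (uint8_t(l256) << 2) | pp;
    }

    // vaddsd xmm1, xmm2, xmm3 -> C5 EB 58 CB (58 is OP2_ADDSD_VsdWsd):
    static_assert(vexByte(false, /* vvvv = xmm2 */ 2, false, /* F2 */ 3) == 0xEB, "second VEX byte");
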
 #if !CPU(X86_64)
     void addsd_mr(const void* address, XMMRegisterID dst)
     {
@@ -1682,12 +2244,36 @@ public:
         m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
     }
 
+    void cvtsi2ss_rr(RegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+    }
+
 #if CPU(X86_64)
     void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
     }
+
+    void cvtsi2ssq_rr(RegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+    }
+
+    void cvtsi2sdq_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+    }
+
+    void cvtsi2ssq_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+    }
 #endif
 
     void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
@@ -1696,6 +2282,12 @@ public:
         m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
     }
 
+    void cvtsi2ss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+    }
+
 #if !CPU(X86_64)
     void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
     {
@@ -1710,18 +2302,44 @@ public:
         m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
     }
 
+    void cvttss2si_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTTSS2SI_GdWsd, dst, (RegisterID)src);
+    }
+
+#if CPU(X86_64)
+    void cvttss2siq_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_CVTTSS2SI_GdWsd, dst, (RegisterID)src);
+    }
+#endif
+
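
The extra "t" in cvttsd2si/cvttss2si means truncate: the conversion rounds toward zero regardless of the MXCSR rounding mode, which is what C-style double-to-int casts need. For NaN or out-of-range inputs the hardware produces the "integer indefinite" value (0x80000000 for 32-bit results), which the JIT's slow paths can test for. A rough C++ model of the 32-bit form:

    #include <cstdint>

    static int32_t cvttsd2siModel(double x)
    {
        if (!(x > -2147483649.0 && x < 2147483648.0)) // NaN or result unrepresentable
            return INT32_MIN;                         // 0x80000000, the integer indefinite
        return int32_t(x);                            // truncation toward zero
    }
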
     void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
     }
 
+    void cvtsd2ss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, base, offset);
+    }
+
     void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_F3);
         m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
     }
-    
+
+    void cvtss2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, base, offset);
+    }
+
 #if CPU(X86_64)
     void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
     {
@@ -1743,6 +2361,12 @@ public:
     }
 
 #if CPU(X86_64)
+    void movmskpd_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src);
+    }
+
     void movq_rr(XMMRegisterID src, RegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_66);
@@ -1756,6 +2380,17 @@ public:
     }
 #endif
 
+    void movapd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_MOVAPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void movaps_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVAPS_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
     void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_F2);
@@ -1773,6 +2408,12 @@ public:
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
     }
+
+    void movss_rm(XMMRegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+    }
     
     void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
@@ -1791,7 +2432,13 @@ public:
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
     }
-    
+
+    void movss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
     void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_F3);
@@ -1817,12 +2464,66 @@ public:
         m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
     }
 
+    void vmulsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
     void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
     }
 
+    void mulsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vmulsd_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vmulsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void mulss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vmulss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void mulss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void mulss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vmulss_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vmulss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
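+
+    // The mulss/vmulss forms reuse OP2_MULSD_VsdWsd (0x59); the prefix selects the
+    // instruction, F2 0F 59 being mulsd and F3 0F 59 mulss. For example,
+    // mulss_rr(xmm2, xmm1) emits F3 0F 59 CA.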
+
     void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_66);
@@ -1856,22 +2557,86 @@ public:
         m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
     }
 
+    void vsubsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
     void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
     {
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
     }
 
-    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
+    void subsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
     {
-        m_formatter.prefix(PRE_SSE_66);
-        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, dst, base, index, scale, offset);
     }
 
-    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    void vsubsd_mr(XMMRegisterID b, int offset, RegisterID base, XMMRegisterID dst)
     {
-        m_formatter.prefix(PRE_SSE_66);
-        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vsubsd_mr(XMMRegisterID b, int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void subss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vsubss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void subss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void subss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vsubss_mr(XMMRegisterID b, int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vsubss_mr(XMMRegisterID b, int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void ucomiss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void ucomiss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
     }
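+
+    // ucomiss is the unprefixed form of the same 0F 2E opcode byte: with the 66
+    // prefix it is ucomisd. Illustration: ucomiss_rr(xmm1, xmm0) emits 0F 2E C1.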
 
     void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
@@ -1886,8 +2651,39 @@ public:
         m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
     }
 
+    void divss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void divss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void andps_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_ANDPS_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void orps_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_ORPS_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void xorps_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
     void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
     {
+        if (src == dst) {
+            xorps_rr(src, dst);
+            return;
+        }
         m_formatter.prefix(PRE_SSE_66);
         m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
     }
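+
+    // Zeroing a register (src == dst) is the common case here; xorps encodes one
+    // byte shorter than xorpd (no 66 prefix) and produces the same all-zero result.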
@@ -1903,7 +2699,60 @@ public:
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
     }
-    
+
+    void sqrtsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void sqrtss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void sqrtss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    enum class RoundingType : uint8_t {
+        ToNearestWithTiesToEven = 0,
+        TowardNegativeInfiniti = 1,
+        TowardInfiniti = 2,
+        TowardZero = 3
+    };
+
+    void roundss_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, (RegisterID)src);
+        m_formatter.immediate8(static_cast<uint8_t>(rounding));
+    }
+
+    void roundss_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, base, offset);
+        m_formatter.immediate8(static_cast<uint8_t>(rounding));
+    }
+
+    void roundsd_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, (RegisterID)src);
+        m_formatter.immediate8(static_cast<uint8_t>(rounding));
+    }
+
+    void roundsd_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, base, offset);
+        m_formatter.immediate8(static_cast<uint8_t>(rounding));
+    }
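+
+    // Worked example, for illustration: roundsd_rr(xmm0, xmm1, RoundingType::TowardZero)
+    // emits 66 0F 3A 0B C8 03 -- the 66 prefix, the 0F 3A escape, the ROUNDSD opcode,
+    // ModRM C8 (xmm1 <- xmm0), and the rounding mode as the trailing imm8.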
+
     // Misc instructions:
 
     void int3()
@@ -1921,9 +2770,14 @@ public:
         m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
     }
     
+    void lock()
+    {
+        m_formatter.prefix(PRE_LOCK);
+    }
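+
+    // lock() only plants the F0 prefix byte; the caller must follow it immediately
+    // with the read-modify-write instruction that should execute atomically.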
+    
     void mfence()
     {
-        m_formatter.threeByteOp(OP3_MFENCE);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_AE, OP3_MFENCE);
     }
 
     // Assembler admin methods:
@@ -2010,6 +2864,11 @@ public:
         setRel32(from, to);
     }
     
+    static void relinkJumpToNop(void* from)
+    {
+        setInt32(from, 0);
+    }
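+
+    // Writing 0 into the rel32 operand turns the jump into a jump to the next
+    // instruction, which behaves as a 5-byte nop without touching the opcode byte.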
+    
     static void relinkCall(void* from, void* to)
     {
         setRel32(from, to);
@@ -2050,13 +2909,18 @@ public:
     {
         return 5;
     }
+
+    static constexpr ptrdiff_t patchableJumpSize()
+    {
+        return 5;
+    }
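+
+    // Both values are 5 because a patchable jump is always a JMP rel32 (E9 imm32).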
     
 #if CPU(X86_64)
     static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
     {
+        const unsigned instructionSize = 10; // REX.W MOV IMM64
         const int rexBytes = 1;
         const int opcodeBytes = 1;
-        ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize());
         uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
         ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
         ptr[1] = OP_MOV_EAXIv | (dst & 7);
@@ -2066,11 +2930,33 @@ public:
             uint8_t asBytes[8];
         } u;
         u.asWord = imm;
-        for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+        for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+            ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+    }
+
+    static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst)
+    {
+        // We only revert jumps on inline caches, and inline caches always use the scratch register (r11).
+        // FIXME: If the above is ever false then we need to make this smarter with respect to emitting 
+        // the REX byte.
+        ASSERT(dst == X86Registers::r11);
+        const unsigned instructionSize = 6; // REX MOV IMM32
+        const int rexBytes = 1;
+        const int opcodeBytes = 1;
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+        ptr[0] = PRE_REX | (dst >> 3);
+        ptr[1] = OP_MOV_EAXIv | (dst & 7);
+        
+        union {
+            uint32_t asWord;
+            uint8_t asBytes[4];
+        } u;
+        u.asWord = imm;
+        for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
             ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
     }
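+
+    // Illustration: with dst == r11 the bytes written are 41 BB imm32, i.e.
+    // `movl $imm, %r11d` (REX.B selects r8-r15; B8+rd is the mov-immediate form).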
 #endif
-    
+
     static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
     {
         const int opcodeBytes = 1;
@@ -2165,10 +3051,50 @@ public:
     {
         m_formatter.oneByteOp(OP_NOP);
     }
-    
-    static void fillNops(void* base, size_t size)
+
+    static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
     {
+        UNUSED_PARAM(isCopyingToExecutableMemory);
+#if CPU(X86_64)
+        static const uint8_t nops[10][10] = {
+            // nop
+            {0x90},
+            // xchg %ax,%ax
+            {0x66, 0x90},
+            // nopl (%[re]ax)
+            {0x0f, 0x1f, 0x00},
+            // nopl 8(%[re]ax)
+            {0x0f, 0x1f, 0x40, 0x08},
+            // nopl 8(%[re]ax,%[re]ax,1)
+            {0x0f, 0x1f, 0x44, 0x00, 0x08},
+            // nopw 8(%[re]ax,%[re]ax,1)
+            {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08},
+            // nopl 512(%[re]ax)
+            {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00},
+            // nopl 512(%[re]ax,%[re]ax,1)
+            {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
+            // nopw 512(%[re]ax,%[re]ax,1)
+            {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
+            // nopw %cs:512(%[re]ax,%[re]ax,1)
+            {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}
+        };
+
+        uint8_t* where = reinterpret_cast<uint8_t*>(base);
+        while (size) {
+            unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15));
+            unsigned numPrefixes = nopSize <= 10 ? 0 : nopSize - 10;
+            for (unsigned i = 0; i != numPrefixes; ++i)
+                *where++ = 0x66;
+
+            unsigned nopRest = nopSize - numPrefixes;
+            for (unsigned i = 0; i != nopRest; ++i)
+                *where++ = nops[nopRest-1][i];
+
+            size -= nopSize;
+        }
+#else
         memset(base, OP_NOP, size);
+#endif
     }
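+
+    // Example walk-through: filling 17 bytes emits one 15-byte nop (five 0x66
+    // prefixes in front of the 10-byte `nopw %cs:512(...)` form) followed by the
+    // 2-byte `xchg %ax,%ax`.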
 
     // This is a no-op on x86
@@ -2200,16 +3126,14 @@ private:
     }
 
     class X86InstructionFormatter {
-
         static const int maxInstructionSize = 16;
 
     public:
-
         enum ModRmMode {
-            ModRmMemoryNoDisp,
-            ModRmMemoryDisp8,
-            ModRmMemoryDisp32,
-            ModRmRegister,
+            ModRmMemoryNoDisp = 0,
+            ModRmMemoryDisp8 = 1 << 6,
+            ModRmMemoryDisp32 = 2 << 6,
+            ModRmRegister = 3 << 6,
         };
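+
+        // The enum values are pre-shifted into ModRM bits 7..6 so that putModRm()
+        // below can OR them directly with the reg and rm fields.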
 
         // Legacy prefix bytes:
@@ -2221,6 +3145,260 @@ private:
             m_buffer.putByte(pre);
         }
 
+#if CPU(X86_64)
+        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers from being accessed).
+        static bool byteRegRequiresRex(int reg)
+        {
+            static_assert(X86Registers::esp == 4, "Necessary condition for OR-masking");
+            return (reg >= X86Registers::esp);
+        }
+        static bool byteRegRequiresRex(int a, int b)
+        {
+            return byteRegRequiresRex(a | b);
+        }
+
+        // Registers r8 & above require a REX prefix.
+        static bool regRequiresRex(int reg)
+        {
+            static_assert(X86Registers::r8 == 8, "Necessary condition for OR-masking");
+            return (reg >= X86Registers::r8);
+        }
+        static bool regRequiresRex(int a, int b)
+        {
+            return regRequiresRex(a | b);
+        }
+        static bool regRequiresRex(int a, int b, int c)
+        {
+            return regRequiresRex(a | b | c);
+        }
+#else
+        static bool byteRegRequiresRex(int) { return false; }
+        static bool byteRegRequiresRex(int, int) { return false; }
+        static bool regRequiresRex(int) { return false; }
+        static bool regRequiresRex(int, int) { return false; }
+        static bool regRequiresRex(int, int, int) { return false; }
+#endif
+
+        class SingleInstructionBufferWriter : public AssemblerBuffer::LocalWriter {
+        public:
+            SingleInstructionBufferWriter(AssemblerBuffer& buffer)
+                : AssemblerBuffer::LocalWriter(buffer, maxInstructionSize)
+            {
+            }
+
+            // Internals; ModRm and REX formatters.
+
+            static constexpr RegisterID noBase = X86Registers::ebp;
+            static constexpr RegisterID hasSib = X86Registers::esp;
+            static constexpr RegisterID noIndex = X86Registers::esp;
+
+#if CPU(X86_64)
+            static constexpr RegisterID noBase2 = X86Registers::r13;
+            static constexpr RegisterID hasSib2 = X86Registers::r12;
+
+            // Format a REX prefix byte.
+            ALWAYS_INLINE void emitRex(bool w, int r, int x, int b)
+            {
+                ASSERT(r >= 0);
+                ASSERT(x >= 0);
+                ASSERT(b >= 0);
+                putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
+            }
+
+            // Used to plant a REX byte with REX.w set (for 64-bit operations).
+            ALWAYS_INLINE void emitRexW(int r, int x, int b)
+            {
+                emitRex(true, r, x, b);
+            }
+
+            // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
+            // regRequiresRex() to check other registers (i.e. address base & index).
+            ALWAYS_INLINE void emitRexIf(bool condition, int r, int x, int b)
+            {
+                if (condition)
+                    emitRex(false, r, x, b);
+            }
+
+            // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
+            ALWAYS_INLINE void emitRexIfNeeded(int r, int x, int b)
+            {
+                emitRexIf(regRequiresRex(r, x, b), r, x, b);
+            }
+#else
+            // No REX prefix bytes on 32-bit x86.
+            ALWAYS_INLINE void emitRexIf(bool, int, int, int) { }
+            ALWAYS_INLINE void emitRexIfNeeded(int, int, int) { }
+#endif
+
+            ALWAYS_INLINE void putModRm(ModRmMode mode, int reg, RegisterID rm)
+            {
+                putByteUnchecked(mode | ((reg & 7) << 3) | (rm & 7));
+            }
+
+            ALWAYS_INLINE void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
+            {
+                ASSERT(mode != ModRmRegister);
+
+                putModRm(mode, reg, hasSib);
+                putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
+            }
+
+            ALWAYS_INLINE void registerModRM(int reg, RegisterID rm)
+            {
+                putModRm(ModRmRegister, reg, rm);
+            }
+
+            ALWAYS_INLINE void memoryModRM(int reg, RegisterID base, int offset)
+            {
+                // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+                if ((base == hasSib) || (base == hasSib2)) {
+#else
+                if (base == hasSib) {
+#endif
+                    if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
+                        putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
+                    else if (CAN_SIGN_EXTEND_8_32(offset)) {
+                        putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+                        putByteUnchecked(offset);
+                    } else {
+                        putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+                        putIntUnchecked(offset);
+                    }
+                } else {
+#if CPU(X86_64)
+                    if (!offset && (base != noBase) && (base != noBase2))
+#else
+                    if (!offset && (base != noBase))
+#endif
+                        putModRm(ModRmMemoryNoDisp, reg, base);
+                    else if (CAN_SIGN_EXTEND_8_32(offset)) {
+                        putModRm(ModRmMemoryDisp8, reg, base);
+                        putByteUnchecked(offset);
+                    } else {
+                        putModRm(ModRmMemoryDisp32, reg, base);
+                        putIntUnchecked(offset);
+                    }
+                }
+            }
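+
+            // Example: memoryModRM(2, X86Registers::esp, 4) must take the SIB path
+            // and emits 54 24 04 -- ModRM 0x54 (mod=disp8, rm=SIB), SIB 0x24
+            // (no index, base = esp), then the 8-bit displacement.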
+
+            ALWAYS_INLINE void memoryModRM_disp8(int reg, RegisterID base, int offset)
+            {
+                // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+                ASSERT(CAN_SIGN_EXTEND_8_32(offset));
+#if CPU(X86_64)
+                if ((base == hasSib) || (base == hasSib2)) {
+#else
+                if (base == hasSib) {
+#endif
+                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+                    putByteUnchecked(offset);
+                } else {
+                    putModRm(ModRmMemoryDisp8, reg, base);
+                    putByteUnchecked(offset);
+                }
+            }
+
+            ALWAYS_INLINE void memoryModRM_disp32(int reg, RegisterID base, int offset)
+            {
+                // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+                if ((base == hasSib) || (base == hasSib2)) {
+#else
+                if (base == hasSib) {
+#endif
+                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+                    putIntUnchecked(offset);
+                } else {
+                    putModRm(ModRmMemoryDisp32, reg, base);
+                    putIntUnchecked(offset);
+                }
+            }
+        
+            ALWAYS_INLINE void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
+            {
+                ASSERT(index != noIndex);
+
+#if CPU(X86_64)
+                if (!offset && (base != noBase) && (base != noBase2))
+#else
+                if (!offset && (base != noBase))
+#endif
+                    putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
+                else if (CAN_SIGN_EXTEND_8_32(offset)) {
+                    putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
+                    putByteUnchecked(offset);
+                } else {
+                    putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
+                    putIntUnchecked(offset);
+                }
+            }
+
+#if !CPU(X86_64)
+            ALWAYS_INLINE void memoryModRM(int reg, const void* address)
+            {
+                // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
+                putModRm(ModRmMemoryNoDisp, reg, noBase);
+                putIntUnchecked(reinterpret_cast<uint32_t>(address));
+            }
+#endif
+            ALWAYS_INLINE void twoBytesVex(OneByteOpcodeID simdPrefix, RegisterID inOpReg, RegisterID r)
+            {
+                putByteUnchecked(VexPrefix::TwoBytes);
+
+                uint8_t secondByte = vexEncodeSimdPrefix(simdPrefix);
+                secondByte |= (~inOpReg & 0xf) << 3;
+                secondByte |= !regRequiresRex(r) << 7;
+                putByteUnchecked(secondByte);
+            }
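+
+            // Illustration: vmulsd_rr with dest = xmm0, a = xmm1, b = xmm2 takes this
+            // two-byte path and emits C5 F3 59 C2 (bit 7 set means no REX.R, ~vvvv
+            // encodes xmm1, L = 0, pp = 3 selects the F2 prefix).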
+
+            ALWAYS_INLINE void threeBytesVexNds(OneByteOpcodeID simdPrefix, VexImpliedBytes impliedBytes, RegisterID r, RegisterID inOpReg, RegisterID x, RegisterID b)
+            {
+                putByteUnchecked(VexPrefix::ThreeBytes);
+
+                uint8_t secondByte = static_cast<uint8_t>(impliedBytes);
+                secondByte |= !regRequiresRex(r) << 7;
+                secondByte |= !regRequiresRex(x) << 6;
+                secondByte |= !regRequiresRex(b) << 5;
+                putByteUnchecked(secondByte);
+
+                uint8_t thirdByte = vexEncodeSimdPrefix(simdPrefix);
+                thirdByte |= (~inOpReg & 0xf) << 3;
+                putByteUnchecked(thirdByte);
+            }
+
+            ALWAYS_INLINE void threeBytesVexNds(OneByteOpcodeID simdPrefix, VexImpliedBytes impliedBytes, RegisterID r, RegisterID inOpReg, RegisterID b)
+            {
+                putByteUnchecked(VexPrefix::ThreeBytes);
+
+                uint8_t secondByte = static_cast<uint8_t>(impliedBytes);
+                secondByte |= !regRequiresRex(r) << 7;
+                secondByte |= 1 << 6; // REX.X
+                secondByte |= !regRequiresRex(b) << 5;
+                putByteUnchecked(secondByte);
+
+                uint8_t thirdByte = vexEncodeSimdPrefix(simdPrefix);
+                thirdByte |= (~inOpReg & 0xf) << 3;
+                putByteUnchecked(thirdByte);
+            }
+        private:
+            uint8_t vexEncodeSimdPrefix(OneByteOpcodeID simdPrefix)
+            {
+                switch (simdPrefix) {
+                case 0x66:
+                    return 1;
+                case 0xF3:
+                    return 2;
+                case 0xF2:
+                    return 3;
+                default:
+                    RELEASE_ASSERT_NOT_REACHED();
+                }
+                return 0;
+            }
+
+        };
+
         // Word-sized operands / no operand instruction formatters.
         //
         // In addition to the opcode, the following operand permutations are supported:
@@ -2237,116 +3415,176 @@ private:
 
         void oneByteOp(OneByteOpcodeID opcode)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            m_buffer.putByteUnchecked(opcode);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(opcode);
         }
 
         void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(0, 0, reg);
-            m_buffer.putByteUnchecked(opcode + (reg & 7));
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(0, 0, reg);
+            writer.putByteUnchecked(opcode + (reg & 7));
         }
 
         void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(reg, 0, rm);
-            m_buffer.putByteUnchecked(opcode);
-            registerModRM(reg, rm);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, rm);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
         }
 
         void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(reg, 0, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, base, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
         }
 
         void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(reg, 0, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM_disp32(reg, base, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM_disp32(reg, base, offset);
         }
         
         void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(reg, 0, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM_disp8(reg, base, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM_disp8(reg, base, offset);
         }
 
         void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(reg, index, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, base, index, scale, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, index, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
         }
 
 #if !CPU(X86_64)
         void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, address);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, address);
         }
 #endif
 
         void twoByteOp(TwoByteOpcodeID opcode)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
         }
 
         void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(reg, 0, rm);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
-            registerModRM(reg, rm);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
         }
 
         void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(reg, 0, base);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, base, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
         }
 
         void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIfNeeded(reg, index, base);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, base, index, scale, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, index, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
         }
 
 #if !CPU(X86_64)
         void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, address);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, address);
         }
 #endif
+        void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID b)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            if (regRequiresRex(b))
+                writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, b);
+            else
+                writer.twoBytesVex(simdPrefix, a, dest);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(dest, b);
+        }
 
-        void threeByteOp(ThreeByteOpcodeID opcode)
+        void vexNdsLigWigCommutativeTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID b)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(OP2_3BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
+            // Since this is a commutative operation, we can try switching the arguments.
+            if (regRequiresRex(b))
+                std::swap(a, b);
+            vexNdsLigWigTwoByteOp(simdPrefix, opcode, dest, a, b);
+        }
+
+        void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            if (regRequiresRex(base))
+                writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, base);
+            else
+                writer.twoBytesVex(simdPrefix, a, dest);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(dest, base, offset);
+        }
+
+        void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, int offset, RegisterID base, RegisterID index, int scale)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            if (regRequiresRex(base, index))
+                writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, index, base);
+            else
+                writer.twoBytesVex(simdPrefix, a, dest);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(dest, base, index, scale, offset);
+        }
+
+        void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(twoBytePrefix);
+            writer.putByteUnchecked(opcode);
+        }
+
+        void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(twoBytePrefix);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID base, int displacement)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(twoBytePrefix);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, displacement);
         }
 
 #if CPU(X86_64)
@@ -2358,65 +3596,83 @@ private:
 
         void oneByteOp64(OneByteOpcodeID opcode)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexW(0, 0, 0);
-            m_buffer.putByteUnchecked(opcode);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(0, 0, 0);
+            writer.putByteUnchecked(opcode);
         }
 
         void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexW(0, 0, reg);
-            m_buffer.putByteUnchecked(opcode + (reg & 7));
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(0, 0, reg);
+            writer.putByteUnchecked(opcode + (reg & 7));
         }
 
         void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexW(reg, 0, rm);
-            m_buffer.putByteUnchecked(opcode);
-            registerModRM(reg, rm);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, rm);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
         }
 
         void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexW(reg, 0, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, base, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
         }
 
         void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexW(reg, 0, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM_disp32(reg, base, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM_disp32(reg, base, offset);
         }
         
         void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexW(reg, 0, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM_disp8(reg, base, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM_disp8(reg, base, offset);
         }
 
         void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexW(reg, index, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, base, index, scale, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, index, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
         }
 
         void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexW(reg, 0, rm);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
-            registerModRM(reg, rm);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
+        }
+
+        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, index, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
         }
 #endif
 
@@ -2447,52 +3703,52 @@ private:
 
         void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
-            m_buffer.putByteUnchecked(opcode);
-            registerModRM(groupOp, rm);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(groupOp, rm);
         }
 
         void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
-            m_buffer.putByteUnchecked(opcode);
-            registerModRM(reg, rm);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(reg, rm), reg, 0, rm);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
         }
 
         void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(base), reg, 0, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, base, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(reg, base), reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
         }
 
         void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
-            m_buffer.putByteUnchecked(opcode);
-            memoryModRM(reg, base, index, scale, offset);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index, base), reg, index, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
         }
 
         void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
-            registerModRM(reg, rm);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(reg, rm), reg, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
         }
 
         void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
         {
-            m_buffer.ensureSpace(maxInstructionSize);
-            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
-            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
-            m_buffer.putByteUnchecked(opcode);
-            registerModRM(groupOp, rm);
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(groupOp, rm);
         }
 
         // Immediates:
@@ -2535,177 +3791,6 @@ private:
 
         unsigned debugOffset() { return m_buffer.debugOffset(); }
 
-    private:
-
-        // Internals; ModRm and REX formatters.
-
-        static const RegisterID noBase = X86Registers::ebp;
-        static const RegisterID hasSib = X86Registers::esp;
-        static const RegisterID noIndex = X86Registers::esp;
-#if CPU(X86_64)
-        static const RegisterID noBase2 = X86Registers::r13;
-        static const RegisterID hasSib2 = X86Registers::r12;
-
-        // Registers r8 & above require a REX prefixe.
-        inline bool regRequiresRex(int reg)
-        {
-            return (reg >= X86Registers::r8);
-        }
-
-        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
-        inline bool byteRegRequiresRex(int reg)
-        {
-            return (reg >= X86Registers::esp);
-        }
-
-        // Format a REX prefix byte.
-        inline void emitRex(bool w, int r, int x, int b)
-        {
-            ASSERT(r >= 0);
-            ASSERT(x >= 0);
-            ASSERT(b >= 0);
-            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
-        }
-
-        // Used to plant a REX byte with REX.w set (for 64-bit operations).
-        inline void emitRexW(int r, int x, int b)
-        {
-            emitRex(true, r, x, b);
-        }
-
-        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
-        // regRequiresRex() to check other registers (i.e. address base & index).
-        inline void emitRexIf(bool condition, int r, int x, int b)
-        {
-            if (condition) emitRex(false, r, x, b);
-        }
-
-        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
-        inline void emitRexIfNeeded(int r, int x, int b)
-        {
-            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
-        }
-#else
-        // No REX prefix bytes on 32-bit x86.
-        inline bool regRequiresRex(int) { return false; }
-        inline bool byteRegRequiresRex(int) { return false; }
-        inline void emitRexIf(bool, int, int, int) {}
-        inline void emitRexIfNeeded(int, int, int) {}
-#endif
-
-        void putModRm(ModRmMode mode, int reg, RegisterID rm)
-        {
-            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
-        }
-
-        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
-        {
-            ASSERT(mode != ModRmRegister);
-
-            putModRm(mode, reg, hasSib);
-            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
-        }
-
-        void registerModRM(int reg, RegisterID rm)
-        {
-            putModRm(ModRmRegister, reg, rm);
-        }
-
-        void memoryModRM(int reg, RegisterID base, int offset)
-        {
-            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if CPU(X86_64)
-            if ((base == hasSib) || (base == hasSib2)) {
-#else
-            if (base == hasSib) {
-#endif
-                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
-                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
-                else if (CAN_SIGN_EXTEND_8_32(offset)) {
-                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
-                    m_buffer.putByteUnchecked(offset);
-                } else {
-                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
-                    m_buffer.putIntUnchecked(offset);
-                }
-            } else {
-#if CPU(X86_64)
-                if (!offset && (base != noBase) && (base != noBase2))
-#else
-                if (!offset && (base != noBase))
-#endif
-                    putModRm(ModRmMemoryNoDisp, reg, base);
-                else if (CAN_SIGN_EXTEND_8_32(offset)) {
-                    putModRm(ModRmMemoryDisp8, reg, base);
-                    m_buffer.putByteUnchecked(offset);
-                } else {
-                    putModRm(ModRmMemoryDisp32, reg, base);
-                    m_buffer.putIntUnchecked(offset);
-                }
-            }
-        }
-
-        void memoryModRM_disp8(int reg, RegisterID base, int offset)
-        {
-            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-            ASSERT(CAN_SIGN_EXTEND_8_32(offset));
-#if CPU(X86_64)
-            if ((base == hasSib) || (base == hasSib2)) {
-#else
-            if (base == hasSib) {
-#endif
-                putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
-                m_buffer.putByteUnchecked(offset);
-            } else {
-                putModRm(ModRmMemoryDisp8, reg, base);
-                m_buffer.putByteUnchecked(offset);
-            }
-        }
-
-        void memoryModRM_disp32(int reg, RegisterID base, int offset)
-        {
-            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if CPU(X86_64)
-            if ((base == hasSib) || (base == hasSib2)) {
-#else
-            if (base == hasSib) {
-#endif
-                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
-                m_buffer.putIntUnchecked(offset);
-            } else {
-                putModRm(ModRmMemoryDisp32, reg, base);
-                m_buffer.putIntUnchecked(offset);
-            }
-        }
-    
-        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
-        {
-            ASSERT(index != noIndex);
-
-#if CPU(X86_64)
-            if (!offset && (base != noBase) && (base != noBase2))
-#else
-            if (!offset && (base != noBase))
-#endif
-                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
-            else if (CAN_SIGN_EXTEND_8_32(offset)) {
-                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
-                m_buffer.putByteUnchecked(offset);
-            } else {
-                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
-                m_buffer.putIntUnchecked(offset);
-            }
-        }
-
-#if !CPU(X86_64)
-        void memoryModRM(int reg, const void* address)
-        {
-            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
-            putModRm(ModRmMemoryNoDisp, reg, noBase);
-            m_buffer.putIntUnchecked(reinterpret_cast<uint32_t>(address));
-        }
-#endif
-
     public:
         AssemblerBuffer m_buffer;
     } m_formatter;
@@ -2716,5 +3801,3 @@ private:
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(X86)
-
-#endif // X86Assembler_h
diff --git a/Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp b/Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp
new file mode 100644
index 000000000..594d0d69b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ArgumentRegValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+ArgumentRegValue::~ArgumentRegValue()
+{
+}
+
+void ArgumentRegValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, m_reg);
+}
+
+Value* ArgumentRegValue::cloneImpl() const
+{
+    return new ArgumentRegValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ArgumentRegValue.h b/Source/JavaScriptCore/b3/B3ArgumentRegValue.h
new file mode 100644
index 000000000..55b365fc2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ArgumentRegValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include "Reg.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ArgumentRegValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == ArgumentReg; }
+    
+    ~ArgumentRegValue();
+
+    Reg argumentReg() const { return m_reg; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    ArgumentRegValue(Origin origin, Reg reg)
+        : Value(CheckedOpcode, ArgumentReg, reg.isGPR() ? pointerType() : Double, origin)
+        , m_reg(reg)
+    {
+        ASSERT(reg.isSet());
+    }
+
+    Reg m_reg;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BasicBlock.cpp b/Source/JavaScriptCore/b3/B3BasicBlock.cpp
new file mode 100644
index 000000000..63a4e58d1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BasicBlock.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3BasicBlock.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BasicBlockUtils.h"
+#include "B3Procedure.h"
+#include "B3ValueInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+const char* const BasicBlock::dumpPrefix = "#";
+
+BasicBlock::BasicBlock(unsigned index, double frequency)
+    : m_index(index)
+    , m_frequency(frequency)
+{
+}
+
+BasicBlock::~BasicBlock()
+{
+}
+
+void BasicBlock::append(Value* value)
+{
+    m_values.append(value);
+    value->owner = this;
+}
+
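+// Inserts the value just before the terminal: duplicate the current terminal into a new
+// last slot, then store the new value where the terminal used to be.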
+void BasicBlock::appendNonTerminal(Value* value)
+{
+    m_values.append(m_values.last());
+    m_values[m_values.size() - 2] = value;
+    value->owner = this;
+}
+
+void BasicBlock::removeLast(Procedure& proc)
+{
+    ASSERT(!m_values.isEmpty());
+    proc.deleteValue(m_values.takeLast());
+}
+
+void BasicBlock::replaceLast(Procedure& proc, Value* value)
+{
+    removeLast(proc);
+    append(value);
+}
+
+Value* BasicBlock::appendIntConstant(Procedure& proc, Origin origin, Type type, int64_t value)
+{
+    Value* result = proc.addIntConstant(origin, type, value);
+    append(result);
+    return result;
+}
+
+Value* BasicBlock::appendIntConstant(Procedure& proc, Value* likeValue, int64_t value)
+{
+    return appendIntConstant(proc, likeValue->origin(), likeValue->type(), value);
+}
+
+Value* BasicBlock::appendBoolConstant(Procedure& proc, Origin origin, bool value)
+{
+    return appendIntConstant(proc, origin, Int32, value ? 1 : 0);
+}
+
+void BasicBlock::clearSuccessors()
+{
+    m_successors.clear();
+}
+
+void BasicBlock::appendSuccessor(FrequentedBlock target)
+{
+    m_successors.append(target);
+}
+
+void BasicBlock::setSuccessors(FrequentedBlock target)
+{
+    m_successors.resize(1);
+    m_successors[0] = target;
+}
+
+void BasicBlock::setSuccessors(FrequentedBlock taken, FrequentedBlock notTaken)
+{
+    m_successors.resize(2);
+    m_successors[0] = taken;
+    m_successors[1] = notTaken;
+}
+
+bool BasicBlock::replaceSuccessor(BasicBlock* from, BasicBlock* to)
+{
+    bool result = false;
+    for (BasicBlock*& successor : successorBlocks()) {
+        if (successor == from) {
+            successor = to;
+            result = true;
+            
+            // Keep looping because a successor may be mentioned multiple times, like in a Switch.
+        }
+    }
+    return result;
+}
+
+bool BasicBlock::addPredecessor(BasicBlock* block)
+{
+    return B3::addPredecessor(this, block);
+}
+
+bool BasicBlock::removePredecessor(BasicBlock* block)
+{
+    return B3::removePredecessor(this, block);
+}
+
+bool BasicBlock::replacePredecessor(BasicBlock* from, BasicBlock* to)
+{
+    return B3::replacePredecessor(this, from, to);
+}
+
+void BasicBlock::updatePredecessorsAfter()
+{
+    B3::updatePredecessorsAfter(this);
+}
+
+void BasicBlock::dump(PrintStream& out) const
+{
+    out.print(dumpPrefix, m_index);
+}
+
+void BasicBlock::deepDump(const Procedure& proc, PrintStream& out) const
+{
+    out.print("BB", *this, ": ; frequency = ", m_frequency, "\n");
+    if (predecessors().size())
+        out.print("  Predecessors: ", pointerListDump(predecessors()), "\n");
+    for (Value* value : *this)
+        out.print("    ", B3::deepDump(proc, value), "\n");
+    if (!successors().isEmpty()) {
+        out.print("  Successors: ");
+        if (size())
+            last()->dumpSuccessors(this, out);
+        else
+            out.print(listDump(successors()));
+        out.print("\n");
+    }
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin)
+{
+    RELEASE_ASSERT(opcode == Oops || opcode == Return);
+    clearSuccessors();
+    return appendNew<Value>(proc, opcode, origin);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, Value* value)
+{
+    RELEASE_ASSERT(opcode == Return);
+    clearSuccessors();
+    return appendNew<Value>(proc, opcode, origin, value);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, const FrequentedBlock& target)
+{
+    RELEASE_ASSERT(opcode == Jump);
+    setSuccessors(target);
+    return appendNew<Value>(proc, opcode, origin);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, Value* predicate, const FrequentedBlock& taken, const FrequentedBlock& notTaken)
+{
+    RELEASE_ASSERT(opcode == Branch);
+    setSuccessors(taken, notTaken);
+    return appendNew<Value>(proc, opcode, origin, predicate);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BasicBlock.h b/Source/JavaScriptCore/b3/B3BasicBlock.h
new file mode 100644
index 000000000..11f466835
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BasicBlock.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequentedBlock.h"
+#include "B3Opcode.h"
+#include "B3Origin.h"
+#include "B3SuccessorCollection.h"
+#include "B3Type.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BlockInsertionSet;
+class InsertionSet;
+class Procedure;
+class Value;
+
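+// A BasicBlock is an ordered list of Values ending in a terminal, plus the successor and
+// predecessor edges that link blocks together. Blocks are created and owned by Procedure.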
+class BasicBlock {
+    WTF_MAKE_NONCOPYABLE(BasicBlock);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    typedef Vector<Value*> ValueList;
+    typedef Vector<BasicBlock*, 2> PredecessorList;
+    typedef Vector<FrequentedBlock, 2> SuccessorList;
+
+    static const char* const dumpPrefix;
+
+    ~BasicBlock();
+
+    unsigned index() const { return m_index; }
+
+    ValueList::iterator begin() { return m_values.begin(); }
+    ValueList::iterator end() { return m_values.end(); }
+    ValueList::const_iterator begin() const { return m_values.begin(); }
+    ValueList::const_iterator end() const { return m_values.end(); }
+
+    size_t size() const { return m_values.size(); }
+    Value* at(size_t index) const { return m_values[index]; }
+    Value*& at(size_t index) { return m_values[index]; }
+
+    Value* last() const { return m_values.last(); }
+    Value*& last() { return m_values.last(); }
+
+    const ValueList& values() const { return m_values; }
+    ValueList& values() { return m_values; }
+
+    JS_EXPORT_PRIVATE void append(Value*);
+    JS_EXPORT_PRIVATE void appendNonTerminal(Value*);
+    JS_EXPORT_PRIVATE void replaceLast(Procedure&, Value*);
+
+    template<typename ValueType, typename... Arguments>
+    ValueType* appendNew(Procedure&, Arguments...);
+    template<typename ValueType, typename... Arguments>
+    ValueType* appendNewNonTerminal(Procedure&, Arguments...);
+
+    JS_EXPORT_PRIVATE Value* appendIntConstant(Procedure&, Origin, Type, int64_t value);
+    Value* appendIntConstant(Procedure&, Value* likeValue, int64_t value);
+    Value* appendBoolConstant(Procedure&, Origin, bool);
+
+    void removeLast(Procedure&);
+    
+    template<typename ValueType, typename... Arguments>
+    ValueType* replaceLastWithNew(Procedure&, Arguments...);
+
+    unsigned numSuccessors() const { return m_successors.size(); }
+    const FrequentedBlock& successor(unsigned index) const { return m_successors[index]; }
+    FrequentedBlock& successor(unsigned index) { return m_successors[index]; }
+    const SuccessorList& successors() const { return m_successors; }
+    SuccessorList& successors() { return m_successors; }
+    
+    void clearSuccessors();
+    JS_EXPORT_PRIVATE void appendSuccessor(FrequentedBlock);
+    JS_EXPORT_PRIVATE void setSuccessors(FrequentedBlock);
+    JS_EXPORT_PRIVATE void setSuccessors(FrequentedBlock, FrequentedBlock);
+
+    BasicBlock* successorBlock(unsigned index) const { return successor(index).block(); }
+    BasicBlock*& successorBlock(unsigned index) { return successor(index).block(); }
+    SuccessorCollection<BasicBlock, SuccessorList> successorBlocks()
+    {
+        return SuccessorCollection<BasicBlock, SuccessorList>(successors());
+    }
+    SuccessorCollection<const BasicBlock, const SuccessorList> successorBlocks() const
+    {
+        return SuccessorCollection<const BasicBlock, const SuccessorList>(successors());
+    }
+
+    bool replaceSuccessor(BasicBlock* from, BasicBlock* to);
+    
+    // This is only valid for Jump and Branch.
+    const FrequentedBlock& taken() const;
+    FrequentedBlock& taken();
+    // This is only valid for Branch.
+    const FrequentedBlock& notTaken() const;
+    FrequentedBlock& notTaken();
+    // This is only valid for Branch and Switch.
+    const FrequentedBlock& fallThrough() const;
+    FrequentedBlock& fallThrough();
+
+    unsigned numPredecessors() const { return m_predecessors.size(); }
+    BasicBlock* predecessor(unsigned index) const { return m_predecessors[index]; }
+    BasicBlock*& predecessor(unsigned index) { return m_predecessors[index]; }
+    const PredecessorList& predecessors() const { return m_predecessors; }
+    PredecessorList& predecessors() { return m_predecessors; }
+    bool containsPredecessor(BasicBlock* block) { return m_predecessors.contains(block); }
+
+    bool addPredecessor(BasicBlock*);
+    bool removePredecessor(BasicBlock*);
+    bool replacePredecessor(BasicBlock* from, BasicBlock* to);
+
+    // Update predecessors starting with the successors of this block.
+    void updatePredecessorsAfter();
+
+    double frequency() const { return m_frequency; }
+
+    void dump(PrintStream&) const;
+    void deepDump(const Procedure&, PrintStream&) const;
+
+    // These are deprecated methods for compatibility with the old ControlValue class. Don't use them
+    // in new code.
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159440
+    
+    // Use this for Oops.
+    JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin);
+    // Use this for Return.
+    JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, Value*);
+    // Use this for Jump.
+    JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, const FrequentedBlock&);
+    // Use this for Branch.
+    JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, Value*, const FrequentedBlock&, const FrequentedBlock&);
+    
+private:
+    friend class BlockInsertionSet;
+    friend class InsertionSet;
+    friend class Procedure;
+    
+    // Instantiate via Procedure.
+    BasicBlock(unsigned index, double frequency);
+
+    unsigned m_index;
+    ValueList m_values;
+    PredecessorList m_predecessors;
+    SuccessorList m_successors;
+    double m_frequency;
+};
+
+class DeepBasicBlockDump {
+public:
+    DeepBasicBlockDump(const Procedure& proc, const BasicBlock* block)
+        : m_proc(proc)
+        , m_block(block)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_block)
+            m_block->deepDump(m_proc, out);
+        else
+            out.print("");
+    }
+
+private:
+    const Procedure& m_proc;
+    const BasicBlock* m_block;
+};
+
+inline DeepBasicBlockDump deepDump(const Procedure& proc, const BasicBlock* block)
+{
+    return DeepBasicBlockDump(proc, block);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BasicBlockInlines.h b/Source/JavaScriptCore/b3/B3BasicBlockInlines.h
new file mode 100644
index 000000000..26c2df41b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BasicBlockInlines.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3ProcedureInlines.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::appendNew(Procedure& procedure, Arguments... arguments)
+{
+    ValueType* result = procedure.add<ValueType>(arguments...);
+    append(result);
+    return result;
+}
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::appendNewNonTerminal(Procedure& procedure, Arguments... arguments)
+{
+    ValueType* result = procedure.add<ValueType>(arguments...);
+    appendNonTerminal(result);
+    return result;
+}
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::replaceLastWithNew(Procedure& procedure, Arguments... arguments)
+{
+    ValueType* result = procedure.add<ValueType>(arguments...);
+    replaceLast(procedure, result);
+    return result;
+}
+
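+// Successor slot layout: for Jump and Branch the taken successor is slot 0, Branch's
+// not-taken successor is slot 1, and for Branch and Switch the fall-through successor is
+// the last slot. The accessors below assert accordingly.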
+inline const FrequentedBlock& BasicBlock::taken() const
+{
+    ASSERT(last()->opcode() == Jump || last()->opcode() == Branch);
+    return m_successors[0];
+}
+
+inline FrequentedBlock& BasicBlock::taken()
+{
+    ASSERT(last()->opcode() == Jump || last()->opcode() == Branch);
+    return m_successors[0];
+}
+
+inline const FrequentedBlock& BasicBlock::notTaken() const
+{
+    ASSERT(last()->opcode() == Branch);
+    return m_successors[1];
+}
+
+inline FrequentedBlock& BasicBlock::notTaken()
+{
+    ASSERT(last()->opcode() == Branch);
+    return m_successors[1];
+}
+
+inline const FrequentedBlock& BasicBlock::fallThrough() const
+{
+    ASSERT(last()->opcode() == Branch || last()->opcode() == Switch);
+    return m_successors.last();
+}
+
+inline FrequentedBlock& BasicBlock::fallThrough()
+{
+    ASSERT(last()->opcode() == Branch || last()->opcode() == Switch);
+    return m_successors.last();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BasicBlockUtils.h b/Source/JavaScriptCore/b3/B3BasicBlockUtils.h
new file mode 100644
index 000000000..e5998c864
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BasicBlockUtils.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/IndexSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+template<typename BasicBlock>
+bool addPredecessor(BasicBlock* block, BasicBlock* predecessor)
+{
+    auto& predecessors = block->predecessors();
+
+    if (predecessors.contains(predecessor))
+        return false;
+
+    predecessors.append(predecessor);
+    return true;
+}
+
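+// Removes by swapping with the last element, so the order of the predecessor list is not
+// preserved.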
+template<typename BasicBlock>
+bool removePredecessor(BasicBlock* block, BasicBlock* predecessor)
+{
+    auto& predecessors = block->predecessors();
+    for (unsigned i = 0; i < predecessors.size(); ++i) {
+        if (predecessors[i] == predecessor) {
+            predecessors[i--] = predecessors.last();
+            predecessors.removeLast();
+            ASSERT(!predecessors.contains(predecessor));
+            return true;
+        }
+    }
+    return false;
+}
+
+template<typename BasicBlock>
+bool replacePredecessor(BasicBlock* block, BasicBlock* from, BasicBlock* to)
+{
+    bool changed = false;
+    // We do it this way because 'to' may already be a predecessor of 'block'.
+    changed |= removePredecessor(block, from);
+    changed |= addPredecessor(block, to);
+    return changed;
+}
+
+template<typename BasicBlock>
+void updatePredecessorsAfter(BasicBlock* root)
+{
+    Vector<BasicBlock*, 16> worklist;
+    worklist.append(root);
+    while (!worklist.isEmpty()) {
+        BasicBlock* block = worklist.takeLast();
+        for (BasicBlock* successor : block->successorBlocks()) {
+            if (addPredecessor(successor, block))
+                worklist.append(successor);
+        }
+    }
+}
+
+template<typename BasicBlock>
+void clearPredecessors(Vector<std::unique_ptr<BasicBlock>>& blocks)
+{
+    for (auto& block : blocks) {
+        if (block)
+            block->predecessors().resize(0);
+    }
+}
+
+template<typename BasicBlock>
+void recomputePredecessors(Vector<std::unique_ptr<BasicBlock>>& blocks)
+{
+    clearPredecessors(blocks);
+    updatePredecessorsAfter(blocks[0].get());
+}
+
+template<typename BasicBlock>
+bool isBlockDead(BasicBlock* block)
+{
+    if (!block)
+        return false;
+    if (!block->index())
+        return false;
+    return block->predecessors().isEmpty();
+}
+
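+// The traversals below visit only blocks reachable from root; dead blocks never appear in
+// the returned lists.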
+template<typename BasicBlock>
+Vector<BasicBlock*> blocksInPreOrder(BasicBlock* root)
+{
+    Vector<BasicBlock*> result;
+    GraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> worklist;
+    worklist.push(root);
+    while (BasicBlock* block = worklist.pop()) {
+        result.append(block);
+        for (BasicBlock* successor : block->successorBlocks())
+            worklist.push(successor);
+    }
+    return result;
+}
+
+template<typename BasicBlock>
+Vector<BasicBlock*> blocksInPostOrder(BasicBlock* root)
+{
+    Vector<BasicBlock*> result;
+    PostOrderGraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> worklist;
+    worklist.push(root);
+    while (GraphNodeWithOrder<BasicBlock*> item = worklist.pop()) {
+        switch (item.order) {
+        case GraphVisitOrder::Pre:
+            worklist.pushPost(item.node);
+            for (BasicBlock* successor : item.node->successorBlocks())
+                worklist.push(successor);
+            break;
+        case GraphVisitOrder::Post:
+            result.append(item.node);
+            break;
+        }
+    }
+    return result;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp b/Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp
new file mode 100644
index 000000000..76a166820
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3BlockInsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3InsertionSet.h"
+#include "B3ProcedureInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 {
+
+BlockInsertionSet::BlockInsertionSet(Procedure& proc)
+    : m_proc(proc)
+{
+}
+
+BlockInsertionSet::~BlockInsertionSet() { }
+
+void BlockInsertionSet::insert(BlockInsertion&& insertion)
+{
+    m_insertions.append(WTFMove(insertion));
+}
+
+BasicBlock* BlockInsertionSet::insert(unsigned index, double frequency)
+{
+    std::unique_ptr<BasicBlock> block(new BasicBlock(UINT_MAX, frequency));
+    BasicBlock* result = block.get();
+    insert(BlockInsertion(index, WTFMove(block)));
+    return result;
+}
+
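+// Note: the 'frequency == frequency' tests below are self-comparison NaN checks. The
+// default argument is PNaN, which compares unequal to itself, so callers that don't pass
+// a frequency inherit the reference block's frequency.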
+BasicBlock* BlockInsertionSet::insertBefore(BasicBlock* before, double frequency)
+{
+    return insert(before->index(), frequency == frequency ? frequency : before->frequency());
+}
+
+BasicBlock* BlockInsertionSet::insertAfter(BasicBlock* after, double frequency)
+{
+    return insert(after->index() + 1, frequency == frequency ? frequency : after->frequency());
+}
+
+BasicBlock* BlockInsertionSet::splitForward(
+    BasicBlock* block, unsigned& valueIndex, InsertionSet* insertionSet, double frequency)
+{
+    Value* value = block->at(valueIndex);
+
+    // Create a new block that will go just before 'block', and make it contain everything prior
+    // to 'valueIndex'.
+    BasicBlock* result = insertBefore(block, frequency);
+    result->m_values.resize(valueIndex + 1);
+    for (unsigned i = valueIndex; i--;)
+        result->m_values[i] = block->m_values[i];
+
+    // Make the new block jump to 'block'.
+    result->m_values[valueIndex] = m_proc.add<Value>(Jump, value->origin());
+    result->setSuccessors(FrequentedBlock(block));
+
+    // If we had inserted things into 'block' before this, execute those insertions now.
+    if (insertionSet)
+        insertionSet->execute(result);
+
+    // Remove everything prior to 'valueIndex' from 'block', since those things are now in the
+    // new block.
+    block->m_values.remove(0, valueIndex);
+
+    // This is being used in a forward loop over 'block'. Update the index of the loop so that
+    // it can continue to the next block.
+    valueIndex = 0;
+
+    // Fixup the predecessors of 'block'. They now must jump to the new block.
+    result->predecessors() = WTFMove(block->predecessors());
+    block->addPredecessor(result);
+    for (BasicBlock* predecessor : result->predecessors())
+        predecessor->replaceSuccessor(block, result);
+
+    return result;
+}
+
+bool BlockInsertionSet::execute()
+{
+    if (m_insertions.isEmpty())
+        return false;
+    
+    // We allow insertions to be given to us in any order. So, we need to sort them before
+    // running WTF::executeInsertions. We strongly prefer a stable sort and we want it to be
+    // fast, so we use bubble sort.
+    bubbleSort(m_insertions.begin(), m_insertions.end());
+
+    executeInsertions(m_proc.m_blocks, m_insertions);
+    
+    // Prune out empty entries. This isn't strictly necessary but it's
+    // healthy to keep the block list from growing.
+    m_proc.m_blocks.removeAllMatching(
+        [&] (std::unique_ptr<BasicBlock>& blockPtr) -> bool {
+            return !blockPtr;
+        });
+    
+    // Make sure that the blocks know their new indices.
+    for (unsigned i = 0; i < m_proc.m_blocks.size(); ++i)
+        m_proc.m_blocks[i]->m_index = i;
+    
+    return true;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3BlockInsertionSet.h b/Source/JavaScriptCore/b3/B3BlockInsertionSet.h
new file mode 100644
index 000000000..b316f646c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BlockInsertionSet.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class InsertionSet;
+
+typedef WTF::Insertion<std::unique_ptr<BasicBlock>> BlockInsertion;
+
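+// Batches up block insertions so that they can all be applied in one pass over the block
+// list in execute(), instead of splicing the Procedure's block vector once per insertion.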
+class BlockInsertionSet {
+public:
+    BlockInsertionSet(Procedure&);
+    ~BlockInsertionSet();
+    
+    void insert(BlockInsertion&&);
+
+    // Insert a new block at a given index.
+    BasicBlock* insert(unsigned index, double frequency = PNaN);
+
+    // Inserts a new block before the given block. Usually you will not pass the frequency
+    // argument. Passing PNaN causes us to just use the frequency of the 'before' block. That's
+    // usually what you want.
+    BasicBlock* insertBefore(BasicBlock* before, double frequency = PNaN);
+
+    // Inserts a new block after the given block.
+    BasicBlock* insertAfter(BasicBlock* after, double frequency = PNaN);
+
+    // A helper to split a block when forward iterating over it. It creates a new block to hold
+    // everything before the instruction at valueIndex. The current block is left with
+    // everything at and after valueIndex. If the optional InsertionSet is provided, it will get
+    // executed on the newly created block - this makes sense if you had previously inserted
+    // things into the original block, since the newly created block will be indexed identically
+    // to how this block was indexed for all values prior to valueIndex. After this runs, it sets
+    // valueIndex to zero. This allows you to use this method for things like:
+    //
+    // for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+    //     Value* value = block->at(valueIndex);
+    //     if (value->opcode() == Foo) {
+    //         BasicBlock* predecessor =
+    //             m_blockInsertionSet.splitForward(block, valueIndex, &m_insertionSet);
+    //         ... // Now you can append to predecessor, insert new blocks before 'block', and
+    //         ... // you can use m_insertionSet to insert more things before 'value'.
+    //         predecessor->updatePredecessorsAfter();
+    //     }
+    // }
+    //
+    // Note how usually this idiom ends in a call to updatePredecessorsAfter(), which ensures
+    // that the predecessors involved in any of the new control flow that you've created are up
+    // to date.
+    BasicBlock* splitForward(
+        BasicBlock*, unsigned& valueIndex, InsertionSet* = nullptr,
+        double frequency = PNaN);
+    
+    bool execute();
+
+private:
+    Procedure& m_proc;
+    Vector<BlockInsertion, 8> m_insertions;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BlockWorklist.h b/Source/JavaScriptCore/b3/B3BlockWorklist.h
new file mode 100644
index 000000000..6fa197c61
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BlockWorklist.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+
+typedef GraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> BlockWorklist;
+
+// When you say BlockWith<int> you should read it as "block with an int".
+template<typename T> using BlockWith = GraphNodeWith<BasicBlock*, T>;
+
+// Extended block worklist is useful for enqueueing some meta-data along with the block. It also
+// permits forcibly enqueueing things even if the block has already been seen. It's useful for
+// things like building a spanning tree, in which case T (the auxiliary payload) would be the
+// successor index.
+template<typename T> using ExtendedBlockWorklist = ExtendedGraphNodeWorklist<BasicBlock*, T, IndexSet<BasicBlock>>;
+
+typedef GraphVisitOrder VisitOrder;
+
+typedef GraphNodeWithOrder<BasicBlock*> BlockWithOrder;
+
+typedef PostOrderGraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> PostOrderBlockWorklist;
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BottomProvider.h b/Source/JavaScriptCore/b3/B3BottomProvider.h
new file mode 100644
index 000000000..9a977f0eb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BottomProvider.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3InsertionSet.h"
+
+namespace JSC { namespace B3 {
+
+// This exists because we cannot convert values to constants in-place.
+// FIXME: https://bugs.webkit.org/show_bug.cgi?id=159119
+
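+// A BottomProvider is a functor that materializes a bottom (zero) constant of the
+// requested type at the insertion point captured at construction time.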
+class BottomProvider {
+public:
+    BottomProvider(InsertionSet& insertionSet, size_t index)
+        : m_insertionSet(&insertionSet)
+        , m_index(index)
+    {
+    }
+    
+    Value* operator()(Origin origin, Type type) const
+    {
+        return m_insertionSet->insertBottom(m_index, origin, type);
+    }
+    
+private:
+    InsertionSet* m_insertionSet;
+    size_t m_index;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp b/Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp
new file mode 100644
index 000000000..abdf0ceeb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3BreakCriticalEdges.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
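+// A critical edge goes from a block with multiple successors to a block with multiple
+// predecessors. Breaking such an edge means routing it through a fresh "pad" block that
+// contains only a Jump, giving later phases a place to insert code on that edge alone.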
+
+void breakCriticalEdges(Procedure& proc)
+{
+    BlockInsertionSet insertionSet(proc);
+    
+    for (BasicBlock* block : proc) {
+        if (block->numSuccessors() <= 1)
+            continue;
+
+        for (BasicBlock*& successor : block->successorBlocks()) {
+            if (successor->numPredecessors() <= 1)
+                continue;
+
+            BasicBlock* pad =
+                insertionSet.insertBefore(successor, successor->frequency());
+            pad->appendNew<Value>(proc, Jump, successor->at(0)->origin());
+            pad->setSuccessors(FrequentedBlock(successor));
+            pad->addPredecessor(block);
+            successor->replacePredecessor(block, pad);
+            successor = pad;
+        }
+    }
+
+    insertionSet.execute();
+    proc.invalidateCFG();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3BreakCriticalEdges.h b/Source/JavaScriptCore/b3/B3BreakCriticalEdges.h
new file mode 100644
index 000000000..75c324f4e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BreakCriticalEdges.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+void breakCriticalEdges(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CCallValue.cpp b/Source/JavaScriptCore/b3/B3CCallValue.cpp
new file mode 100644
index 000000000..518d72349
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CCallValue.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3CCallValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+CCallValue::~CCallValue()
+{
+}
+
+Value* CCallValue::cloneImpl() const
+{
+    return new CCallValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3CCallValue.h b/Source/JavaScriptCore/b3/B3CCallValue.h
new file mode 100644
index 000000000..44ec349f2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CCallValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Effects.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE CCallValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == CCall; }
+
+    ~CCallValue();
+
+    Effects effects;
+
+protected:
+    Value* cloneImpl() const override;
+    
+private:
+    friend class Procedure;
+
+    template<typename... Arguments>
+    CCallValue(Type type, Origin origin, Arguments... arguments)
+        : Value(CheckedOpcode, CCall, type, origin, arguments...)
+        , effects(Effects::forCall())
+    {
+        RELEASE_ASSERT(numChildren() >= 1);
+    }
+
+    template<typename... Arguments>
+    CCallValue(Type type, Origin origin, const Effects& effects, Arguments... arguments)
+        : Value(CheckedOpcode, CCall, type, origin, arguments...)
+        , effects(effects)
+    {
+        RELEASE_ASSERT(numChildren() >= 1);
+    }
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CFG.h b/Source/JavaScriptCore/b3/B3CFG.h
new file mode 100644
index 000000000..3d1418e8a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CFG.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3Procedure.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
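+// CFG adapts a Procedure to the node/index/successors/predecessors shape expected by
+// WTF's generic graph algorithms (for example, dominator computation).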
+class CFG {
+    WTF_MAKE_NONCOPYABLE(CFG);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    typedef BasicBlock* Node;
+    typedef IndexSet<BasicBlock> Set;
+    template<typename T> using Map = IndexMap<BasicBlock, T>;
+    typedef Vector<BasicBlock*, 4> List;
+
+    CFG(Procedure& proc)
+        : m_proc(proc)
+    {
+    }
+
+    Node root() { return m_proc[0]; }
+
+    template<typename T>
+    Map<T> newMap() { return IndexMap<BasicBlock, T>(m_proc.size()); }
+
+    SuccessorCollection<BasicBlock, BasicBlock::SuccessorList> successors(Node node) { return node->successorBlocks(); }
+    BasicBlock::PredecessorList& predecessors(Node node) { return node->predecessors(); }
+
+    unsigned index(Node node) const { return node->index(); }
+    Node node(unsigned index) const { return m_proc[index]; }
+    unsigned numNodes() const { return m_proc.size(); }
+
+    PointerDump<BasicBlock> dump(Node node) const { return pointerDump(node); }
+
+    void dump(PrintStream& out) const
+    {
+        m_proc.dump(out);
+    }
+
+private:
+    Procedure& m_proc;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CaseCollection.cpp b/Source/JavaScriptCore/b3/B3CaseCollection.cpp
new file mode 100644
index 000000000..5221ebab4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CaseCollection.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3CaseCollection.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 {
+
+void CaseCollection::dump(PrintStream& out) const
+{
+    CommaPrinter comma;
+    for (SwitchCase switchCase : *this)
+        out.print(comma, switchCase);
+    out.print(comma, "default->", fallThrough());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3CaseCollection.h b/Source/JavaScriptCore/b3/B3CaseCollection.h
new file mode 100644
index 000000000..c45cc641d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CaseCollection.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SwitchCase.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class SwitchValue;
+
+// NOTE: You'll always want to include B3CaseCollectionInlines.h when you use this.
+
+class CaseCollection {
+public:
+    CaseCollection()
+    {
+    }
+    
+    CaseCollection(const SwitchValue* terminal, const BasicBlock* owner)
+        : m_switch(terminal)
+        , m_owner(owner)
+    {
+    }
+    
+    const FrequentedBlock& fallThrough() const;
+
+    unsigned size() const;
+    SwitchCase at(unsigned index) const;
+    
+    SwitchCase operator[](unsigned index) const
+    {
+        return at(index);
+    }
+
+    class iterator {
+    public:
+        iterator()
+            : m_collection(nullptr)
+            , m_index(0)
+        {
+        }
+
+        iterator(const CaseCollection& collection, unsigned index)
+            : m_collection(&collection)
+            , m_index(index)
+        {
+        }
+
+        SwitchCase operator*()
+        {
+            return m_collection->at(m_index);
+        }
+
+        iterator& operator++()
+        {
+            m_index++;
+            return *this;
+        }
+
+        bool operator==(const iterator& other) const
+        {
+            ASSERT(m_collection == other.m_collection);
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        const CaseCollection* m_collection;
+        unsigned m_index;
+    };
+
+    typedef iterator const_iterator;
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+    
+    void dump(PrintStream&) const;
+    
+private:
+    const SwitchValue* m_switch { nullptr };
+    const BasicBlock* m_owner { nullptr };
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CaseCollectionInlines.h b/Source/JavaScriptCore/b3/B3CaseCollectionInlines.h
new file mode 100644
index 000000000..237a56822
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CaseCollectionInlines.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CaseCollection.h"
+#include "B3SwitchValue.h"
+#include "B3BasicBlock.h"
+
+namespace JSC { namespace B3 {
+
+inline const FrequentedBlock& CaseCollection::fallThrough() const
+{
+    return m_owner->fallThrough();
+}
+
+inline unsigned CaseCollection::size() const
+{
+    return m_switch->numCaseValues();
+}
+
+inline SwitchCase CaseCollection::at(unsigned index) const
+{
+    return SwitchCase(m_switch->caseValue(index), m_owner->successor(index));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CheckSpecial.cpp b/Source/JavaScriptCore/b3/B3CheckSpecial.cpp
new file mode 100644
index 000000000..6f7826cb5
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CheckSpecial.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3CheckSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "AirInstInlines.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+namespace {
+
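+// Number of children of the B3 value that are consumed by the check itself; any remaining
+// children are stackmap (live state) values.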
+unsigned numB3Args(B3::Kind kind)
+{
+    switch (kind.opcode()) {
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+        return 2;
+    case Check:
+        return 1;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return 0;
+    }
+}
+
+unsigned numB3Args(Value* value)
+{
+    return numB3Args(value->kind());
+}
+
+unsigned numB3Args(Inst& inst)
+{
+    return numB3Args(inst.origin);
+}
+
+} // anonymous namespace
+
+CheckSpecial::Key::Key(const Inst& inst)
+{
+    m_kind = inst.kind;
+    m_numArgs = inst.args.size();
+    m_stackmapRole = SameAsRep;
+}
+
+void CheckSpecial::Key::dump(PrintStream& out) const
+{
+    out.print(m_kind, "(", m_numArgs, ",", m_stackmapRole, ")");
+}
+
+CheckSpecial::CheckSpecial(Air::Kind kind, unsigned numArgs, RoleMode stackmapRole)
+    : m_checkKind(kind)
+    , m_stackmapRole(stackmapRole)
+    , m_numCheckArgs(numArgs)
+{
+    ASSERT(isDefinitelyTerminal(kind.opcode));
+}
+
+CheckSpecial::CheckSpecial(const CheckSpecial::Key& key)
+    : CheckSpecial(key.kind(), key.numArgs(), key.stackmapRole())
+{
+}
+
+CheckSpecial::~CheckSpecial()
+{
+}
+
+Inst CheckSpecial::hiddenBranch(const Inst& inst) const
+{
+    Inst hiddenBranch(m_checkKind, inst.origin);
+    hiddenBranch.args.reserveInitialCapacity(m_numCheckArgs);
+    for (unsigned i = 0; i < m_numCheckArgs; ++i)
+        hiddenBranch.args.append(inst.args[i + 1]);
+    ASSERT(hiddenBranch.isTerminal());
+    return hiddenBranch;
+}
+
+void CheckSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+    Inst hidden = hiddenBranch(inst);
+    hidden.forEachArg(
+        [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+            unsigned index = &arg - &hidden.args[0];
+            callback(inst.args[1 + index], role, type, width);
+        });
+
+    std::optional<unsigned> firstRecoverableIndex;
+    if (m_checkKind.opcode == BranchAdd32 || m_checkKind.opcode == BranchAdd64)
+        firstRecoverableIndex = 1;
+    forEachArgImpl(numB3Args(inst), m_numCheckArgs + 1, inst, m_stackmapRole, firstRecoverableIndex, callback);
+}
+
+bool CheckSpecial::isValid(Inst& inst)
+{
+    return hiddenBranch(inst).isValidForm()
+        && isValidImpl(numB3Args(inst), m_numCheckArgs + 1, inst)
+        && inst.args.size() - m_numCheckArgs - 1 == inst.origin->numChildren() - numB3Args(inst);
+}
+
+bool CheckSpecial::admitsStack(Inst& inst, unsigned argIndex)
+{
+    if (argIndex >= 1 && argIndex < 1 + m_numCheckArgs)
+        return hiddenBranch(inst).admitsStack(argIndex - 1);
+    return admitsStackImpl(numB3Args(inst), m_numCheckArgs + 1, inst, argIndex);
+}
+
+std::optional<unsigned> CheckSpecial::shouldTryAliasingDef(Inst& inst)
+{
+    if (std::optional<unsigned> branchDef = hiddenBranch(inst).shouldTryAliasingDef())
+        return *branchDef + 1;
+    return std::nullopt;
+}
+
+CCallHelpers::Jump CheckSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
+{
+    CCallHelpers::Jump fail = hiddenBranch(inst).generate(jit, context);
+    ASSERT(fail.isSet());
+
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    Vector<ValueRep> reps = repsImpl(context, numB3Args(inst), m_numCheckArgs + 1, inst);
+
+    // Set aside the args that are relevant to undoing the operation. This is because we don't want to
+    // capture all of inst in the closure below.
+    Vector<Arg, 3> args;
+    for (unsigned i = 0; i < m_numCheckArgs; ++i)
+        args.append(inst.args[1 + i]);
+
+    context.latePaths.append(
+        createSharedTask<GenerationContext::LatePathFunction>(
+            [=] (CCallHelpers& jit, GenerationContext& context) {
+                fail.link(&jit);
+
+                // If necessary, undo the operation.
+                switch (m_checkKind.opcode) {
+                case BranchAdd32:
+                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
+                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
+                        // This is ugly, but that's fine - we won't have to do this very often.
+                        ASSERT(args[1].isGPR());
+                        GPRReg valueGPR = args[1].gpr();
+                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
+                        jit.pushToSave(scratchGPR);
+                        jit.setCarry(scratchGPR);
+                        jit.lshift32(CCallHelpers::TrustedImm32(31), scratchGPR);
+                        jit.urshift32(CCallHelpers::TrustedImm32(1), valueGPR);
+                        jit.or32(scratchGPR, valueGPR);
+                        jit.popToRestore(scratchGPR);
+                        break;
+                    }
+                    if (m_numCheckArgs == 4) {
+                        if (args[1] == args[3])
+                            Inst(Sub32, nullptr, args[2], args[3]).generate(jit, context);
+                        else if (args[2] == args[3])
+                            Inst(Sub32, nullptr, args[1], args[3]).generate(jit, context);
+                    } else if (m_numCheckArgs == 3)
+                        Inst(Sub32, nullptr, args[1], args[2]).generate(jit, context);
+                    break;
+                case BranchAdd64:
+                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
+                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
+                        // This is ugly, but that's fine - we won't have to do this very often.
+                        ASSERT(args[1].isGPR());
+                        GPRReg valueGPR = args[1].gpr();
+                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
+                        jit.pushToSave(scratchGPR);
+                        jit.setCarry(scratchGPR);
+                        jit.lshift64(CCallHelpers::TrustedImm32(63), scratchGPR);
+                        jit.urshift64(CCallHelpers::TrustedImm32(1), valueGPR);
+                        jit.or64(scratchGPR, valueGPR);
+                        jit.popToRestore(scratchGPR);
+                        break;
+                    }
+                    if (m_numCheckArgs == 4) {
+                        if (args[1] == args[3])
+                            Inst(Sub64, nullptr, args[2], args[3]).generate(jit, context);
+                        else if (args[2] == args[3])
+                            Inst(Sub64, nullptr, args[1], args[3]).generate(jit, context);
+                    } else if (m_numCheckArgs == 3)
+                        Inst(Sub64, nullptr, args[1], args[2]).generate(jit, context);
+                    break;
+                case BranchSub32:
+                    Inst(Add32, nullptr, args[1], args[2]).generate(jit, context);
+                    break;
+                case BranchSub64:
+                    Inst(Add64, nullptr, args[1], args[2]).generate(jit, context);
+                    break;
+                case BranchNeg32:
+                    Inst(Neg32, nullptr, args[1]).generate(jit, context);
+                    break;
+                case BranchNeg64:
+                    Inst(Neg64, nullptr, args[1]).generate(jit, context);
+                    break;
+                default:
+                    break;
+                }
+                
+                value->m_generator->run(jit, StackmapGenerationParams(value, reps, context));
+            }));
+
+    return CCallHelpers::Jump(); // As far as Air thinks, we are not a terminal.
+}
+
+void CheckSpecial::dumpImpl(PrintStream& out) const
+{
+    out.print(m_checkKind, "(", m_numCheckArgs, ",", m_stackmapRole, ")");
+}
+
+void CheckSpecial::deepDumpImpl(PrintStream& out) const
+{
+    out.print("B3::CheckValue lowered to ", m_checkKind, " with ", m_numCheckArgs, " args.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CheckSpecial.h b/Source/JavaScriptCore/b3/B3CheckSpecial.h
new file mode 100644
index 000000000..aa7f2feab
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CheckSpecial.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirKind.h"
+#include "B3StackmapSpecial.h"
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace B3 {
+
+namespace Air {
+struct Inst;
+}
+
+// We want to lower Check instructions to a branch, but then we want to route that branch to our
+// out-of-line code instead of doing anything else. For this reason, a CheckSpecial will remember
+// which branch opcode we have selected along with the number of args in the overload we want. It
+// will create an Inst with that opcode plus the appropriate args from the owning Inst whenever you
+// call any of the callbacks.
+//
+// Note that for CheckAdd, CheckSub, and CheckMul we expect that the B3 arguments are the reverse
+// of the Air arguments (Add(a, b) => Add32 b, a). Except:
+// - CheckSub(0, x), which turns into BranchNeg32 x.
+// - CheckMul(a, b), which turns into Mul32 b, a but we pass Any for a's ValueRep.
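+//
+// As a hedged illustration (not part of the original comment), a B3
+// CheckAdd(a, b) carrying stackmap children might lower to an Air Patch whose
+// argument list looks roughly like:
+//
+//     Patch &special, Overflow, b, a, <stackmap args...>
+//
+// Here args[0] names this Special, args[1..m_numCheckArgs] feed the hidden
+// BranchAdd32/BranchAdd64, and the remaining args carry the stackmap reps.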
+
+class CheckSpecial : public StackmapSpecial {
+public:
+    // Support for hash consing these things.
+    class Key {
+    public:
+        Key()
+            : m_stackmapRole(SameAsRep)
+            , m_numArgs(0)
+        {
+        }
+        
+        Key(Air::Kind kind, unsigned numArgs, RoleMode stackmapRole = SameAsRep)
+            : m_kind(kind)
+            , m_stackmapRole(stackmapRole)
+            , m_numArgs(numArgs)
+        {
+        }
+
+        explicit Key(const Air::Inst&);
+
+        bool operator==(const Key& other) const
+        {
+            return m_kind == other.m_kind
+                && m_numArgs == other.m_numArgs
+                && m_stackmapRole == other.m_stackmapRole;
+        }
+
+        bool operator!=(const Key& other) const
+        {
+            return !(*this == other);
+        }
+
+        explicit operator bool() const { return *this != Key(); }
+
+        Air::Kind kind() const { return m_kind; }
+        unsigned numArgs() const { return m_numArgs; }
+        RoleMode stackmapRole() const { return m_stackmapRole; }
+
+        void dump(PrintStream& out) const;
+
+        Key(WTF::HashTableDeletedValueType)
+            : m_stackmapRole(SameAsRep)
+            , m_numArgs(1)
+        {
+        }
+
+        bool isHashTableDeletedValue() const
+        {
+            return *this == Key(WTF::HashTableDeletedValue);
+        }
+
+        unsigned hash() const
+        {
+            // Seriously, we don't need to be smart here. It just doesn't matter.
+            return m_kind.hash() + m_numArgs + m_stackmapRole;
+        }
+        
+    private:
+        Air::Kind m_kind;
+        RoleMode m_stackmapRole;
+        unsigned m_numArgs;
+    };
+    
+    CheckSpecial(Air::Kind, unsigned numArgs, RoleMode stackmapRole = SameAsRep);
+    CheckSpecial(const Key&);
+    ~CheckSpecial();
+
+protected:
+    // Constructs and returns the Inst representing the branch that this will use.
+    Air::Inst hiddenBranch(const Air::Inst&) const;
+
+    void forEachArg(Air::Inst&, const ScopedLambda<Air::Inst::EachArgCallback>&) override;
+    bool isValid(Air::Inst&) override;
+    bool admitsStack(Air::Inst&, unsigned argIndex) override;
+    std::optional<unsigned> shouldTryAliasingDef(Air::Inst&) override;
+
+    // NOTE: the generate method will generate the hidden branch and then register a LatePath that
+    // generates the stackmap. Super crazy dude!
+
+    CCallHelpers::Jump generate(Air::Inst&, CCallHelpers&, Air::GenerationContext&) override;
+
+    void dumpImpl(PrintStream&) const override;
+    void deepDumpImpl(PrintStream&) const override;
+
+private:
+    Air::Kind m_checkKind;
+    RoleMode m_stackmapRole;
+    unsigned m_numCheckArgs;
+};
+
+struct CheckSpecialKeyHash {
+    static unsigned hash(const CheckSpecial::Key& key) { return key.hash(); }
+    static bool equal(const CheckSpecial::Key& a, const CheckSpecial::Key& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::CheckSpecial::Key> {
+    typedef JSC::B3::CheckSpecialKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::CheckSpecial::Key> : SimpleClassHashTraits<JSC::B3::CheckSpecial::Key> {
+    // I don't want to think about this very hard; it's not worth it. I'm going to be conservative.
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CheckValue.cpp b/Source/JavaScriptCore/b3/B3CheckValue.cpp
new file mode 100644
index 000000000..79b6c6e72
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CheckValue.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3CheckValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+CheckValue::~CheckValue()
+{
+}
+
+void CheckValue::convertToAdd()
+{
+    RELEASE_ASSERT(opcode() == CheckAdd || opcode() == CheckSub || opcode() == CheckMul);
+    m_kind = CheckAdd;
+}
+
+Value* CheckValue::cloneImpl() const
+{
+    return new CheckValue(*this);
+}
+
+// Use this form for CheckAdd, CheckSub, and CheckMul.
+CheckValue::CheckValue(Kind kind, Origin origin, Value* left, Value* right)
+    : StackmapValue(CheckedOpcode, kind, left->type(), origin)
+{
+    ASSERT(B3::isInt(type()));
+    ASSERT(left->type() == right->type());
+    ASSERT(kind == CheckAdd || kind == CheckSub || kind == CheckMul);
+    append(ConstrainedValue(left, ValueRep::WarmAny));
+    append(ConstrainedValue(right, ValueRep::WarmAny));
+}
+
+// Use this form for Check.
+CheckValue::CheckValue(Kind kind, Origin origin, Value* predicate)
+    : StackmapValue(CheckedOpcode, kind, Void, origin)
+{
+    ASSERT(kind == Check);
+    append(ConstrainedValue(predicate, ValueRep::WarmAny));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CheckValue.h b/Source/JavaScriptCore/b3/B3CheckValue.h
new file mode 100644
index 000000000..e3d94bace
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CheckValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackmapValue.h"
+
+namespace JSC { namespace B3 {
+
+class CheckValue : public StackmapValue {
+public:
+    static bool accepts(Kind kind)
+    {
+        switch (kind.opcode()) {
+        case CheckAdd:
+        case CheckSub:
+        case CheckMul:
+        case Check:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    ~CheckValue();
+
+    void convertToAdd();
+
+protected:
+    Value* cloneImpl() const override;
+    
+private:
+    friend class Procedure;
+
+    // Use this form for CheckAdd, CheckSub, and CheckMul.
+    JS_EXPORT_PRIVATE CheckValue(Kind, Origin, Value* left, Value* right);
+
+    // Use this form for Check.
+    JS_EXPORT_PRIVATE CheckValue(Kind, Origin, Value* predicate);
+};
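+
+// A hedged usage sketch (assumes a Procedure `proc`, a BasicBlock* `block`, an
+// Origin `origin`, and int32 Values `left` and `right`; the generator body is
+// illustrative only):
+//
+//     CheckValue* check = block->appendNew<CheckValue>(proc, CheckAdd, origin, left, right);
+//     check->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams&) {
+//         jit.breakpoint(); // Runs only when the checked add overflows.
+//     });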
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Common.cpp b/Source/JavaScriptCore/b3/B3Common.cpp
new file mode 100644
index 000000000..60da36291
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Common.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Common.h"
+
+#if ENABLE(B3_JIT)
+
+#include "DFGCommon.h"
+#include "FTLState.h"
+#include "Options.h"
+
+namespace JSC { namespace B3 {
+
+bool shouldDumpIR(B3ComplitationMode mode)
+{
+#if ENABLE(FTL_JIT)
+    return FTL::verboseCompilationEnabled() || FTL::shouldDumpDisassembly() || shouldDumpIRAtEachPhase(mode);
+#else
+    return shouldDumpIRAtEachPhase(mode);
+#endif
+}
+
+bool shouldDumpIRAtEachPhase(B3ComplitationMode mode)
+{
+    if (mode == B3Mode)
+        return Options::dumpGraphAtEachPhase() || Options::dumpB3GraphAtEachPhase();
+    return Options::dumpGraphAtEachPhase() || Options::dumpAirGraphAtEachPhase();
+}
+
+bool shouldValidateIR()
+{
+    return DFG::validationEnabled() || shouldValidateIRAtEachPhase();
+}
+
+bool shouldValidateIRAtEachPhase()
+{
+    return Options::validateGraphAtEachPhase();
+}
+
+bool shouldSaveIRBeforePhase()
+{
+    return Options::verboseValidationFailure();
+}
+
+bool shouldMeasurePhaseTiming()
+{
+    return Options::logB3PhaseTimes();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Common.h b/Source/JavaScriptCore/b3/B3Common.h
new file mode 100644
index 000000000..41e8ee096
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Common.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "JSExportMacros.h"
+#include <wtf/StdLibExtras.h>
+
+namespace JSC { namespace B3 {
+
+inline bool is64Bit() { return sizeof(void*) == 8; }
+inline bool is32Bit() { return !is64Bit(); }
+
+enum B3ComplitationMode {
+    B3Mode,
+    AirMode
+};
+
+JS_EXPORT_PRIVATE bool shouldDumpIR(B3ComplitationMode);
+bool shouldDumpIRAtEachPhase(B3ComplitationMode);
+bool shouldValidateIR();
+bool shouldValidateIRAtEachPhase();
+bool shouldSaveIRBeforePhase();
+bool shouldMeasurePhaseTiming();
+
+template<typename InputType, typename BitsType>
+inline bool isIdentical(InputType left, InputType right)
+{
+    BitsType leftBits = bitwise_cast<BitsType>(left);
+    BitsType rightBits = bitwise_cast<BitsType>(right);
+    return leftBits == rightBits;
+}
+
+inline bool isIdentical(int32_t left, int32_t right)
+{
+    return isIdentical<int32_t, int32_t>(left, right);
+}
+
+inline bool isIdentical(int64_t left, int64_t right)
+{
+    return isIdentical<int64_t, int64_t>(left, right);
+}
+
+inline bool isIdentical(double left, double right)
+{
+    return isIdentical<double, int64_t>(left, right);
+}
+
+inline bool isIdentical(float left, float right)
+{
+    return isIdentical<float, int32_t>(left, right);
+}
+
+template<typename ResultType, typename InputType, typename BitsType>
+inline bool isRepresentableAsImpl(InputType originalValue)
+{
+    // Convert the original value to the desired result type.
+    ResultType result = static_cast<ResultType>(originalValue);
+
+    // Convert the converted value back to the original type. The original value is representable
+    // using the new type if such round-tripping doesn't lose bits.
+    InputType newValue = static_cast<InputType>(result);
+
+    return isIdentical<InputType, BitsType>(originalValue, newValue);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(int32_t value)
+{
+    return isRepresentableAsImpl<ResultType, int32_t, int32_t>(value);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(int64_t value)
+{
+    return isRepresentableAsImpl<ResultType, int64_t, int64_t>(value);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(double value)
+{
+    return isRepresentableAsImpl<ResultType, double, int64_t>(value);
+}
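+
+// For instance, isRepresentableAs<int32_t>(int64_t(1) << 40) is false, since
+// truncating to int32_t and widening back does not round-trip, while
+// isRepresentableAs<int32_t>(int64_t(42)) round-trips and is true.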
+
+template<typename IntType>
+static IntType chillDiv(IntType numerator, IntType denominator)
+{
+    if (!denominator)
+        return 0;
+    if (denominator == -1 && numerator == std::numeric_limits<IntType>::min())
+        return std::numeric_limits<IntType>::min();
+    return numerator / denominator;
+}
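+
+// "Chill" means that the two cases which are undefined behavior for C++ signed
+// division get defined results instead, for example:
+//
+//     chillDiv(42, 0) == 0
+//     chillDiv(std::numeric_limits<int32_t>::min(), -1)
+//         == std::numeric_limits<int32_t>::min()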
+
+template<typename IntType>
+static IntType chillMod(IntType numerator, IntType denominator)
+{
+    if (!denominator)
+        return 0;
+    if (denominator == -1 && numerator == std::numeric_limits<IntType>::min())
+        return 0;
+    return numerator % denominator;
+}
+
+template<typename IntType>
+static IntType chillUDiv(IntType numerator, IntType denominator)
+{
+    typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+    UnsignedIntType unsignedNumerator = static_cast<UnsignedIntType>(numerator);
+    UnsignedIntType unsignedDenominator = static_cast<UnsignedIntType>(denominator);
+    if (!unsignedDenominator)
+        return 0;
+    return unsignedNumerator / unsignedDenominator;
+}
+
+template<typename IntType>
+static IntType chillUMod(IntType numerator, IntType denominator)
+{
+    typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+    UnsignedIntType unsignedNumerator = static_cast<UnsignedIntType>(numerator);
+    UnsignedIntType unsignedDenominator = static_cast<UnsignedIntType>(denominator);
+    if (!unsignedDenominator)
+        return 0;
+    return unsignedNumerator % unsignedDenominator;
+}
+
+template<typename IntType>
+static IntType rotateRight(IntType value, int32_t shift)
+{
+    typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+    UnsignedIntType uValue = static_cast<UnsignedIntType>(value);
+    int32_t bits = sizeof(IntType) * 8;
+    int32_t mask = bits - 1;
+    shift &= mask;
+    return (uValue >> shift) | (uValue << ((bits - shift) & mask));
+}
+
+template<typename IntType>
+static IntType rotateLeft(IntType value, int32_t shift)
+{
+    typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+    UnsignedIntType uValue = static_cast<UnsignedIntType>(value);
+    int32_t bits = sizeof(IntType) * 8;
+    int32_t mask = bits - 1;
+    shift &= mask;
+    return (uValue << shift) | (uValue >> ((bits - shift) & mask));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Commutativity.cpp b/Source/JavaScriptCore/b3/B3Commutativity.cpp
new file mode 100644
index 000000000..5de43e648
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Commutativity.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Commutativity.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Commutativity commutativity)
+{
+    switch (commutativity) {
+    case Commutative:
+        out.print("Commutative");
+        return;
+    case NotCommutative:
+        out.print("NotCommutative");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Commutativity.h b/Source/JavaScriptCore/b3/B3Commutativity.h
new file mode 100644
index 000000000..bf0de7537
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Commutativity.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+enum Commutativity {
+    Commutative,
+    NotCommutative
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::B3::Commutativity);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Compilation.cpp b/Source/JavaScriptCore/b3/B3Compilation.cpp
new file mode 100644
index 000000000..9e20a6b84
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Compilation.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Compilation.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproducts.h"
+#include "CCallHelpers.h"
+
+namespace JSC { namespace B3 {
+
+Compilation::Compilation(MacroAssemblerCodeRef codeRef, std::unique_ptr<OpaqueByproducts> byproducts)
+    : m_codeRef(codeRef)
+    , m_byproducts(WTFMove(byproducts))
+{
+}
+
+Compilation::Compilation(Compilation&& other)
+    : m_codeRef(WTFMove(other.m_codeRef))
+    , m_byproducts(WTFMove(other.m_byproducts))
+{
+}
+
+Compilation::~Compilation()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Compilation.h b/Source/JavaScriptCore/b3/B3Compilation.h
new file mode 100644
index 000000000..739865256
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Compilation.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class VM;
+
+namespace B3 {
+
+class OpaqueByproducts;
+class Procedure;
+
+// This class is a way to keep the result of a B3 compilation alive
+// and runnable.
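+//
+// A hedged usage sketch (assumes `proc` was built to compute an int32 from two
+// int32 arguments; the function-pointer cast is the caller's responsibility):
+//
+//     Compilation compilation = B3::compile(vm, proc);
+//     auto function = bitwise_cast<int32_t (*)(int32_t, int32_t)>(
+//         compilation.code().executableAddress());
+//     int32_t result = function(1, 2);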
+
+class Compilation {
+    WTF_MAKE_NONCOPYABLE(Compilation);
+    WTF_MAKE_FAST_ALLOCATED;
+
+public:
+    JS_EXPORT_PRIVATE Compilation(MacroAssemblerCodeRef, std::unique_ptr<OpaqueByproducts>);
+    JS_EXPORT_PRIVATE Compilation(Compilation&&);
+    JS_EXPORT_PRIVATE ~Compilation();
+
+    MacroAssemblerCodePtr code() const { return m_codeRef.code(); }
+    MacroAssemblerCodeRef codeRef() const { return m_codeRef; }
+    
+    CString disassembly() const { return m_codeRef.disassembly(); }
+
+private:
+    MacroAssemblerCodeRef m_codeRef;
+    std::unique_ptr<OpaqueByproducts> m_byproducts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Compile.cpp b/Source/JavaScriptCore/b3/B3Compile.cpp
new file mode 100644
index 000000000..980390ac0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Compile.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Compile.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Generate.h"
+#include "B3OpaqueByproducts.h"
+#include "B3Procedure.h"
+#include "B3TimingScope.h"
+#include "CCallHelpers.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace B3 {
+
+Compilation compile(VM& vm, Procedure& proc, unsigned optLevel)
+{
+    TimingScope timingScope("Compilation");
+    
+    prepareForGeneration(proc, optLevel);
+    
+    CCallHelpers jit(&vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(vm, jit, nullptr);
+
+    return Compilation(FINALIZE_CODE(linkBuffer, ("B3::Compilation")), proc.releaseByproducts());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Compile.h b/Source/JavaScriptCore/b3/B3Compile.h
new file mode 100644
index 000000000..37db1608f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Compile.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Compilation.h"
+
+namespace JSC {
+
+class VM;
+
+namespace B3 {
+
+class Procedure;
+
+// This is a fool-proof API for compiling a Procedure to code and then running that code. You compile
+// a Procedure using this API by doing:
+//
+// Compilation compilation = B3::compile(vm, proc);
+//
+// Then you keep the Compilation object alive for as long as you want to be able to run the code.
+// If this API feels too high-level, you can use B3::generate() directly.
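+//
+// A hedged sketch of a trivial Procedure to feed this API (mirroring the B3
+// test style; it returns its first register argument):
+//
+//     Procedure proc;
+//     BasicBlock* root = proc.addBlock();
+//     root->appendNewControlValue(
+//         proc, Return, Origin(),
+//         root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));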
+
+JS_EXPORT_PRIVATE Compilation compile(VM&, Procedure&, unsigned optLevel = 1);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h b/Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h
new file mode 100644
index 000000000..8c17ed669
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * This contains code taken from LLVM's APInt class. That code implements finding the magic
+ * numbers for strength-reducing division. The LLVM code on which this code is based was
+ * implemented using "Hacker's Delight", Henry S. Warren, Jr., chapter 10.
+ *
+ * ==============================================================================
+ * LLVM Release License
+ * ==============================================================================
+ * University of Illinois/NCSA
+ * Open Source License
+ * 
+ * Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign.
+ * All rights reserved.
+ * 
+ * Developed by:
+ * 
+ *     LLVM Team
+ * 
+ *     University of Illinois at Urbana-Champaign
+ * 
+ *     http://llvm.org
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal with
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ * 
+ *     * Redistributions of source code must retain the above copyright notice,
+ *       this list of conditions and the following disclaimers.
+ * 
+ *     * Redistributions in binary form must reproduce the above copyright notice,
+ *       this list of conditions and the following disclaimers in the
+ *       documentation and/or other materials provided with the distribution.
+ * 
+ *     * Neither the names of the LLVM Team, University of Illinois at
+ *       Urbana-Champaign, nor the names of its contributors may be used to
+ *       endorse or promote products derived from this Software without specific
+ *       prior written permission.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+template<typename T>
+struct DivisionMagic {
+    T magicMultiplier;
+    unsigned shift;
+};
+
+// This contains code taken from LLVM's APInt::magic(). It's modestly adapted to our style, but
+// not completely, to make it easier to apply their changes in the future.
+template<typename T>
+DivisionMagic<T> computeDivisionMagic(T divisor)
+{
+    typedef typename std::make_unsigned<T>::type UnsignedT;
+    UnsignedT d = divisor;
+    unsigned p;
+    UnsignedT ad, anc, delta, q1, r1, q2, r2, t;
+    UnsignedT signedMin = static_cast<UnsignedT>(std::numeric_limits<T>::min());
+    DivisionMagic<T> mag;
+    unsigned bitWidth = sizeof(divisor) * 8;
+
+    // This code doesn't like to think of signedness as a type. Instead it likes to think that
+    // operations have signedness. This is how we generally do it in B3 as well. For this reason,
+    // we cast all the operated values to unsigned once, and convert back to signed later.
+    // Only `divisor` has signedness here.
+
+    ad = divisor < 0 ? -divisor : divisor; // -(signed min value) < signed max value. So there is no loss.
+    t = signedMin + (d >> (bitWidth - 1));
+    anc = t - 1 - (t % ad);   // absolute value of nc
+    p = bitWidth - 1;    // initialize p
+    q1 = signedMin / anc;   // initialize q1 = 2p/abs(nc)
+    r1 = signedMin - q1*anc;    // initialize r1 = rem(2p,abs(nc))
+    q2 = signedMin / ad;    // initialize q2 = 2p/abs(d)
+    r2 = signedMin - q2*ad;     // initialize r2 = rem(2p,abs(d))
+    do {
+        p = p + 1;
+        q1 = q1 << 1;          // update q1 = 2p/abs(nc)
+        r1 = r1 << 1;          // update r1 = rem(2p/abs(nc))
+        if (r1 >= anc) {  // must be unsigned comparison
+            q1 = q1 + 1;
+            r1 = r1 - anc;
+        }
+        q2 = q2 << 1;          // update q2 = 2p/abs(d)
+        r2 = r2 << 1;          // update r2 = rem(2p/abs(d))
+        if (r2 >= ad) {   // must be unsigned comparison
+            q2 = q2 + 1;
+            r2 = r2 - ad;
+        }
+        delta = ad - r2;
+    } while (q1 < delta || (q1 == delta && r1 == 0));
+
+    mag.magicMultiplier = q2 + 1;
+    if (divisor < 0)
+        mag.magicMultiplier = -mag.magicMultiplier;   // resulting magic number
+    mag.shift = p - bitWidth;          // resulting shift
+
+    return mag;
+}
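+
+// Worked example (illustrative, not exercised by the build): for int32_t
+// division by 7, this yields magicMultiplier == 0x92492493 (-1840700269) and
+// shift == 2, so n / 7 can be strength-reduced roughly as:
+//
+//     int32_t q = (int64_t(mag.magicMultiplier) * n) >> 32; // high 32 bits
+//     q += n;                  // correct for the negative multiplier
+//     q >>= mag.shift;         // shift == 2
+//     q += uint32_t(q) >> 31;  // round toward zero for negative quotients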
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Const32Value.cpp b/Source/JavaScriptCore/b3/B3Const32Value.cpp
new file mode 100644
index 000000000..49a7453a4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Const32Value.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Const32Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+Const32Value::~Const32Value()
+{
+}
+
+Value* Const32Value::negConstant(Procedure& proc) const
+{
+    return proc.add<Const32Value>(origin(), -m_value);
+}
+
+Value* Const32Value::addConstant(Procedure& proc, int32_t other) const
+{
+    return proc.add<Const32Value>(origin(), m_value + other);
+}
+
+Value* Const32Value::addConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value + other->asInt32());
+}
+
+Value* Const32Value::subConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value - other->asInt32());
+}
+
+Value* Const32Value::mulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value * other->asInt32());
+}
+
+Value* Const32Value::checkAddConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    CheckedInt32 result = CheckedInt32(m_value) + CheckedInt32(other->asInt32());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkSubConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    CheckedInt32 result = CheckedInt32(m_value) - CheckedInt32(other->asInt32());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkMulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    CheckedInt32 result = CheckedInt32(m_value) * CheckedInt32(other->asInt32());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkNegConstant(Procedure& proc) const
+{
+    if (m_value == -m_value)
+        return nullptr;
+    return negConstant(proc);
+}
+
+Value* Const32Value::divConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), chillDiv(m_value, other->asInt32()));
+}
+
+Value* Const32Value::uDivConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), chillUDiv(m_value, other->asInt32()));
+}
+
+Value* Const32Value::modConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), chillMod(m_value, other->asInt32()));
+}
+
+Value* Const32Value::uModConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), chillUMod(m_value, other->asInt32()));
+}
+
+Value* Const32Value::bitAndConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value & other->asInt32());
+}
+
+Value* Const32Value::bitOrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value | other->asInt32());
+}
+
+Value* Const32Value::bitXorConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value ^ other->asInt32());
+}
+
+Value* Const32Value::shlConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value << (other->asInt32() & 31));
+}
+
+Value* Const32Value::sShrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value >> (other->asInt32() & 31));
+}
+
+Value* Const32Value::zShrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), static_cast<int32_t>(static_cast<uint32_t>(m_value) >> (other->asInt32() & 31)));
+}
+
+Value* Const32Value::rotRConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), rotateRight(m_value, other->asInt32()));
+}
+
+Value* Const32Value::rotLConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), rotateLeft(m_value, other->asInt32()));
+}
+
+Value* Const32Value::bitwiseCastConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), bitwise_cast<float>(m_value));
+}
+
+Value* Const32Value::iToDConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* Const32Value::iToFConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+TriState Const32Value::equalConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value == other->asInt32());
+}
+
+TriState Const32Value::notEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value != other->asInt32());
+}
+
+TriState Const32Value::lessThanConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value < other->asInt32());
+}
+
+TriState Const32Value::greaterThanConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value > other->asInt32());
+}
+
+TriState Const32Value::lessEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value <= other->asInt32());
+}
+
+TriState Const32Value::greaterEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value >= other->asInt32());
+}
+
+TriState Const32Value::aboveConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(static_cast<uint32_t>(m_value) > static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::belowConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(static_cast<uint32_t>(m_value) < static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::aboveEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(static_cast<uint32_t>(m_value) >= static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::belowEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(static_cast<uint32_t>(m_value) <= static_cast<uint32_t>(other->asInt32()));
+}
+
+void Const32Value::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, m_value);
+}
+
+Value* Const32Value::cloneImpl() const
+{
+    return new Const32Value(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Const32Value.h b/Source/JavaScriptCore/b3/B3Const32Value.h
new file mode 100644
index 000000000..af4d08b05
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Const32Value.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE Const32Value : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Const32; }
+    
+    ~Const32Value();
+    
+    int32_t value() const { return m_value; }
+
+    Value* negConstant(Procedure&) const override;
+    Value* addConstant(Procedure&, int32_t other) const override;
+    Value* addConstant(Procedure&, const Value* other) const override;
+    Value* subConstant(Procedure&, const Value* other) const override;
+    Value* mulConstant(Procedure&, const Value* other) const override;
+    Value* checkAddConstant(Procedure&, const Value* other) const override;
+    Value* checkSubConstant(Procedure&, const Value* other) const override;
+    Value* checkMulConstant(Procedure&, const Value* other) const override;
+    Value* checkNegConstant(Procedure&) const override;
+    Value* divConstant(Procedure&, const Value* other) const override;
+    Value* uDivConstant(Procedure&, const Value* other) const override;
+    Value* modConstant(Procedure&, const Value* other) const override;
+    Value* uModConstant(Procedure&, const Value* other) const override;
+    Value* bitAndConstant(Procedure&, const Value* other) const override;
+    Value* bitOrConstant(Procedure&, const Value* other) const override;
+    Value* bitXorConstant(Procedure&, const Value* other) const override;
+    Value* shlConstant(Procedure&, const Value* other) const override;
+    Value* sShrConstant(Procedure&, const Value* other) const override;
+    Value* zShrConstant(Procedure&, const Value* other) const override;
+    Value* rotRConstant(Procedure&, const Value* other) const override;
+    Value* rotLConstant(Procedure&, const Value* other) const override;
+    Value* bitwiseCastConstant(Procedure&) const override;
+    Value* iToDConstant(Procedure&) const override;
+    Value* iToFConstant(Procedure&) const override;
+
+    TriState equalConstant(const Value* other) const override;
+    TriState notEqualConstant(const Value* other) const override;
+    TriState lessThanConstant(const Value* other) const override;
+    TriState greaterThanConstant(const Value* other) const override;
+    TriState lessEqualConstant(const Value* other) const override;
+    TriState greaterEqualConstant(const Value* other) const override;
+    TriState aboveConstant(const Value* other) const override;
+    TriState belowConstant(const Value* other) const override;
+    TriState aboveEqualConstant(const Value* other) const override;
+    TriState belowEqualConstant(const Value* other) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+    friend class Procedure;
+
+    Const32Value(Origin origin, int32_t value)
+        : Value(CheckedOpcode, Const32, Int32, origin)
+        , m_value(value)
+    {
+    }
+
+private:
+    int32_t m_value;
+};
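+
+// Usage sketch (illustrative): Procedure::add forwards to the private
+// constructor via the friendship declared above, so clients write e.g.
+//
+//     Value* c = proc.add<Const32Value>(Origin(), 42);
+//     int32_t payload = c->asInt32(); // 42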
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Const64Value.cpp b/Source/JavaScriptCore/b3/B3Const64Value.cpp
new file mode 100644
index 000000000..4f7b86b2e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Const64Value.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Const64Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+Const64Value::~Const64Value()
+{
+}
+
+Value* Const64Value::negConstant(Procedure& proc) const
+{
+    return proc.add<Const64Value>(origin(), -m_value);
+}
+
+Value* Const64Value::addConstant(Procedure& proc, int32_t other) const
+{
+    return proc.add<Const64Value>(origin(), m_value + static_cast<int64_t>(other));
+}
+
+Value* Const64Value::addConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value + other->asInt64());
+}
+
+Value* Const64Value::subConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value - other->asInt64());
+}
+
+Value* Const64Value::mulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value * other->asInt64());
+}
+
+Value* Const64Value::checkAddConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    CheckedInt64 result = CheckedInt64(m_value) + CheckedInt64(other->asInt64());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
+Value* Const64Value::checkSubConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    CheckedInt64 result = CheckedInt64(m_value) - CheckedInt64(other->asInt64());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
+Value* Const64Value::checkMulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    CheckedInt64 result = CheckedInt64(m_value) * CheckedInt64(other->asInt64());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
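+// Note: m_value == -m_value holds only for 0 and INT64_MIN. Bailing out covers
+// INT64_MIN, whose negation overflows back to itself in two's complement; 0 is
+// also caught, which is merely conservative.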
+Value* Const64Value::checkNegConstant(Procedure& proc) const
+{
+    if (m_value == -m_value)
+        return nullptr;
+    return negConstant(proc);
+}
+
+Value* Const64Value::divConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), chillDiv(m_value, other->asInt64()));
+}
+
+Value* Const64Value::uDivConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), chillUDiv(m_value, other->asInt64()));
+}
+
+Value* Const64Value::modConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), chillMod(m_value, other->asInt64()));
+}
+
+Value* Const64Value::uModConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), chillUMod(m_value, other->asInt64()));
+}
+
+Value* Const64Value::bitAndConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value & other->asInt64());
+}
+
+Value* Const64Value::bitOrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value | other->asInt64());
+}
+
+Value* Const64Value::bitXorConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value ^ other->asInt64());
+}
+
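+// The shift folds below mask the amount with 63, matching 64-bit shift
+// behavior on x86-64 and ARM64 and avoiding C++ undefined behavior for
+// out-of-range shift amounts.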
+Value* Const64Value::shlConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value << (other->asInt32() & 63));
+}
+
+Value* Const64Value::sShrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value >> (other->asInt32() & 63));
+}
+
+Value* Const64Value::zShrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), static_cast<int64_t>(static_cast<uint64_t>(m_value) >> (other->asInt32() & 63)));
+}
+
+Value* Const64Value::rotRConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), rotateRight(m_value, other->asInt32()));
+}
+
+Value* Const64Value::rotLConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), rotateLeft(m_value, other->asInt32()));
+}
+
+Value* Const64Value::bitwiseCastConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), bitwise_cast<double>(m_value));
+}
+
+Value* Const64Value::iToDConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* Const64Value::iToFConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+TriState Const64Value::equalConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value == other->asInt64());
+}
+
+TriState Const64Value::notEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value != other->asInt64());
+}
+
+TriState Const64Value::lessThanConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value < other->asInt64());
+}
+
+TriState Const64Value::greaterThanConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value > other->asInt64());
+}
+
+TriState Const64Value::lessEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value <= other->asInt64());
+}
+
+TriState Const64Value::greaterEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value >= other->asInt64());
+}
+
+TriState Const64Value::aboveConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(static_cast<uint64_t>(m_value) > static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::belowConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(static_cast<uint64_t>(m_value) < static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::aboveEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(static_cast<uint64_t>(m_value) >= static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::belowEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(static_cast<uint64_t>(m_value) <= static_cast<uint64_t>(other->asInt64()));
+}
+
+void Const64Value::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, m_value);
+}
+
+Value* Const64Value::cloneImpl() const
+{
+    return new Const64Value(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Const64Value.h b/Source/JavaScriptCore/b3/B3Const64Value.h
new file mode 100644
index 000000000..3efd55847
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Const64Value.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE Const64Value : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Const64; }
+    
+    ~Const64Value();
+    
+    int64_t value() const { return m_value; }
+
+    Value* negConstant(Procedure&) const override;
+    Value* addConstant(Procedure&, int32_t other) const override;
+    Value* addConstant(Procedure&, const Value* other) const override;
+    Value* subConstant(Procedure&, const Value* other) const override;
+    Value* mulConstant(Procedure&, const Value* other) const override;
+    Value* checkAddConstant(Procedure&, const Value* other) const override;
+    Value* checkSubConstant(Procedure&, const Value* other) const override;
+    Value* checkMulConstant(Procedure&, const Value* other) const override;
+    Value* checkNegConstant(Procedure&) const override;
+    Value* divConstant(Procedure&, const Value* other) const override;
+    Value* uDivConstant(Procedure&, const Value* other) const override;
+    Value* modConstant(Procedure&, const Value* other) const override;
+    Value* uModConstant(Procedure&, const Value* other) const override;
+    Value* bitAndConstant(Procedure&, const Value* other) const override;
+    Value* bitOrConstant(Procedure&, const Value* other) const override;
+    Value* bitXorConstant(Procedure&, const Value* other) const override;
+    Value* shlConstant(Procedure&, const Value* other) const override;
+    Value* sShrConstant(Procedure&, const Value* other) const override;
+    Value* zShrConstant(Procedure&, const Value* other) const override;
+    Value* rotRConstant(Procedure&, const Value* other) const override;
+    Value* rotLConstant(Procedure&, const Value* other) const override;
+    Value* bitwiseCastConstant(Procedure&) const override;
+    Value* iToDConstant(Procedure&) const override;
+    Value* iToFConstant(Procedure&) const override;
+
+    TriState equalConstant(const Value* other) const override;
+    TriState notEqualConstant(const Value* other) const override;
+    TriState lessThanConstant(const Value* other) const override;
+    TriState greaterThanConstant(const Value* other) const override;
+    TriState lessEqualConstant(const Value* other) const override;
+    TriState greaterEqualConstant(const Value* other) const override;
+    TriState aboveConstant(const Value* other) const override;
+    TriState belowConstant(const Value* other) const override;
+    TriState aboveEqualConstant(const Value* other) const override;
+    TriState belowEqualConstant(const Value* other) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+    friend class Procedure;
+
+    Const64Value(Origin origin, int64_t value)
+        : Value(CheckedOpcode, Const64, Int64, origin)
+        , m_value(value)
+    {
+    }
+    
+private:
+    int64_t m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp b/Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp
new file mode 100644
index 000000000..0a7d7482c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ConstDoubleValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstFloatValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+ConstDoubleValue::~ConstDoubleValue()
+{
+}
+
+Value* ConstDoubleValue::negConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), -m_value);
+}
+
+Value* ConstDoubleValue::addConstant(Procedure& proc, int32_t other) const
+{
+    return proc.add<ConstDoubleValue>(origin(), m_value + static_cast<double>(other));
+}
+
+Value* ConstDoubleValue::addConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), m_value + other->asDouble());
+}
+
+Value* ConstDoubleValue::subConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), m_value - other->asDouble());
+}
+
+Value* ConstDoubleValue::mulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), m_value * other->asDouble());
+}
+
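+// The bitwise folds below operate on the raw IEEE 754 bit pattern: the double
+// is reinterpreted as uint64_t, combined, and reinterpreted back, mirroring
+// how BitAnd/BitOr/BitXor behave on Double values (e.g. abs via masking the
+// sign bit).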
+Value* ConstDoubleValue::bitAndConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    double result = bitwise_cast<double>(bitwise_cast<uint64_t>(m_value) & bitwise_cast<uint64_t>(other->asDouble()));
+    return proc.add<ConstDoubleValue>(origin(), result);
+}
+
+Value* ConstDoubleValue::bitOrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    double result = bitwise_cast<double>(bitwise_cast<uint64_t>(m_value) | bitwise_cast<uint64_t>(other->asDouble()));
+    return proc.add<ConstDoubleValue>(origin(), result);
+}
+
+Value* ConstDoubleValue::bitXorConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    double result = bitwise_cast<double>(bitwise_cast<uint64_t>(m_value) ^ bitwise_cast<uint64_t>(other->asDouble()));
+    return proc.add<ConstDoubleValue>(origin(), result);
+}
+
+
+Value* ConstDoubleValue::bitwiseCastConstant(Procedure& proc) const
+{
+    return proc.add<Const64Value>(origin(), bitwise_cast<int64_t>(m_value));
+}
+
+Value* ConstDoubleValue::doubleToFloatConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+Value* ConstDoubleValue::absConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), fabs(m_value));
+}
+
+Value* ConstDoubleValue::ceilConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), ceil(m_value));
+}
+
+Value* ConstDoubleValue::floorConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), floor(m_value));
+}
+
+Value* ConstDoubleValue::sqrtConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), sqrt(m_value));
+}
+
+Value* ConstDoubleValue::divConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), m_value / other->asDouble());
+}
+
+Value* ConstDoubleValue::modConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), fmod(m_value, other->asDouble()));
+}
+
+TriState ConstDoubleValue::equalConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value == other->asDouble());
+}
+
+TriState ConstDoubleValue::notEqualConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value != other->asDouble());
+}
+
+TriState ConstDoubleValue::lessThanConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value < other->asDouble());
+}
+
+TriState ConstDoubleValue::greaterThanConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value > other->asDouble());
+}
+
+TriState ConstDoubleValue::lessEqualConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value <= other->asDouble());
+}
+
+TriState ConstDoubleValue::greaterEqualConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value >= other->asDouble());
+}
+
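+// EqualOrUnordered folds to true whenever either side is NaN, or when both
+// sides compare equal. A NaN constant therefore decides the result on its own,
+// before we even ask whether the other operand is a known double.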
+TriState ConstDoubleValue::equalOrUnorderedConstant(const Value* other) const
+{
+    if (std::isnan(m_value))
+        return TrueTriState;
+
+    if (!other->hasDouble())
+        return MixedTriState;
+    double otherValue = other->asDouble();
+    return triState(std::isunordered(m_value, otherValue) || m_value == otherValue);
+}
+
+void ConstDoubleValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma);
+    out.printf("%le", m_value);
+}
+
+Value* ConstDoubleValue::cloneImpl() const
+{
+    return new ConstDoubleValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstDoubleValue.h b/Source/JavaScriptCore/b3/B3ConstDoubleValue.h
new file mode 100644
index 000000000..fdfaddc1e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstDoubleValue.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ConstDoubleValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == ConstDouble; }
+    
+    ~ConstDoubleValue();
+    
+    double value() const { return m_value; }
+
+    Value* negConstant(Procedure&) const override;
+    Value* addConstant(Procedure&, int32_t other) const override;
+    Value* addConstant(Procedure&, const Value* other) const override;
+    Value* subConstant(Procedure&, const Value* other) const override;
+    Value* divConstant(Procedure&, const Value* other) const override;
+    Value* modConstant(Procedure&, const Value* other) const override;
+    Value* mulConstant(Procedure&, const Value* other) const override;
+    Value* bitAndConstant(Procedure&, const Value* other) const override;
+    Value* bitOrConstant(Procedure&, const Value* other) const override;
+    Value* bitXorConstant(Procedure&, const Value* other) const override;
+    Value* bitwiseCastConstant(Procedure&) const override;
+    Value* doubleToFloatConstant(Procedure&) const override;
+    Value* absConstant(Procedure&) const override;
+    Value* ceilConstant(Procedure&) const override;
+    Value* floorConstant(Procedure&) const override;
+    Value* sqrtConstant(Procedure&) const override;
+
+    TriState equalConstant(const Value* other) const override;
+    TriState notEqualConstant(const Value* other) const override;
+    TriState lessThanConstant(const Value* other) const override;
+    TriState greaterThanConstant(const Value* other) const override;
+    TriState lessEqualConstant(const Value* other) const override;
+    TriState greaterEqualConstant(const Value* other) const override;
+    TriState equalOrUnorderedConstant(const Value* other) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    ConstDoubleValue(Origin origin, double value)
+        : Value(CheckedOpcode, ConstDouble, Double, origin)
+        , m_value(value)
+    {
+    }
+    
+    double m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstFloatValue.cpp b/Source/JavaScriptCore/b3/B3ConstFloatValue.cpp
new file mode 100644
index 000000000..76facae32
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstFloatValue.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ConstFloatValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstDoubleValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+ConstFloatValue::~ConstFloatValue()
+{
+}
+
+Value* ConstFloatValue::negConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), -m_value);
+}
+
+Value* ConstFloatValue::addConstant(Procedure& proc, int32_t other) const
+{
+    return proc.add<ConstFloatValue>(origin(), m_value + static_cast<float>(other));
+}
+
+Value* ConstFloatValue::addConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    return proc.add<ConstFloatValue>(origin(), m_value + other->asFloat());
+}
+
+Value* ConstFloatValue::subConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    return proc.add<ConstFloatValue>(origin(), m_value - other->asFloat());
+}
+
+Value* ConstFloatValue::mulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    return proc.add<ConstFloatValue>(origin(), m_value * other->asFloat());
+}
+
+Value* ConstFloatValue::bitAndConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    float result = bitwise_cast<float>(bitwise_cast<uint32_t>(m_value) & bitwise_cast<uint32_t>(other->asFloat()));
+    return proc.add<ConstFloatValue>(origin(), result);
+}
+
+Value* ConstFloatValue::bitOrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    float result = bitwise_cast<float>(bitwise_cast<uint32_t>(m_value) | bitwise_cast<uint32_t>(other->asFloat()));
+    return proc.add<ConstFloatValue>(origin(), result);
+}
+
+Value* ConstFloatValue::bitXorConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    float result = bitwise_cast<float>(bitwise_cast<uint32_t>(m_value) ^ bitwise_cast<uint32_t>(other->asFloat()));
+    return proc.add<ConstFloatValue>(origin(), result);
+}
+
+Value* ConstFloatValue::bitwiseCastConstant(Procedure& proc) const
+{
+    return proc.add<Const32Value>(origin(), bitwise_cast<int32_t>(m_value));
+}
+
+Value* ConstFloatValue::floatToDoubleConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* ConstFloatValue::absConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(fabs(m_value)));
+}
+
+Value* ConstFloatValue::ceilConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), ceilf(m_value));
+}
+
+Value* ConstFloatValue::floorConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), floorf(m_value));
+}
+
+Value* ConstFloatValue::sqrtConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(sqrt(m_value)));
+}
+
+Value* ConstFloatValue::divConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    return proc.add<ConstFloatValue>(origin(), m_value / other->asFloat());
+}
+
+TriState ConstFloatValue::equalConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value == other->asFloat());
+}
+
+TriState ConstFloatValue::notEqualConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value != other->asFloat());
+}
+
+TriState ConstFloatValue::lessThanConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value < other->asFloat());
+}
+
+TriState ConstFloatValue::greaterThanConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value > other->asFloat());
+}
+
+TriState ConstFloatValue::lessEqualConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value <= other->asFloat());
+}
+
+TriState ConstFloatValue::greaterEqualConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value >= other->asFloat());
+}
+
+void ConstFloatValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma);
+    out.printf("%le", m_value);
+}
+
+Value* ConstFloatValue::cloneImpl() const
+{
+    return new ConstFloatValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstFloatValue.h b/Source/JavaScriptCore/b3/B3ConstFloatValue.h
new file mode 100644
index 000000000..185583c07
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstFloatValue.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ConstFloatValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == ConstFloat; }
+
+    ~ConstFloatValue();
+
+    float value() const { return m_value; }
+
+    Value* negConstant(Procedure&) const override;
+    Value* addConstant(Procedure&, int32_t other) const override;
+    Value* addConstant(Procedure&, const Value* other) const override;
+    Value* subConstant(Procedure&, const Value* other) const override;
+    Value* divConstant(Procedure&, const Value* other) const override;
+    Value* mulConstant(Procedure&, const Value* other) const override;
+    Value* bitAndConstant(Procedure&, const Value* other) const override;
+    Value* bitOrConstant(Procedure&, const Value* other) const override;
+    Value* bitXorConstant(Procedure&, const Value* other) const override;
+    Value* bitwiseCastConstant(Procedure&) const override;
+    Value* floatToDoubleConstant(Procedure&) const override;
+    Value* absConstant(Procedure&) const override;
+    Value* ceilConstant(Procedure&) const override;
+    Value* floorConstant(Procedure&) const override;
+    Value* sqrtConstant(Procedure&) const override;
+
+    TriState equalConstant(const Value* other) const override;
+    TriState notEqualConstant(const Value* other) const override;
+    TriState lessThanConstant(const Value* other) const override;
+    TriState greaterThanConstant(const Value* other) const override;
+    TriState lessEqualConstant(const Value* other) const override;
+    TriState greaterEqualConstant(const Value* other) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    ConstFloatValue(Origin origin, float value)
+        : Value(CheckedOpcode, ConstFloat, Float, origin)
+        , m_value(value)
+    {
+    }
+
+    float m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstPtrValue.h b/Source/JavaScriptCore/b3/B3ConstPtrValue.h
new file mode 100644
index 000000000..78bcba39b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstPtrValue.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Const32Value.h"
+#include "B3Const64Value.h"
+
+namespace JSC { namespace B3 {
+
+// Usually you want to use Const32Value or Const64Value directly. But this is useful for writing
+// platform-agnostic code. Note that a ConstPtrValue will behave like either a Const32Value or
+// Const64Value depending on platform.
+
+#if USE(JSVALUE64)
+typedef Const64Value ConstPtrValueBase;
+#else
+typedef Const32Value ConstPtrValueBase;
+#endif
+
+class ConstPtrValue : public ConstPtrValueBase {
+public:
+    void* value() const
+    {
+        return bitwise_cast<void*>(ConstPtrValueBase::value());
+    }
+
+private:
+    friend class Procedure;
+
+    template<typename T>
+    ConstPtrValue(Origin origin, T* pointer)
+        : ConstPtrValueBase(origin, bitwise_cast<intptr_t>(pointer))
+    {
+    }
+    template<typename T>
+    ConstPtrValue(Origin origin, T pointer)
+        : ConstPtrValueBase(origin, static_cast<intptr_t>(pointer))
+    {
+    }
+};
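+
+// Usage sketch (illustrative): on a 64-bit build,
+//
+//     int* slot = ...;
+//     Value* p = proc.add<ConstPtrValue>(Origin(), slot);
+//
+// yields a Const64 whose payload is the pointer's bit pattern; value()
+// recovers it as void* on either word size.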
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstrainedValue.cpp b/Source/JavaScriptCore/b3/B3ConstrainedValue.cpp
new file mode 100644
index 000000000..dd1762ff2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstrainedValue.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ConstrainedValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+void ConstrainedValue::dump(PrintStream& out) const
+{
+    out.print(pointerDump(m_value), ":", m_rep);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3ConstrainedValue.h b/Source/JavaScriptCore/b3/B3ConstrainedValue.h
new file mode 100644
index 000000000..d2cd31fe1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstrainedValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueRep.h"
+
+namespace JSC { namespace B3 {
+
+class Value;
+
+class ConstrainedValue {
+public:
+    ConstrainedValue()
+    {
+    }
+
+    ConstrainedValue(Value* value)
+        : m_value(value)
+        , m_rep(ValueRep::WarmAny)
+    {
+    }
+
+    ConstrainedValue(Value* value, const ValueRep& rep)
+        : m_value(value)
+        , m_rep(rep)
+    {
+    }
+
+    explicit operator bool() const { return m_value || m_rep; }
+
+    Value* value() const { return m_value; }
+    const ValueRep& rep() const { return m_rep; }
+
+    void dump(PrintStream& out) const;
+
+private:
+    Value* m_value { nullptr };
+    ValueRep m_rep;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3DataSection.cpp b/Source/JavaScriptCore/b3/B3DataSection.cpp
new file mode 100644
index 000000000..f4e68bca2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3DataSection.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3DataSection.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
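+// fastZeroedMalloc hands back zero-filled storage, so a freshly constructed
+// DataSection reads as all zero bytes until a client writes into it.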
+DataSection::DataSection(size_t size)
+    : m_data(fastZeroedMalloc(size))
+    , m_size(size)
+{
+}
+
+DataSection::~DataSection()
+{
+    fastFree(m_data);
+}
+
+void DataSection::dump(PrintStream& out) const
+{
+    out.print("DataSection at ", RawPointer(m_data), " with ", m_size, " bytes.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3DataSection.h b/Source/JavaScriptCore/b3/B3DataSection.h
new file mode 100644
index 000000000..0bca40ed4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3DataSection.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproduct.h"
+
+namespace JSC { namespace B3 {
+
+class DataSection : public OpaqueByproduct {
+public:
+    DataSection(size_t size);
+    virtual ~DataSection();
+
+    void* data() const { return m_data; }
+    size_t size() const { return m_size; }
+
+    void dump(PrintStream&) const override;
+
+private:
+    void* m_data;
+    size_t m_size;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Dominators.h b/Source/JavaScriptCore/b3/B3Dominators.h
new file mode 100644
index 000000000..4a9d085f1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Dominators.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CFG.h"
+#include "B3Procedure.h"
+#include <wtf/Dominators.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 {
+
+class Dominators : public WTF::Dominators<CFG> {
+    WTF_MAKE_NONCOPYABLE(Dominators);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    Dominators(Procedure& proc)
+        : WTF::Dominators<CFG>(proc.cfg())
+    {
+    }
+};
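+
+// Usage sketch (illustrative, assuming the standard WTF::Dominators
+// interface):
+//
+//     Dominators& doms = proc.dominators();
+//     if (doms.dominates(header, body)) { ... }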
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3DuplicateTails.cpp b/Source/JavaScriptCore/b3/B3DuplicateTails.cpp
new file mode 100644
index 000000000..fe94a607d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3DuplicateTails.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3DuplicateTails.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BreakCriticalEdges.h"
+#include "B3Dominators.h"
+#include "B3FixSSA.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class DuplicateTails {
+public:
+    DuplicateTails(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+        , m_maxSize(Options::maxB3TailDupBlockSize())
+        , m_maxSuccessors(Options::maxB3TailDupBlockSuccessors())
+    {
+    }
+
+    void run()
+    {
+        // Breaking critical edges introduces blocks that jump to things. Those Jumps' successors
+        // become candidates for tail duplication. Prior to critical edge breaking, some of those
+        // Jumps would have been Branches, and so no tail duplication would have happened.
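+        // For example, if A ends in a Branch to B and C, and C has another
+        // predecessor, breaking that critical edge inserts a block A' whose
+        // only job is to Jump to C; A' then becomes a candidate to have C's
+        // contents duplicated into it.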
+        breakCriticalEdges(m_proc);
+        
+        // Find blocks that would be candidates for tail duplication. They must be small enough
+        // and they must not have too many successors.
+
+        m_proc.resetValueOwners();
+
+        IndexSet<BasicBlock> candidates;
+
+        for (BasicBlock* block : m_proc) {
+            if (block->size() > m_maxSize)
+                continue;
+            if (block->numSuccessors() > m_maxSuccessors)
+                continue;
+            if (block->last()->type() != Void) // Demoting doesn't handle terminals with values.
+                continue;
+
+            candidates.add(block);
+        }
+
+        // Collect the set of values that must be de-SSA'd.
+        IndexSet<Value> valuesToDemote;
+        for (BasicBlock* block : m_proc) {
+            for (Value* value : *block) {
+                if (value->opcode() == Phi && candidates.contains(block))
+                    valuesToDemote.add(value);
+                for (Value* child : value->children()) {
+                    if (child->owner != block && candidates.contains(child->owner))
+                        valuesToDemote.add(child);
+                }
+            }
+        }
+        demoteValues(m_proc, valuesToDemote);
+        if (verbose) {
+            dataLog("Procedure after value demotion:\n");
+            dataLog(m_proc);
+        }
+
+        for (BasicBlock* block : m_proc) {
+            if (block->last()->opcode() != Jump)
+                continue;
+
+            BasicBlock* tail = block->successorBlock(0);
+            if (!candidates.contains(tail))
+                continue;
+
+            // Don't tail duplicate a trivial self-loop, because the code below can't handle block and
+            // tail being the same block.
+            if (block == tail)
+                continue;
+
+            // We're about to change 'block'. Make sure that nobody duplicates block after this
+            // point.
+            candidates.remove(block);
+
+            if (verbose)
+                dataLog("Duplicating ", *tail, " into ", *block, "\n");
+
+            block->removeLast(m_proc);
+
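+            // Clone the tail into this block, remapping intra-tail uses so a
+            // clone consumes earlier clones rather than the originals it was
+            // copied from.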
+            HashMap<Value*, Value*> map;
+            for (Value* value : *tail) {
+                Value* clone = m_proc.clone(value);
+                for (Value*& child : clone->children()) {
+                    if (Value* replacement = map.get(child))
+                        child = replacement;
+                }
+                if (value->type() != Void)
+                    map.add(value, clone);
+                block->append(clone);
+            }
+            block->successors() = tail->successors();
+        }
+
+        m_proc.resetReachability();
+        m_proc.invalidateCFG();
+    }
+    
+private:
+
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+    unsigned m_maxSize;
+    unsigned m_maxSuccessors;
+};
+
+} // anonymous namespace
+
+void duplicateTails(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "duplicateTails");
+    DuplicateTails duplicateTails(proc);
+    duplicateTails.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3DuplicateTails.h b/Source/JavaScriptCore/b3/B3DuplicateTails.h
new file mode 100644
index 000000000..443adafb6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3DuplicateTails.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Replaces jumps to tiny basic blocks with the contents of those basic blocks. Also simplifies
+// branches that are path-redundant. Does not do a fixpoint, because it does not have a good way
+// of detecting termination.
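+//
+// As a rough sketch (illustration only, not code from this patch), duplicating a
+// tiny tail turns:
+//
+//     BB#1: ...; Jump(#3)        BB#2: ...; Jump(#3)
+//     BB#3: @x = Add(@a, @b); Return(@x)
+//
+// into:
+//
+//     BB#1: ...; @x1 = Add(@a, @b); Return(@x1)
+//     BB#2: ...; @x2 = Add(@a, @b); Return(@x2)
+//
+// removing a jump and enabling path-specific simplification in each copy.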
+
+void duplicateTails(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Effects.cpp b/Source/JavaScriptCore/b3/B3Effects.cpp
new file mode 100644
index 000000000..aeda46f83
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Effects.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Effects.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+#include <wtf/DataLog.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+// These helpers cascade in such a way that after the helper for terminal, we don't have to worry
+// about terminal again, since the terminal case considers all ways that a terminal may interfere
+// with something else. And after the exit sideways case, we don't have to worry about either
+// exitsSideways or terminal. And so on...
+
+bool interferesWithTerminal(const Effects& terminal, const Effects& other)
+{
+    if (!terminal.terminal)
+        return false;
+    return other.terminal || other.controlDependent || other.writesLocalState || other.writes || other.writesPinned;
+}
+
+bool interferesWithExitSideways(const Effects& exitsSideways, const Effects& other)
+{
+    if (!exitsSideways.exitsSideways)
+        return false;
+    return other.controlDependent || other.writes || other.writesPinned;
+}
+
+bool interferesWithWritesLocalState(const Effects& writesLocalState, const Effects& other)
+{
+    if (!writesLocalState.writesLocalState)
+        return false;
+    return other.writesLocalState || other.readsLocalState;
+}
+
+bool interferesWithWritesPinned(const Effects& writesPinned, const Effects& other)
+{
+    if (!writesPinned.writesPinned)
+        return false;
+    return other.writesPinned || other.readsPinned;
+}
+
+} // anonymous namespace
+
+bool Effects::interferes(const Effects& other) const
+{
+    return interferesWithTerminal(*this, other)
+        || interferesWithTerminal(other, *this)
+        || interferesWithExitSideways(*this, other)
+        || interferesWithExitSideways(other, *this)
+        || interferesWithWritesLocalState(*this, other)
+        || interferesWithWritesLocalState(other, *this)
+        || interferesWithWritesPinned(*this, other)
+        || interferesWithWritesPinned(other, *this)
+        || writes.overlaps(other.writes)
+        || writes.overlaps(other.reads)
+        || reads.overlaps(other.writes);
+}
+
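+// A minimal sketch of how interference plays out (hypothetical heap range, not
+// part of this file): a control-dependent load interferes with a call, which
+// writes the top heap range, but two pure reads never interfere.
+//
+//     Effects load = Effects::none();
+//     load.controlDependent = true;
+//     load.reads = HeapRange(1, 8);                // hypothetical range
+//     ASSERT(load.interferes(Effects::forCall())); // the call clobbers our reads
+//     ASSERT(!load.interferes(load));              // reads don't conflict
+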
+bool Effects::operator==(const Effects& other) const
+{
+    return terminal == other.terminal
+        && exitsSideways == other.exitsSideways
+        && controlDependent == other.controlDependent
+        && writesLocalState == other.writesLocalState
+        && readsLocalState == other.readsLocalState
+        && writesPinned == other.writesPinned
+        && readsPinned == other.readsPinned
+        && writes == other.writes
+        && reads == other.reads;
+}
+
+bool Effects::operator!=(const Effects& other) const
+{
+    return !(*this == other);
+}
+
+void Effects::dump(PrintStream& out) const
+{
+    CommaPrinter comma("|");
+    if (terminal)
+        out.print(comma, "Terminal");
+    if (exitsSideways)
+        out.print(comma, "ExitsSideways");
+    if (controlDependent)
+        out.print(comma, "ControlDependent");
+    if (writesLocalState)
+        out.print(comma, "WritesLocalState");
+    if (readsLocalState)
+        out.print(comma, "ReadsLocalState");
+    if (writesPinned)
+        out.print(comma, "WritesPinned");
+    if (readsPinned)
+        out.print(comma, "ReadsPinned");
+    if (writes)
+        out.print(comma, "Writes:", writes);
+    if (reads)
+        out.print(comma, "Reads:", reads);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Effects.h b/Source/JavaScriptCore/b3/B3Effects.h
new file mode 100644
index 000000000..7a088535b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Effects.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+struct Effects {
+    // True if this cannot continue execution in the current block.
+    bool terminal { false };
+
+    // True if this value can cause execution to terminate abruptly, and this abrupt termination is
+    // observable. An example of how this gets used is to limit the hoisting of controlDependent values.
+    // Note that if exitsSideways is set to true but reads is bottom, then B3 is free to assume that
+    // after abrupt termination of this procedure, none of the heap will be read. That's usually false,
+    // so make sure that reads corresponds to the set of things that are readable after this function
+    // terminates abruptly.
+    bool exitsSideways { false };
+
+    // True if the instruction may change semantics if hoisted above some control flow. For example,
+    // loads are usually control-dependent because we must assume that any control construct (either
+    // a terminal like Branch or anything that exits sideways, like Check) validates whether the
+    // pointer is valid. Hoisting the load above control may cause the load to trap even though it
+    // would not have otherwise trapped.
+    bool controlDependent { false };
+
+    // True if this writes to the local state. Operations that write local state don't write to anything
+    // in "memory" but they have a side-effect anyway. This is for modeling Upsilons, Sets, and Fences.
+    // This is a way of saying: even though this operation is not a terminal, does not exit sideways,
+    // and does not write to the heap, you still cannot kill this operation.
+    bool writesLocalState { false };
+
+    // True if this reads from the local state. This is only used for Phi and Get.
+    bool readsLocalState { false };
+
+    // B3 understands things about pinned registers. Therefore, it needs to know who reads them and
+    // who writes them. We don't track this on a per-register basis because that would be harder and
+    // we don't need it. Note that if you want to construct an immutable pinned register while also
+    // having other pinned registers that are mutable, then you can use ArgumentReg. Also note that
+    // nobody will stop you from making this get out-of-sync with your clobbered register sets in
+    // Patchpoint. It's recommended that you err on the side of being conservative.
+    // FIXME: Explore making these be RegisterSets. That's mainly hard because it would be awkward to
+    // reconcile with StackmapValue's support for clobbered regs.
+    // https://bugs.webkit.org/show_bug.cgi?id=163173
+    bool readsPinned { false };
+    bool writesPinned { false };
+
+    HeapRange writes;
+    HeapRange reads;
+    
+    static Effects none()
+    {
+        return Effects();
+    }
+
+    static Effects forCall()
+    {
+        Effects result;
+        result.exitsSideways = true;
+        result.controlDependent = true;
+        result.writes = HeapRange::top();
+        result.reads = HeapRange::top();
+        result.readsPinned = true;
+        result.writesPinned = true;
+        return result;
+    }
+
+    static Effects forCheck()
+    {
+        Effects result;
+        result.exitsSideways = true;
+        // The program could read anything after exiting, and it's on us to declare this.
+        result.reads = HeapRange::top();
+        return result;
+    }
+
+    bool mustExecute() const
+    {
+        return terminal || exitsSideways || writesLocalState || writes || writesPinned;
+    }
+
+    // Returns true if reordering instructions with these respective effects would change program
+    // behavior in an observable way.
+    bool interferes(const Effects&) const;
+    
+    JS_EXPORT_PRIVATE bool operator==(const Effects&) const;
+    JS_EXPORT_PRIVATE bool operator!=(const Effects&) const;
+
+    JS_EXPORT_PRIVATE void dump(PrintStream& out) const;
+};
+
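+// For illustration (a sketch; the heap-range constant is hypothetical): a plain
+// store into one abstract heap could be described as
+//
+//     Effects storeEffects = Effects::none();
+//     storeEffects.writes = HeapRange(42);
+//
+// Such an operation mustExecute(), so no phase may delete it, and it interferes
+// with anything that reads or writes an overlapping range.
+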
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.cpp b/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.cpp
new file mode 100644
index 000000000..feaacdac4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.cpp
@@ -0,0 +1,703 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3EliminateCommonSubexpressions.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockWorklist.h"
+#include "B3Dominators.h"
+#include "B3HeapRange.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3PureCSE.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3ValueKey.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "DFGGraph.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/HashMap.h>
+#include <wtf/ListDump.h>
+#include <wtf/RangeSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+// FIXME: We could treat Patchpoints with a non-empty set of reads as a "memory value" and somehow
+// eliminate redundant ones. We would need some way of determining if two patchpoints are replaceable.
+// It doesn't seem right to use the reads set for this. We could use the generator, but that feels
+// lame because the FTL will pretty much use a unique generator for each patchpoint even when two
+// patchpoints have the same semantics as far as CSE would be concerned. We could invent something
+// like a "value ID" for patchpoints. By default, each one gets a unique value ID, but FTL could force
+// some patchpoints to share the same one as a signal that they will return the same value if executed
+// in the same heap with the same inputs.
+
+typedef Vector<Value*, 1> Matches;
+typedef Vector<MemoryValue*, 2> MemoryMatches;
+
+class MemoryValueMap {
+public:
+    MemoryValueMap() { }
+
+    void add(MemoryValue* memory)
+    {
+        Matches& matches = m_map.add(memory->lastChild(), Matches()).iterator->value;
+        if (matches.contains(memory))
+            return;
+        matches.append(memory);
+    }
+
+    template<typename Functor>
+    void removeIf(const Functor& functor)
+    {
+        m_map.removeIf(
+            [&] (HashMap<Value*, Matches>::KeyValuePairType& entry) -> bool {
+                entry.value.removeAllMatching(
+                    [&] (Value* value) -> bool {
+                        if (MemoryValue* memory = value->as<MemoryValue>())
+                            return functor(memory);
+                        return true;
+                    });
+                return entry.value.isEmpty();
+            });
+    }
+
+    Matches* find(Value* ptr)
+    {
+        auto iter = m_map.find(ptr);
+        if (iter == m_map.end())
+            return nullptr;
+        return &iter->value;
+    }
+
+    template<typename Functor>
+    MemoryValue* find(Value* ptr, const Functor& functor)
+    {
+        if (Matches* matches = find(ptr)) {
+            for (Value* candidateValue : *matches) {
+                if (MemoryValue* candidateMemory = candidateValue->as<MemoryValue>()) {
+                    if (functor(candidateMemory))
+                        return candidateMemory;
+                }
+            }
+        }
+        return nullptr;
+    }
+
+    void dump(PrintStream& out) const
+    {
+        out.print("{");
+        CommaPrinter comma;
+        for (auto& entry : m_map)
+            out.print(comma, pointerDump(entry.key), "=>", pointerListDump(entry.value));
+        out.print("}");
+    }
+    
+private:
+    // This uses Matches for two reasons:
+    // - It cannot be a MemoryValue* because the key is imprecise. Many MemoryValues could have the
+    //   same key while being unaliased.
+    // - It can't be a MemoryMatches array because the MemoryValue*'s could be turned into Identity's.
+    HashMap<Value*, Matches> m_map;
+};
+
+struct ImpureBlockData {
+    void dump(PrintStream& out) const
+    {
+        out.print(
+            "{reads = ", reads, ", writes = ", writes, ", storesAtHead = ", storesAtHead,
+            ", memoryValuesAtTail = ", memoryValuesAtTail, "}");
+    }
+
+    RangeSet<HeapRange> reads; // This only gets used for forward store elimination.
+    RangeSet<HeapRange> writes; // This gets used for both load and store elimination.
+
+    MemoryValueMap storesAtHead;
+    MemoryValueMap memoryValuesAtTail;
+};
+
+class CSE {
+public:
+    CSE(Procedure& proc)
+        : m_proc(proc)
+        , m_dominators(proc.dominators())
+        , m_impureBlockData(proc.size())
+        , m_insertionSet(proc)
+    {
+    }
+
+    bool run()
+    {
+        if (verbose)
+            dataLog("B3 before CSE:\n", m_proc);
+        
+        m_proc.resetValueOwners();
+
+        // Summarize the impure effects of each block, and the impure values available at the end of
+        // each block. This doesn't edit code yet.
+        for (BasicBlock* block : m_proc) {
+            ImpureBlockData& data = m_impureBlockData[block];
+            for (Value* value : *block) {
+                Effects effects = value->effects();
+                MemoryValue* memory = value->as<MemoryValue>();
+                
+                if (memory && memory->isStore()
+                    && !data.reads.overlaps(memory->range())
+                    && !data.writes.overlaps(memory->range()))
+                    data.storesAtHead.add(memory);
+                data.reads.add(effects.reads);
+
+                if (HeapRange writes = effects.writes)
+                    clobber(data, writes);
+
+                if (memory)
+                    data.memoryValuesAtTail.add(memory);
+            }
+
+            if (verbose)
+                dataLog("Block ", *block, ": ", data, "\n");
+        }
+
+        // Perform CSE. This edits code.
+        Vector<BasicBlock*> postOrder = m_proc.blocksInPostOrder();
+        for (unsigned i = postOrder.size(); i--;) {
+            m_block = postOrder[i];
+            if (verbose)
+                dataLog("Looking at ", *m_block, ":\n");
+
+            m_data = ImpureBlockData();
+            for (m_index = 0; m_index < m_block->size(); ++m_index) {
+                m_value = m_block->at(m_index);
+                process();
+            }
+            m_insertionSet.execute(m_block);
+            m_impureBlockData[m_block] = m_data;
+        }
+
+        // The previous pass might have requested that we insert code in some basic block other than
+        // the one that it was looking at. This inserts them.
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                auto iter = m_sets.find(block->at(valueIndex));
+                if (iter == m_sets.end())
+                    continue;
+
+                for (Value* value : iter->value)
+                    m_insertionSet.insertValue(valueIndex + 1, value);
+            }
+            m_insertionSet.execute(block);
+        }
+
+        if (verbose)
+            dataLog("B3 after CSE:\n", m_proc);
+
+        return m_changed;
+    }
+    
+private:
+    void process()
+    {
+        m_value->performSubstitution();
+
+        if (m_pureCSE.process(m_value, m_dominators)) {
+            ASSERT(!m_value->effects().writes);
+            m_changed = true;
+            return;
+        }
+
+        MemoryValue* memory = m_value->as<MemoryValue>();
+        if (memory && processMemoryBeforeClobber(memory))
+            return;
+
+        if (HeapRange writes = m_value->effects().writes)
+            clobber(m_data, writes);
+        
+        if (memory)
+            processMemoryAfterClobber(memory);
+    }
+
+    // Return true if we got rid of the operation. If you changed IR in this function, you have to
+    // set m_changed even if you also return true.
+    bool processMemoryBeforeClobber(MemoryValue* memory)
+    {
+        Value* value = memory->child(0);
+        Value* ptr = memory->lastChild();
+        HeapRange range = memory->range();
+        int32_t offset = memory->offset();
+
+        switch (memory->opcode()) {
+        case Store8:
+            return handleStoreBeforeClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && ((candidate->opcode() == Store8 && candidate->child(0) == value)
+                            || ((candidate->opcode() == Load8Z || candidate->opcode() == Load8S)
+                                && candidate == value));
+                });
+        case Store16:
+            return handleStoreBeforeClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && ((candidate->opcode() == Store16 && candidate->child(0) == value)
+                            || ((candidate->opcode() == Load16Z || candidate->opcode() == Load16S)
+                                && candidate == value));
+                });
+        case Store:
+            return handleStoreBeforeClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && ((candidate->opcode() == Store && candidate->child(0) == value)
+                            || (candidate->opcode() == Load && candidate == value));
+                });
+        default:
+            return false;
+        }
+    }
+
+    void clobber(ImpureBlockData& data, HeapRange writes)
+    {
+        data.writes.add(writes);
+        
+        data.memoryValuesAtTail.removeIf(
+            [&] (MemoryValue* memory) {
+                return memory->range().overlaps(writes);
+            });
+    }
+
+    void processMemoryAfterClobber(MemoryValue* memory)
+    {
+        Value* ptr = memory->lastChild();
+        HeapRange range = memory->range();
+        int32_t offset = memory->offset();
+        Type type = memory->type();
+
+        // FIXME: Empower this to insert more casts and shifts. For example, a Load8 could match a
+        // Store and mask the result. You could even have:
+        //
+        // Store(@value, @ptr, offset = 0)
+        // Load8Z(@ptr, offset = 2)
+        //
+        // Which could be turned into something like this:
+        //
+        // Store(@value, @ptr, offset = 0)
+        // ZShr(@value, 16)
+        
+        switch (memory->opcode()) {
+        case Load8Z: {
+            handleMemoryValue(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && (candidate->opcode() == Load8Z || candidate->opcode() == Store8);
+                },
+                [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+                    if (match->opcode() == Store8) {
+                        Value* mask = m_proc.add<Const32Value>(m_value->origin(), 0xff);
+                        fixups.append(mask);
+                        Value* zext = m_proc.add<Value>(
+                            BitAnd, m_value->origin(), match->child(0), mask);
+                        fixups.append(zext);
+                        return zext;
+                    }
+                    return nullptr;
+                });
+            break;
+        }
+
+        case Load8S: {
+            handleMemoryValue(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && (candidate->opcode() == Load8S || candidate->opcode() == Store8);
+                },
+                [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+                    if (match->opcode() == Store8) {
+                        Value* sext = m_proc.add<Value>(
+                            SExt8, m_value->origin(), match->child(0));
+                        fixups.append(sext);
+                        return sext;
+                    }
+                    return nullptr;
+                });
+            break;
+        }
+
+        case Load16Z: {
+            handleMemoryValue(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && (candidate->opcode() == Load16Z || candidate->opcode() == Store16);
+                },
+                [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+                    if (match->opcode() == Store16) {
+                        Value* mask = m_proc.add<Const32Value>(m_value->origin(), 0xffff);
+                        fixups.append(mask);
+                        Value* zext = m_proc.add<Value>(
+                            BitAnd, m_value->origin(), match->child(0), mask);
+                        fixups.append(zext);
+                        return zext;
+                    }
+                    return nullptr;
+                });
+            break;
+        }
+
+        case Load16S: {
+            handleMemoryValue(
+                ptr, range, [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && (candidate->opcode() == Load16S || candidate->opcode() == Store16);
+                },
+                [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+                    if (match->opcode() == Store16) {
+                        Value* sext = m_proc.add<Value>(
+                            SExt16, m_value->origin(), match->child(0));
+                        fixups.append(sext);
+                        return sext;
+                    }
+                    return nullptr;
+                });
+            break;
+        }
+
+        case Load: {
+            handleMemoryValue(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    if (candidate->offset() != offset)
+                        return false;
+
+                    if (candidate->opcode() == Load && candidate->type() == type)
+                        return true;
+
+                    if (candidate->opcode() == Store && candidate->child(0)->type() == type)
+                        return true;
+
+                    return false;
+                });
+            break;
+        }
+
+        case Store8: {
+            handleStoreAfterClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->opcode() == Store8
+                        && candidate->offset() == offset;
+                });
+            break;
+        }
+            
+        case Store16: {
+            handleStoreAfterClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->opcode() == Store16
+                        && candidate->offset() == offset;
+                });
+            break;
+        }
+            
+        case Store: {
+            handleStoreAfterClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->opcode() == Store
+                        && candidate->offset() == offset;
+                });
+            break;
+        }
+
+        default:
+            dataLog("Bad memory value: ", deepDump(m_proc, m_value), "\n");
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+    }
+
+    template<typename Filter>
+    bool handleStoreBeforeClobber(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        MemoryMatches matches = findMemoryValue(ptr, range, filter);
+        if (matches.isEmpty())
+            return false;
+
+        m_value->replaceWithNop();
+        m_changed = true;
+        return true;
+    }
+
+    template<typename Filter>
+    void handleStoreAfterClobber(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        if (!m_value->traps() && findStoreAfterClobber(ptr, range, filter)) {
+            m_value->replaceWithNop();
+            m_changed = true;
+            return;
+        }
+
+        m_data.memoryValuesAtTail.add(m_value->as<MemoryValue>());
+    }
+
+    template<typename Filter>
+    bool findStoreAfterClobber(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        // We can eliminate a store if every forward path hits a store to the same location before
+        // hitting any operation that observes the store. This search seems like it should be
+        // expensive, but in the overwhelming majority of cases it will almost immediately hit an 
+        // operation that interferes.
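+        //
+        // Sketch (illustrative IR, not from this file):
+        //
+        //     Store(@a, @p)    <- m_value
+        //     Store(@b, @p)
+        //
+        // The first store is dead provided nothing on any forward path reads or
+        // writes the overlapping heap range before the second store.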
+
+        if (verbose)
+            dataLog(*m_value, ": looking forward for stores to ", *ptr, "...\n");
+
+        // First search forward in this basic block.
+        // FIXME: It would be cool to get rid of this linear search. It's not super critical since
+        // we will probably bail out very quickly, but it *is* annoying.
+        for (unsigned index = m_index + 1; index < m_block->size(); ++index) {
+            Value* value = m_block->at(index);
+
+            if (MemoryValue* memoryValue = value->as<MemoryValue>()) {
+                if (memoryValue->lastChild() == ptr && filter(memoryValue))
+                    return true;
+            }
+
+            Effects effects = value->effects();
+            if (effects.reads.overlaps(range) || effects.writes.overlaps(range))
+                return false;
+        }
+
+        if (!m_block->numSuccessors())
+            return false;
+
+        BlockWorklist worklist;
+        worklist.pushAll(m_block->successorBlocks());
+
+        while (BasicBlock* block = worklist.pop()) {
+            ImpureBlockData& data = m_impureBlockData[block];
+
+            MemoryValue* match = data.storesAtHead.find(ptr, filter);
+            if (match && match != m_value)
+                continue;
+
+            if (data.writes.overlaps(range) || data.reads.overlaps(range))
+                return false;
+
+            if (!block->numSuccessors())
+                return false;
+
+            worklist.pushAll(block->successorBlocks());
+        }
+
+        return true;
+    }
+
+    template<typename Filter>
+    void handleMemoryValue(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        handleMemoryValue(
+            ptr, range, filter,
+            [] (MemoryValue*, Vector<Value*>&) -> Value* {
+                return nullptr;
+            });
+    }
+
+    template<typename Filter, typename Replace>
+    void handleMemoryValue(
+        Value* ptr, HeapRange range, const Filter& filter, const Replace& replace)
+    {
+        MemoryMatches matches = findMemoryValue(ptr, range, filter);
+        if (replaceMemoryValue(matches, replace))
+            return;
+        m_data.memoryValuesAtTail.add(m_value->as<MemoryValue>());
+    }
+
+    template<typename Replace>
+    bool replaceMemoryValue(const MemoryMatches& matches, const Replace& replace)
+    {
+        if (matches.isEmpty())
+            return false;
+
+        if (verbose)
+            dataLog("Eliminating ", *m_value, " due to ", pointerListDump(matches), "\n");
+        
+        m_changed = true;
+
+        if (matches.size() == 1) {
+            MemoryValue* dominatingMatch = matches[0];
+            RELEASE_ASSERT(m_dominators.dominates(dominatingMatch->owner, m_block));
+            
+            if (verbose)
+                dataLog("    Eliminating using ", *dominatingMatch, "\n");
+            Vector<Value*> extraValues;
+            if (Value* value = replace(dominatingMatch, extraValues)) {
+                for (Value* extraValue : extraValues)
+                    m_insertionSet.insertValue(m_index, extraValue);
+                m_value->replaceWithIdentity(value);
+            } else {
+                if (dominatingMatch->isStore())
+                    m_value->replaceWithIdentity(dominatingMatch->child(0));
+                else
+                    m_value->replaceWithIdentity(dominatingMatch);
+            }
+            return true;
+        }
+
+        // FIXME: It would be way better if this phase just did SSA calculation directly.
+        // Right now we're relying on the fact that CSE's position in the phase order is
+        // almost right before SSA fixup.
+
+        Variable* variable = m_proc.addVariable(m_value->type());
+
+        VariableValue* get = m_insertionSet.insert<VariableValue>(
+            m_index, Get, m_value->origin(), variable);
+        if (verbose)
+            dataLog("    Inserting get of value: ", *get, "\n");
+        m_value->replaceWithIdentity(get);
+            
+        for (MemoryValue* match : matches) {
+            Vector<Value*>& sets = m_sets.add(match, Vector<Value*>()).iterator->value;
+
+            Value* value = replace(match, sets);
+            if (!value) {
+                if (match->isStore())
+                    value = match->child(0);
+                else
+                    value = match;
+            }
+                
+            Value* set = m_proc.add<VariableValue>(Set, m_value->origin(), variable, value);
+            sets.append(set);
+        }
+
+        return true;
+    }
+
+    template<typename Filter>
+    MemoryMatches findMemoryValue(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        if (verbose)
+            dataLog(*m_value, ": looking backward for ", *ptr, "...\n");
+        
+        if (MemoryValue* match = m_data.memoryValuesAtTail.find(ptr, filter)) {
+            if (verbose)
+                dataLog("    Found ", *match, " locally.\n");
+            return { match };
+        }
+
+        if (m_data.writes.overlaps(range)) {
+            if (verbose)
+                dataLog("    Giving up because of writes.\n");
+            return { };
+        }
+
+        BlockWorklist worklist;
+        worklist.pushAll(m_block->predecessors());
+
+        MemoryMatches matches;
+
+        while (BasicBlock* block = worklist.pop()) {
+            if (verbose)
+                dataLog("    Looking at ", *block, "\n");
+
+            ImpureBlockData& data = m_impureBlockData[block];
+
+            MemoryValue* match = data.memoryValuesAtTail.find(ptr, filter);
+            if (match && match != m_value) {
+                if (verbose)
+                    dataLog("    Found match: ", *match, "\n");
+                matches.append(match);
+                continue;
+            }
+
+            if (data.writes.overlaps(range)) {
+                if (verbose)
+                    dataLog("    Giving up because of writes.\n");
+                return { };
+            }
+
+            if (!block->numPredecessors()) {
+                if (verbose)
+                    dataLog("    Giving up because it's live at root.\n");
+                // This essentially proves that this is live at the prologue. That means that we
+                // cannot reliably optimize this case.
+                return { };
+            }
+            
+            worklist.pushAll(block->predecessors());
+        }
+
+        if (verbose)
+            dataLog("    Got matches: ", pointerListDump(matches), "\n");
+        return matches;
+    }
+
+    Procedure& m_proc;
+
+    Dominators& m_dominators;
+    PureCSE m_pureCSE;
+    
+    IndexMap<BasicBlock, ImpureBlockData> m_impureBlockData;
+
+    ImpureBlockData m_data;
+
+    BasicBlock* m_block;
+    unsigned m_index;
+    Value* m_value;
+
+    HashMap<Value*, Vector<Value*>> m_sets;
+
+    InsertionSet m_insertionSet;
+
+    bool m_changed { false };
+};
+
+} // anonymous namespace
+
+bool eliminateCommonSubexpressions(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "eliminateCommonSubexpressions");
+
+    CSE cse(proc);
+    return cse.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.h b/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.h
new file mode 100644
index 000000000..ce994beb9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// This does global common subexpression elimination (CSE) over both pure values and memory accesses.
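+//
+// For example (a sketch): a Load dominated by an identical Load, with no
+// intervening write to an overlapping heap range, is replaced by the earlier
+// value; a Load that follows a Store to the same address becomes the stored
+// value.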
+
+bool eliminateCommonSubexpressions(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3FenceValue.cpp b/Source/JavaScriptCore/b3/B3FenceValue.cpp
new file mode 100644
index 000000000..80e27928c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FenceValue.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3FenceValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+FenceValue::~FenceValue()
+{
+}
+
+Value* FenceValue::cloneImpl() const
+{
+    return new FenceValue(*this);
+}
+
+FenceValue::FenceValue(Origin origin, HeapRange read, HeapRange write)
+    : Value(CheckedOpcode, Fence, Void, origin)
+    , read(read)
+    , write(write)
+{
+}
+
+FenceValue::FenceValue(Origin origin)
+    : FenceValue(origin, HeapRange::top(), HeapRange::top())
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FenceValue.h b/Source/JavaScriptCore/b3/B3FenceValue.h
new file mode 100644
index 000000000..d147052d1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FenceValue.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE FenceValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Fence; }
+    
+    ~FenceValue();
+    
+    // The read/write heaps are reflected in the effects() of this value. The compiler may change
+    // the lowering of a Fence based on the heaps. For example, if a fence does not write anything
+    // then it is understood to be a store-store fence. On x86, this may lead us to not emit any
+    // code, while on ARM we may emit a cheaper fence (dmb ishst instead of dmb ish). We will do
+    // the same optimization for load-load fences, which are expressed as a Fence that writes but
+    // does not read.
+    //
+    // This abstraction allows us to cover all of the fences on x86 and all of the standalone fences
+    // on ARM. X86 really just has one fence: mfence. This fence should be used to protect stores
+    // from being sunk below loads. WTF calls it the storeLoadFence. A classic example is the Steele
+    // barrier:
+    //
+    //     o.f = v  =>  o.f = v
+    //                  if (color(o) == black)
+    //                      log(o)
+    //
+    // We are trying to ensure that if the store to o.f occurs after the collector has started
+    // visiting o, then we will log o. Under sequential consistency, this would work. The collector
+    // would set color(o) to black just before it started visiting. But x86's illusion of sequential
+    // consistency is broken in exactly just this store->load ordering case. The store to o.f may
+    // get buffered, and it may occur some time after we have loaded and checked color(o). As well,
+    // the collector's store to set color(o) to black may get buffered and it may occur some time
+    // after the collector has finished visiting o. Therefore, we need mfences. In B3 we model this
+    // as a Fence that reads and writes some heaps. Setting writes to the empty set will cause B3 to
+    // not emit any barrier on x86.
+    //
+    // On ARM there are many more fences. The Fence instruction is meant to model just two of them:
+    // dmb ish and dmb ishst. You can emit a dmb ishst by using a Fence with an empty write heap.
+    // Otherwise, you will get a dmb ish.
+    // FIXME: Add fenced memory accesses. https://bugs.webkit.org/show_bug.cgi?id=162349
+    // FIXME: Add a Depend operation. https://bugs.webkit.org/show_bug.cgi?id=162350
+    HeapRange read { HeapRange::top() };
+    HeapRange write { HeapRange::top() };
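+
+    // A sketch of constructing a store-store fence (illustration, not code in
+    // this file): create the fence, then clear its write heap.
+    //
+    //     FenceValue* fence = proc.add<FenceValue>(origin);
+    //     fence->write = HeapRange(); // empty writes: dmb ishst on ARM, no code on x86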
+
+protected:
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+    
+    JS_EXPORT_PRIVATE FenceValue(Origin origin, HeapRange read, HeapRange write);
+    
+    JS_EXPORT_PRIVATE FenceValue(Origin origin);
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FixSSA.cpp b/Source/JavaScriptCore/b3/B3FixSSA.cpp
new file mode 100644
index 000000000..730c2c876
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FixSSA.cpp
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3FixSSA.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BreakCriticalEdges.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SSACalculator.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+const bool verbose = false;
+} // anonymous namespace
+
+void demoteValues(Procedure& proc, const IndexSet<Value>& values)
+{
+    HashMap<Value*, Variable*> map;
+    HashMap<Value*, Variable*> phiMap;
+
+    // Create stack slots.
+    for (Value* value : values.values(proc.values())) {
+        map.add(value, proc.addVariable(value->type()));
+
+        if (value->opcode() == Phi)
+            phiMap.add(value, proc.addVariable(value->type()));
+    }
+
+    if (verbose) {
+        dataLog("Demoting values as follows:\n");
+        dataLog("   map = ");
+        CommaPrinter comma;
+        for (auto& entry : map)
+            dataLog(comma, *entry.key, "=>", *entry.value);
+        dataLog("\n");
+        dataLog("   phiMap = ");
+        comma = CommaPrinter();
+        for (auto& entry : phiMap)
+            dataLog(comma, *entry.key, "=>", *entry.value);
+        dataLog("\n");
+    }
+
+    // Change accesses to the values to accesses to the stack slots.
+    InsertionSet insertionSet(proc);
+    for (BasicBlock* block : proc) {
+        for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+            Value* value = block->at(valueIndex);
+
+            if (value->opcode() == Phi) {
+                if (Variable* variable = phiMap.get(value)) {
+                    value->replaceWithIdentity(
+                        insertionSet.insert<VariableValue>(
+                            valueIndex, Get, value->origin(), variable));
+                }
+            } else {
+                for (Value*& child : value->children()) {
+                    if (Variable* variable = map.get(child)) {
+                        child = insertionSet.insert<VariableValue>(
+                            valueIndex, Get, value->origin(), variable);
+                    }
+                }
+
+                if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+                    if (Variable* variable = phiMap.get(upsilon->phi())) {
+                        insertionSet.insert<VariableValue>(
+                            valueIndex, Set, upsilon->origin(), variable, upsilon->child(0));
+                        value->replaceWithNop();
+                    }
+                }
+            }
+
+            if (Variable* variable = map.get(value)) {
+                insertionSet.insert<VariableValue>(
+                    valueIndex + 1, Set, value->origin(), variable, value);
+            }
+        }
+        insertionSet.execute(block);
+    }
+}
+
+bool fixSSA(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "fixSSA");
+
+    // Just for sanity, remove any unused variables first. It's unlikely that this code has any
+    // bugs having to do with dead variables, but it would be silly to have to fix such a bug if
+    // it did arise.
+    IndexSet<Variable> liveVariables;
+    for (Value* value : proc.values()) {
+        if (VariableValue* variableValue = value->as<VariableValue>())
+            liveVariables.add(variableValue->variable());
+    }
+
+    for (Variable* variable : proc.variables()) {
+        if (!liveVariables.contains(variable))
+            proc.deleteVariable(variable);
+    }
+
+    if (proc.variables().isEmpty())
+        return false;
+
+    // We know that we have variables to optimize, so do that now.
+    breakCriticalEdges(proc);
+
+    SSACalculator ssa(proc);
+
+    // Create a SSACalculator::Variable ("calcVar") for every variable.
+    Vector<Variable*> calcVarToVariable;
+    IndexMap<Variable, SSACalculator::Variable*> variableToCalcVar(proc.variables().size());
+
+    for (Variable* variable : proc.variables()) {
+        SSACalculator::Variable* calcVar = ssa.newVariable();
+        RELEASE_ASSERT(calcVar->index() == calcVarToVariable.size());
+        calcVarToVariable.append(variable);
+        variableToCalcVar[variable] = calcVar;
+    }
+
+    // Create Defs for all of the stores to the stack variable.
+    for (BasicBlock* block : proc) {
+        for (Value* value : *block) {
+            if (value->opcode() != Set)
+                continue;
+
+            Variable* variable = value->as<VariableValue>()->variable();
+
+            if (SSACalculator::Variable* calcVar = variableToCalcVar[variable])
+                ssa.newDef(calcVar, block, value->child(0));
+        }
+    }
+
+    // Decide where Phis are to be inserted. This creates them but does not insert them.
+    ssa.computePhis(
+        [&] (SSACalculator::Variable* calcVar, BasicBlock* block) -> Value* {
+            Variable* variable = calcVarToVariable[calcVar->index()];
+            Value* phi = proc.add<Value>(Phi, variable->type(), block->at(0)->origin());
+            if (verbose) {
+                dataLog(
+                    "Adding Phi for ", pointerDump(variable), " at ", *block, ": ",
+                    deepDump(proc, phi), "\n");
+            }
+            return phi;
+        });
+
+    // Now perform the conversion.
+    InsertionSet insertionSet(proc);
+    IndexMap<Variable, Value*> mapping(proc.variables().size());
+    for (BasicBlock* block : proc.blocksInPreOrder()) {
+        mapping.clear();
+
+        for (unsigned index = calcVarToVariable.size(); index--;) {
+            Variable* variable = calcVarToVariable[index];
+            SSACalculator::Variable* calcVar = ssa.variable(index);
+
+            SSACalculator::Def* def = ssa.reachingDefAtHead(block, calcVar);
+            if (def)
+                mapping[variable] = def->value();
+        }
+
+        for (SSACalculator::Def* phiDef : ssa.phisForBlock(block)) {
+            Variable* variable = calcVarToVariable[phiDef->variable()->index()];
+
+            insertionSet.insertValue(0, phiDef->value());
+            mapping[variable] = phiDef->value();
+        }
+
+        for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+            Value* value = block->at(valueIndex);
+            value->performSubstitution();
+
+            switch (value->opcode()) {
+            case Get: {
+                VariableValue* variableValue = value->as<VariableValue>();
+                Variable* variable = variableValue->variable();
+
+                if (Value* replacement = mapping[variable])
+                    value->replaceWithIdentity(replacement);
+                else {
+                    value->replaceWithIdentity(
+                        insertionSet.insertBottom(valueIndex, value));
+                }
+                break;
+            }
+                
+            case Set: {
+                VariableValue* variableValue = value->as<VariableValue>();
+                Variable* variable = variableValue->variable();
+
+                mapping[variable] = value->child(0);
+                value->replaceWithNop();
+                break;
+            }
+
+            default:
+                break;
+            }
+        }
+
+        unsigned upsilonInsertionPoint = block->size() - 1;
+        Origin upsilonOrigin = block->last()->origin();
+        for (BasicBlock* successorBlock : block->successorBlocks()) {
+            for (SSACalculator::Def* phiDef : ssa.phisForBlock(successorBlock)) {
+                Value* phi = phiDef->value();
+                SSACalculator::Variable* calcVar = phiDef->variable();
+                Variable* variable = calcVarToVariable[calcVar->index()];
+
+                Value* mappedValue = mapping[variable];
+                if (verbose) {
+                    dataLog(
+                        "Mapped value for ", *variable, " with successor Phi ", *phi,
+                        " at end of ", *block, ": ", pointerDump(mappedValue), "\n");
+                }
+                
+                if (!mappedValue)
+                    mappedValue = insertionSet.insertBottom(upsilonInsertionPoint, phi);
+                
+                insertionSet.insert<UpsilonValue>(
+                    upsilonInsertionPoint, upsilonOrigin, mappedValue, phi);
+            }
+        }
+
+        insertionSet.execute(block);
+    }
+
+    if (verbose) {
+        dataLog("B3 after SSA conversion:\n");
+        dataLog(proc);
+    }
+
+    return true;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FixSSA.h b/Source/JavaScriptCore/b3/B3FixSSA.h
new file mode 100644
index 000000000..775c32237
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FixSSA.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include <wtf/IndexSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Turns all mentions of the given values into accesses to variables. This is meant to be used
+// from phases that don't like SSA for whatever reason.
+void demoteValues(Procedure&, const IndexSet<Value>&);
+
+// This fixes SSA for you. Use this after you have done demoteValues() and you have performed
+// whatever evil transformation you needed.
+bool fixSSA(Procedure&);
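+
+// Typical use, as a sketch (names are hypothetical):
+//
+//     IndexSet<Value> valuesToDemote;
+//     // ... collect the values that cannot stay in SSA form ...
+//     demoteValues(proc, valuesToDemote);
+//     // ... perform the non-SSA-friendly transformation ...
+//     fixSSA(proc);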
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3FoldPathConstants.cpp b/Source/JavaScriptCore/b3/B3FoldPathConstants.cpp
new file mode 100644
index 000000000..24a01340b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FoldPathConstants.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3FoldPathConstants.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class FoldPathConstants {
+public:
+    FoldPathConstants(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    void run()
+    {
+        bool changed = false;
+
+        if (verbose)
+            dataLog("B3 before folding path constants: \n", m_proc, "\n");
+        
+        // Find all of the values that are the subject of a branch or switch. For any successor
+        // that we dominate, install a value override at that block.
+
+        HashMap<Value*, Vector<Override>> overrides;
+
+        Dominators& dominators = m_proc.dominators();
+        
+        auto addOverride = [&] (
+            BasicBlock* from, Value* value, const Override& override) {
+
+            if (override.block->numPredecessors() != 1)
+                return;
+            ASSERT(override.block->predecessor(0) == from);
+
+            Vector<Override>& forValue =
+                overrides.add(value, Vector<Override>()).iterator->value;
+
+            if (!ASSERT_DISABLED) {
+                for (const Override& otherOverride : forValue)
+                    ASSERT_UNUSED(otherOverride, otherOverride.block != override.block);
+            }
+
+            if (verbose)
+                dataLog("Overriding ", *value, " from ", *from, ": ", override, "\n");
+            
+            forValue.append(override);
+        };
+        
+        for (BasicBlock* block : m_proc) {
+            Value* branch = block->last();
+            switch (branch->opcode()) {
+            case Branch:
+                if (block->successorBlock(0) == block->successorBlock(1))
+                    continue;
+                addOverride(
+                    block, branch->child(0),
+                    Override::nonZero(block->successorBlock(0)));
+                addOverride(
+                    block, branch->child(0),
+                    Override::constant(block->successorBlock(1), 0));
+                break;
+            case Switch: {
+                HashMap<BasicBlock*, unsigned> targetUses;
+                for (const SwitchCase& switchCase : branch->as<SwitchValue>()->cases(block))
+                    targetUses.add(switchCase.targetBlock(), 0).iterator->value++;
+
+                for (const SwitchCase& switchCase : branch->as<SwitchValue>()->cases(block)) {
+                    if (targetUses.find(switchCase.targetBlock())->value != 1)
+                        continue;
+
+                    addOverride(
+                        block, branch->child(0),
+                        Override::constant(switchCase.targetBlock(), switchCase.caseValue()));
+                }
+                break;
+            }
+            default:
+                break;
+            }
+        }
+
+        // Install the constants in the override blocks. We use one-shot insertion sets because
+        // each block will get at most one thing inserted into it anyway.
+        for (auto& entry : overrides) {
+            for (Override& override : entry.value) {
+                if (!override.hasValue)
+                    continue;
+                override.valueNode =
+                    m_insertionSet.insertIntConstant(0, entry.key, override.value);
+                m_insertionSet.execute(override.block);
+            }
+        }
+
+        // Replace all uses of a value that has an override with that override, if appropriate.
+        // Certain instructions get special treatment.
+        auto getOverride = [&] (BasicBlock* block, Value* value) -> Override {
+            auto iter = overrides.find(value);
+            if (iter == overrides.end())
+                return Override();
+
+            Vector<Override>& forValue = iter->value;
+            Override result;
+            for (Override& override : forValue) {
+                if (dominators.dominates(override.block, block)
+                    && override.isBetterThan(result))
+                    result = override;
+            }
+
+            if (verbose)
+                dataLog("In block ", *block, " getting override for ", *value, ": ", result, "\n");
+
+            return result;
+        };
+        
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                Value* value = block->at(valueIndex);
+
+                switch (value->opcode()) {
+                case Branch: {
+                    if (getOverride(block, value->child(0)).isNonZero) {
+                        value->replaceWithJump(block, block->taken());
+                        changed = true;
+                    }
+                    break;
+                }
+
+                case Equal: {
+                    if (value->child(1)->isInt(0)
+                        && getOverride(block, value->child(0)).isNonZero) {
+                        value->replaceWithIdentity(
+                            m_insertionSet.insertIntConstant(valueIndex, value, 0));
+                    }
+                    break;
+                }
+
+                case NotEqual: {
+                    if (value->child(1)->isInt(0)
+                        && getOverride(block, value->child(0)).isNonZero) {
+                        value->replaceWithIdentity(
+                            m_insertionSet.insertIntConstant(valueIndex, value, 1));
+                    }
+                    break;
+                }
+
+                default:
+                    break;
+                }
+
+                for (Value*& child : value->children()) {
+                    Override override = getOverride(block, child);
+                    if (override.valueNode)
+                        child = override.valueNode;
+                }
+            }
+            m_insertionSet.execute(block);
+        }
+
+        if (changed) {
+            m_proc.resetReachability();
+            m_proc.invalidateCFG();
+        }
+    }
+    
+private:
+    struct Override {
+        Override()
+        {
+        }
+
+        static Override constant(BasicBlock* block, int64_t value)
+        {
+            Override result;
+            result.block = block;
+            result.hasValue = true;
+            result.value = value;
+            if (value)
+                result.isNonZero = true;
+            return result;
+        }
+
+        static Override nonZero(BasicBlock* block)
+        {
+            Override result;
+            result.block = block;
+            result.isNonZero = true;
+            return result;
+        }
+
+        bool isBetterThan(const Override& override)
+        {
+            if (hasValue && !override.hasValue)
+                return true;
+            if (isNonZero && !override.isNonZero)
+                return true;
+            return false;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print("{block = ", pointerDump(block), ", value = ");
+            if (hasValue)
+                out.print(value);
+            else
+                out.print("");
+            out.print(", isNonZero = ", isNonZero);
+            if (valueNode)
+                out.print(", valueNode = ", *valueNode);
+            out.print("}");
+        }
+
+        BasicBlock* block { nullptr };
+        bool hasValue { false };
+        bool isNonZero { false };
+        int64_t value { 0 };
+        Value* valueNode { nullptr };
+    };
+
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void foldPathConstants(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "foldPathConstants");
+    FoldPathConstants foldPathConstants(proc);
+    foldPathConstants.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FoldPathConstants.h b/Source/JavaScriptCore/b3/B3FoldPathConstants.h
new file mode 100644
index 000000000..a55c770b9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FoldPathConstants.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Does very basic simplification of uses of values that were branched on by a dominating branch.
+
+void foldPathConstants(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
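
As a hand-written illustration (not generated output): when a block is the unique taken successor of a Branch, re-tests of the branched-on value inside it fold to constants:

    // Before:
    //   BB0: Branch(@x, then = BB1, else = BB2)
    //   BB1: @t = NotEqual(@x, 0)          // BB1 only runs when @x is non-zero
    //        Branch(@t, ...)
    //
    // After foldPathConstants():
    //   BB0: Branch(@x, then = BB1, else = BB2)
    //   BB1: @t = Const(1)                 // proven by the dominating branch;
    //        Branch(@t, ...)               // reduceStrength() can then kill the branch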
diff --git a/Source/JavaScriptCore/b3/B3FrequencyClass.cpp b/Source/JavaScriptCore/b3/B3FrequencyClass.cpp
new file mode 100644
index 000000000..816850c59
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FrequencyClass.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3FrequencyClass.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, FrequencyClass frequency)
+{
+    switch (frequency) {
+    case FrequencyClass::Normal:
+        out.print("Normal");
+        return;
+    case FrequencyClass::Rare:
+        out.print("Rare");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FrequencyClass.h b/Source/JavaScriptCore/b3/B3FrequencyClass.h
new file mode 100644
index 000000000..607c439ac
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FrequencyClass.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+enum class FrequencyClass : uint8_t {
+    // We don't have any hypothesis about the frequency of this control flow construct. This is
+    // the common case. We can still use basic block frequency in this case.
+    Normal,
+
+    // We expect that this control flow construct will be reached super rarely. It's valid to
+    // perform optimizations that punish Rare code. Note that there will be situations where you
+    // have to somehow construct a new frequency class from a merging of multiple classes. When
+    // this happens, never choose Rare; always go with Normal. This is necessary because we
+    // really do punish Rare code very badly.
+    Rare
+};
+
+inline FrequencyClass maxFrequency(FrequencyClass a, FrequencyClass b)
+{
+    if (a == FrequencyClass::Normal)
+        return FrequencyClass::Normal;
+    return b;
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::FrequencyClass);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
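
A quick sanity check of the merge rule above, as it applies when the frequencies of two merged CFG edges must be combined:

    // Any merge involving Normal stays Normal, because Rare code is punished
    // heavily and must never capture a potentially hot path.
    ASSERT(maxFrequency(FrequencyClass::Normal, FrequencyClass::Rare) == FrequencyClass::Normal);
    ASSERT(maxFrequency(FrequencyClass::Rare, FrequencyClass::Normal) == FrequencyClass::Normal);
    ASSERT(maxFrequency(FrequencyClass::Rare, FrequencyClass::Rare) == FrequencyClass::Rare);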
diff --git a/Source/JavaScriptCore/b3/B3FrequentedBlock.h b/Source/JavaScriptCore/b3/B3FrequentedBlock.h
new file mode 100644
index 000000000..9b63ff4fd
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FrequentedBlock.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3GenericFrequentedBlock.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+
+typedef GenericFrequentedBlock<BasicBlock> FrequentedBlock;
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Generate.cpp b/Source/JavaScriptCore/b3/B3Generate.cpp
new file mode 100644
index 000000000..e328c6a9e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Generate.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Generate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerate.h"
+#include "AirInstInlines.h"
+#include "B3Common.h"
+#include "B3DuplicateTails.h"
+#include "B3EliminateCommonSubexpressions.h"
+#include "B3FixSSA.h"
+#include "B3FoldPathConstants.h"
+#include "B3InferSwitches.h"
+#include "B3LegalizeMemoryOffsets.h"
+#include "B3LowerMacros.h"
+#include "B3LowerMacrosAfterOptimizations.h"
+#include "B3LowerToAir.h"
+#include "B3MoveConstants.h"
+#include "B3Procedure.h"
+#include "B3ReduceDoubleToFloat.h"
+#include "B3ReduceStrength.h"
+#include "B3TimingScope.h"
+#include "B3Validate.h"
+#include "PCToCodeOriginMap.h"
+
+namespace JSC { namespace B3 {
+
+void prepareForGeneration(Procedure& procedure, unsigned optLevel)
+{
+    TimingScope timingScope("prepareForGeneration");
+
+    generateToAir(procedure, optLevel);
+    Air::prepareForGeneration(procedure.code());
+}
+
+void generate(Procedure& procedure, CCallHelpers& jit)
+{
+    Air::generate(procedure.code(), jit);
+}
+
+void generateToAir(Procedure& procedure, unsigned optLevel)
+{
+    TimingScope timingScope("generateToAir");
+    
+    if (shouldDumpIR(B3Mode) && !shouldDumpIRAtEachPhase(B3Mode)) {
+        dataLog("Initial B3:\n");
+        dataLog(procedure);
+    }
+
+    // We don't require the incoming IR to have predecessors computed.
+    procedure.resetReachability();
+    
+    if (shouldValidateIR())
+        validate(procedure);
+
+    if (optLevel >= 1) {
+        reduceDoubleToFloat(procedure);
+        reduceStrength(procedure);
+        eliminateCommonSubexpressions(procedure);
+        inferSwitches(procedure);
+        duplicateTails(procedure);
+        fixSSA(procedure);
+        foldPathConstants(procedure);
+        
+        // FIXME: Add more optimizations here.
+        // https://bugs.webkit.org/show_bug.cgi?id=150507
+    }
+
+    lowerMacros(procedure);
+
+    if (optLevel >= 1) {
+        reduceStrength(procedure);
+
+        // FIXME: Add more optimizations here.
+        // https://bugs.webkit.org/show_bug.cgi?id=150507
+    }
+
+    lowerMacrosAfterOptimizations(procedure);
+    legalizeMemoryOffsets(procedure);
+    moveConstants(procedure);
+
+    // FIXME: We should run pureCSE here to clean up some platform specific changes from the previous phases.
+    // https://bugs.webkit.org/show_bug.cgi?id=164873
+
+    if (shouldValidateIR())
+        validate(procedure);
+    
+    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
+    // Note that lowerToAir() acts like a phase in this regard.
+    if (shouldDumpIR(B3Mode) && !shouldDumpIRAtEachPhase(B3Mode)) {
+        dataLog("B3 after ", procedure.lastPhaseName(), ", before generation:\n");
+        dataLog(procedure);
+    }
+
+    lowerToAir(procedure);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Generate.h b/Source/JavaScriptCore/b3/B3Generate.h
new file mode 100644
index 000000000..2ffcd0ea8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Generate.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC {
+
+class CCallHelpers;
+
+namespace B3 {
+
+class Procedure;
+namespace Air { class Code; }
+
+// This takes a B3::Procedure, optimizes it in-place, lowers it to Air, and prepares the Air for
+// generation.
+JS_EXPORT_PRIVATE void prepareForGeneration(Procedure&, unsigned optLevel = 1);
+
+// This takes a B3::Procedure that has been prepared for generation (i.e. it has been lowered to Air and
+// the Air has been prepared for generation) and generates it. This is the equivalent of calling
+// Air::generate() on the Procedure::code().
+JS_EXPORT_PRIVATE void generate(Procedure&, CCallHelpers&);
+
+// This takes a B3::Procedure, optimizes it in-place, and lowers it to Air. You can then generate
+// the Air to machine code using Air::prepareForGeneration() and Air::generate() on the Procedure's
+// code().
+void generateToAir(Procedure&, unsigned optLevel = 1);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
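
A compressed sketch of the client flow these three entry points imply, modeled on testb3-style usage; the VM handle and the final linking step are assumed context, not part of this API:

    // Build a trivial procedure that returns 42, then compile it.
    Procedure proc;
    BasicBlock* root = proc.addBlock();
    root->appendNew<Value>(
        proc, Return, Origin(),
        root->appendNew<Const32Value>(proc, Origin(), 42));

    prepareForGeneration(proc);   // optimize, lower to Air, prepare the Air
    CCallHelpers jit(&vm);        // assumes a VM named vm is in scope
    generate(proc, jit);          // emit machine code into the assembler
    // ... link with a LinkBuffer to obtain a callable code pointer ...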
diff --git a/Source/JavaScriptCore/b3/B3GenericFrequentedBlock.h b/Source/JavaScriptCore/b3/B3GenericFrequentedBlock.h
new file mode 100644
index 000000000..1c5e75cfe
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3GenericFrequentedBlock.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequencyClass.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// A frequented block is a tuple of BasicBlock* and FrequencyClass. It's usually used as a
+// successor edge.
+
+template<typename BasicBlock>
+class GenericFrequentedBlock {
+public:
+    GenericFrequentedBlock(
+        BasicBlock* block = nullptr, FrequencyClass frequency = FrequencyClass::Normal)
+        : m_block(block)
+        , m_frequency(frequency)
+    {
+    }
+
+    bool operator==(const GenericFrequentedBlock& other) const
+    {
+        return m_block == other.m_block
+            && m_frequency == other.m_frequency;
+    }
+
+    bool operator!=(const GenericFrequentedBlock& other) const
+    {
+        return !(*this == other);
+    }
+
+    explicit operator bool() const
+    {
+        return *this != GenericFrequentedBlock();
+    }
+
+    BasicBlock* block() const { return m_block; }
+    BasicBlock*& block() { return m_block; }
+    FrequencyClass frequency() const { return m_frequency; }
+    FrequencyClass& frequency() { return m_frequency; }
+
+    bool isRare() const { return frequency() == FrequencyClass::Rare; }
+
+    void dump(PrintStream& out) const
+    {
+        if (frequency() != FrequencyClass::Normal)
+            out.print(frequency(), ":");
+        out.print(pointerDump(m_block));
+    }
+
+private:
+    BasicBlock* m_block;
+    FrequencyClass m_frequency;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
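
For example, a cold successor edge can be tagged at construction and queried later (handlerBlock here is a hypothetical BasicBlock*):

    FrequentedBlock coldEdge(handlerBlock, FrequencyClass::Rare);
    ASSERT(coldEdge.isRare());
    dataLog(coldEdge, "\n"); // prints "Rare:" followed by the block; Normal edges print just the block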
diff --git a/Source/JavaScriptCore/b3/B3HeapRange.cpp b/Source/JavaScriptCore/b3/B3HeapRange.cpp
new file mode 100644
index 000000000..a5768f9f6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3HeapRange.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3HeapRange.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+void HeapRange::dump(PrintStream& out) const
+{
+    if (*this == HeapRange()) {
+        out.print("Bottom");
+        return;
+    }
+    if (*this == top()) {
+        out.print("Top");
+        return;
+    }
+    out.print(m_begin, "...", m_end);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3HeapRange.h b/Source/JavaScriptCore/b3/B3HeapRange.h
new file mode 100644
index 000000000..03866bdab
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3HeapRange.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <limits.h>
+#include <wtf/MathExtras.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// Alias analysis in B3 is done by checking if two integer ranges overlap. This is powerful enough
+// to be used for TBAA-style alias analysis used by the DFG, FTL, and LLVM: you just turn each node
+// in the tree of abstract heaps into a pre/post range.
+//
+// Note that the 'begin' is inclusive, while the 'end' is exclusive. These two ranges are non-
+// overlapping:
+//
+//     rangeA = 0...8
+//     rangeB = 8...16
+
+class HeapRange {
+public:
+    typedef unsigned Type;
+    
+    HeapRange()
+        : m_begin(0)
+        , m_end(0)
+    {
+    }
+
+    explicit HeapRange(unsigned value)
+        : m_begin(value)
+        , m_end(value + 1)
+    {
+        ASSERT(m_end >= m_begin);
+    }
+
+    HeapRange(unsigned begin, unsigned end)
+        : m_begin(begin)
+        , m_end(end)
+    {
+        ASSERT(m_end >= m_begin);
+        if (m_begin == m_end) {
+            // Canonicalize empty ranges.
+            m_begin = 0;
+            m_end = 0;
+        }
+    }
+
+    static HeapRange top()
+    {
+        return HeapRange(0, UINT_MAX);
+    }
+
+    bool operator==(const HeapRange& other) const
+    {
+        return m_begin == other.m_begin
+            && m_end == other.m_end;
+    }
+
+    bool operator!=(const HeapRange& other) const
+    {
+        return !(*this == other);
+    }
+    
+    explicit operator bool() const { return m_begin != m_end; }
+
+    unsigned begin() const { return m_begin; }
+    unsigned end() const { return m_end; }
+
+    bool overlaps(const HeapRange& other) const
+    {
+        return WTF::rangesOverlap(m_begin, m_end, other.m_begin, other.m_end);
+    }
+
+    JS_EXPORT_PRIVATE void dump(PrintStream& out) const;
+
+private:
+    unsigned m_begin;
+    unsigned m_end;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
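
Given the half-open convention documented above, adjacent ranges do not alias:

    HeapRange a(0, 8);
    HeapRange b(8, 16);
    ASSERT(!a.overlaps(b));               // adjacent half-open ranges are disjoint
    ASSERT(a.overlaps(HeapRange(7, 9)));  // straddles the boundary at 8
    ASSERT(HeapRange::top().overlaps(a)); // top spans 0...UINT_MAX
    ASSERT(!HeapRange().overlaps(a));     // the empty (bottom) range aliases nothing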
diff --git a/Source/JavaScriptCore/b3/B3InferSwitches.cpp b/Source/JavaScriptCore/b3/B3InferSwitches.cpp
new file mode 100644
index 000000000..2f1781241
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InferSwitches.cpp
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3InferSwitches.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class InferSwitches {
+public:
+    InferSwitches(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+        , m_useCounts(proc)
+    {
+    }
+    
+    bool run()
+    {
+        if (verbose)
+            dataLog("B3 before inferSwitches:\n", m_proc);
+        
+        bool changed = true;
+        bool everChanged = false;
+        while (changed) {
+            changed = false;
+            
+            if (verbose)
+                dataLog("Performing fixpoint iteration:\n");
+            
+            for (BasicBlock* block : m_proc)
+                changed |= attemptToMergeWithPredecessor(block);
+
+            everChanged |= changed;
+        }
+        
+        if (everChanged) {
+            m_proc.resetReachability();
+            m_proc.invalidateCFG();
+            
+            m_proc.deleteOrphans();
+            
+            if (verbose)
+                dataLog("B3 after inferSwitches:\n", m_proc);
+            return true;
+        }
+        
+        return false;
+    }
+    
+private:
+    bool attemptToMergeWithPredecessor(BasicBlock* block)
+    {
+        // No point in considering the root block. We also don't consider blocks with multiple
+        // predecessors, but we could handle this if we made this code a bit more general and we were
+        // not afraid of code bloat.
+        if (block->numPredecessors() != 1)
+            return false;
+        
+        SwitchDescription description = describe(block);
+        if (verbose)
+            dataLog("Description of primary block ", *block, ": ", description, "\n");
+        if (!description) {
+            if (verbose)
+                dataLog("    Bailing because not switch-like.\n");
+            return false;
+        }
+        
+        // We know that this block behaves like a switch. But we need to verify that it doesn't also
+        // perform any effects or do expensive things. We don't want to create a switch if that will
+        // make expensive things execute unconditionally. We're very conservative about how we define
+        // "expensive".
+        for (Value* value : *block) {
+            if (value->isFree())
+                continue;
+            if (value == description.extra)
+                continue;
+            if (value == description.branch)
+                continue;
+            if (verbose)
+                dataLog("    Bailing because of ", deepDump(m_proc, value), "\n");
+            return false;
+        }
+        
+        BasicBlock* predecessor = block->predecessor(0);
+        SwitchDescription predecessorDescription = describe(predecessor);
+        if (verbose)
+            dataLog("    Description of predecessor block ", *predecessor, ": ", predecessorDescription, "\n");
+        if (!predecessorDescription) {
+            if (verbose)
+                dataLog("    Bailing because not switch-like.\n");
+            return false;
+        }
+        
+        // Both us and the predecessor are switch-like, but that doesn't mean that we're compatible.
+        // We may be switching on different values!
+        if (description.source != predecessorDescription.source) {
+            if (verbose)
+                dataLog("    Bailing because sources don't match.\n");
+            return false;
+        }
+        
+        // We expect that we are the fall-through destination of the predecessor. This is a bit of a
+        // goofy condition. If we were not the fall-through destination then our switch is probably
+        // just totally redundant and we should be getting rid of it. But we don't handle that here,
+        // yet.
+        if (predecessorDescription.fallThrough.block() != block) {
+            if (verbose)
+                dataLog("    Bailing because fall-through of predecessor is not the primary block.\n");
+            return false;
+        }
+        
+        // Make sure that there ain't no loops.
+        if (description.fallThrough.block() == block
+            || description.fallThrough.block() == predecessor) {
+            if (verbose)
+                dataLog("    Bailing because of fall-through loop.\n");
+            return false;
+        }
+        for (SwitchCase switchCase : description.cases) {
+            if (switchCase.targetBlock() == block
+                || switchCase.targetBlock() == predecessor) {
+                if (verbose)
+                    dataLog("    Bailing because of loop in primary cases.\n");
+                return false;
+            }
+        }
+        for (SwitchCase switchCase : predecessorDescription.cases) {
+            if (switchCase.targetBlock() == block
+                || switchCase.targetBlock() == predecessor) {
+                if (verbose)
+                    dataLog("    Bailing because of loop in predecessor cases.\n");
+                return false;
+            }
+        }
+        
+        if (verbose)
+            dataLog("    Doing it!\n");
+        // We're committed to doing the thing.
+        
+        // Delete the extra value from the predecessor, since that would break downstream inference
+        // on the next fixpoint iteration. We would think that this block is too expensive to merge
+        // because of the Equal or NotEqual value even though that value is dead! We know it's dead
+        // so we kill it ourselves.
+        for (Value* value : *predecessor) {
+            if (value == predecessorDescription.extra)
+                value->replaceWithNopIgnoringType();
+        }
+        
+        // Insert all non-terminal values from our block into our predecessor. We definitely need to
+        // do this for constants. We must not do it for the extra value, since that would break
+        // downstream inference on the next fixpoint iteration. As a bonus, we don't do it for nops,
+        // so that we limit how big blocks get in this phase.
+        for (unsigned i = 0; i < block->size() - 1; ++i) {
+            Value* value = block->at(i);
+            if (value != description.extra && value->opcode() != Nop)
+                m_insertionSet.insertValue(predecessor->size() - 1, value);
+        }
+        m_insertionSet.execute(predecessor);
+        block->values().resize(0);
+        block->appendNew<Value>(m_proc, Oops, description.branch->origin());
+        block->removePredecessor(predecessor);
+        
+        for (BasicBlock* successorBlock : description.block->successorBlocks())
+            successorBlock->replacePredecessor(block, predecessor);
+
+        block->clearSuccessors();
+        
+        SwitchValue* switchValue = predecessor->replaceLastWithNew<SwitchValue>(
+            m_proc, predecessor->last()->origin(), description.source);
+        predecessor->clearSuccessors();
+        switchValue->setFallThrough(description.fallThrough);
+        
+        Vector<int64_t> predecessorCases;
+        for (SwitchCase switchCase : predecessorDescription.cases) {
+            switchValue->appendCase(switchCase);
+            predecessorCases.append(switchCase.caseValue());
+        }
+        std::sort(predecessorCases.begin(), predecessorCases.end());
+        auto isPredecessorCase = [&] (int64_t value) -> bool {
+            return !!tryBinarySearch(
+                predecessorCases, predecessorCases.size(), value,
+                [] (int64_t* element) -> int64_t { return *element; });
+        };
+        
+        for (SwitchCase switchCase : description.cases) {
+            if (!isPredecessorCase(switchCase.caseValue()))
+                switchValue->appendCase(switchCase);
+        }
+        return true;
+    }
+
+    struct SwitchDescription {
+        SwitchDescription()
+        {
+        }
+        
+        explicit operator bool() { return !!block; }
+        
+        void dump(PrintStream& out) const
+        {
+            out.print(
+                "{block = ", pointerDump(block),
+                ", branch = ", pointerDump(branch),
+                ", extra = ", pointerDump(extra),
+                ", source = ", pointerDump(source),
+                ", cases = ", listDump(cases),
+                ", fallThrough = ", fallThrough, "}");
+        }
+
+        BasicBlock* block { nullptr };
+        Value* branch { nullptr };
+        Value* extra { nullptr }; // This is the Equal or NotEqual value, if applicable.
+        Value* source { nullptr };
+        Vector<SwitchCase> cases;
+        FrequentedBlock fallThrough;
+    };
+    
+    SwitchDescription describe(BasicBlock* block)
+    {
+        SwitchDescription result;
+        result.block = block;
+        result.branch = block->last();
+        
+        switch (result.branch->opcode()) {
+        case Branch: {
+            Value* predicate = result.branch->child(0);
+            FrequentedBlock taken = result.block->taken();
+            FrequentedBlock notTaken = result.block->notTaken();
+            bool handled = false;
+            // NOTE: This uses UseCounts that we computed before any transformation. This is fine
+            // because although we may have mutated the IR, we would not have added any new
+            // predicates.
+            if (predicate->numChildren() == 2
+                && predicate->child(1)->hasInt()
+                && m_useCounts.numUses(predicate) == 1) {
+                switch (predicate->opcode()) {
+                case Equal:
+                    result.source = predicate->child(0);
+                    result.extra = predicate;
+                    result.cases.append(SwitchCase(predicate->child(1)->asInt(), taken));
+                    result.fallThrough = notTaken;
+                    handled = true;
+                    break;
+                case NotEqual:
+                    result.source = predicate->child(0);
+                    result.extra = predicate;
+                    result.cases.append(SwitchCase(predicate->child(1)->asInt(), notTaken));
+                    result.fallThrough = taken;
+                    handled = true;
+                    break;
+                default:
+                    break;
+                }
+            }
+            if (handled)
+                break;
+            result.source = predicate;
+            result.cases.append(SwitchCase(0, notTaken));
+            result.fallThrough = taken;
+            break;
+        }
+            
+        case Switch: {
+            SwitchValue* switchValue = result.branch->as<SwitchValue>();
+            result.source = switchValue->child(0);
+            for (SwitchCase switchCase : switchValue->cases(result.block))
+                result.cases.append(switchCase);
+            result.fallThrough = result.block->fallThrough();
+            break;
+        }
+            
+        default:
+            result.block = nullptr;
+            result.branch = nullptr;
+            break;
+        }
+        
+        return result;
+    }
+    
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+    UseCounts m_useCounts;
+};
+
+} // anonymous namespace
+
+bool inferSwitches(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "inferSwitches");
+    InferSwitches inferSwitches(proc);
+    return inferSwitches.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3InferSwitches.h b/Source/JavaScriptCore/b3/B3InferSwitches.h
new file mode 100644
index 000000000..d0466f840
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InferSwitches.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Fixpoints to convert chains of branches into switches.
+
+bool inferSwitches(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
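
A hand-written illustration of what the fixpoint does; each iteration merges one equality test into its single-predecessor successor until one Switch remains:

    // Before (a chain of Equal branches on the same @x):
    //   BB0: @a = Equal(@x, 1); Branch(@a, then = BBA, else = BB1)
    //   BB1: @b = Equal(@x, 2); Branch(@b, then = BBB, else = BB2)
    //
    // After inferSwitches():
    //   BB0: Switch(@x, cases = [1 => BBA, 2 => BBB], fallThrough = BB2)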
diff --git a/Source/JavaScriptCore/b3/B3InsertionSet.cpp b/Source/JavaScriptCore/b3/B3InsertionSet.cpp
new file mode 100644
index 000000000..a6e119fa2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InsertionSet.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3InsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 {
+
+Value* InsertionSet::insertIntConstant(size_t index, Origin origin, Type type, int64_t value)
+{
+    return insertValue(index, m_procedure.addIntConstant(origin, type, value));
+}
+
+Value* InsertionSet::insertIntConstant(size_t index, Value* likeValue, int64_t value)
+{
+    return insertIntConstant(index, likeValue->origin(), likeValue->type(), value);
+}
+
+Value* InsertionSet::insertBottom(size_t index, Origin origin, Type type)
+{
+    Value*& bottom = m_bottomForType[type];
+    if (!bottom)
+        bottom = insertValue(index, m_procedure.addBottom(origin, type));
+    return bottom;
+}
+
+Value* InsertionSet::insertBottom(size_t index, Value* likeValue)
+{
+    return insertBottom(index, likeValue->origin(), likeValue->type());
+}
+
+void InsertionSet::execute(BasicBlock* block)
+{
+    bubbleSort(m_insertions.begin(), m_insertions.end());
+    executeInsertions(block->m_values, m_insertions);
+    m_bottomForType = TypeMap<Value*>();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3InsertionSet.h b/Source/JavaScriptCore/b3/B3InsertionSet.h
new file mode 100644
index 000000000..1eb527287
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InsertionSet.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+#include "B3Type.h"
+#include "B3TypeMap.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Procedure;
+class Value;
+
+typedef WTF::Insertion<Value*> Insertion;
+
+class InsertionSet {
+public:
+    InsertionSet(Procedure& procedure)
+        : m_procedure(procedure)
+    {
+    }
+
+    bool isEmpty() const { return m_insertions.isEmpty(); }
+
+    Procedure& code() { return m_procedure; }
+
+    void appendInsertion(const Insertion& insertion)
+    {
+        m_insertions.append(insertion);
+    }
+
+    Value* insertValue(size_t index, Value* value)
+    {
+        appendInsertion(Insertion(index, value));
+        return value;
+    }
+
+    template<typename ValueType, typename... Arguments>
+    ValueType* insert(size_t index, Arguments... arguments);
+
+    Value* insertIntConstant(size_t index, Origin, Type, int64_t value);
+    Value* insertIntConstant(size_t index, Value* likeValue, int64_t value);
+
+    Value* insertBottom(size_t index, Origin, Type);
+    Value* insertBottom(size_t index, Value*);
+
+    void execute(BasicBlock*);
+
+private:
+    Procedure& m_procedure;
+    Vector<Insertion> m_insertions;
+
+    TypeMap<Value*> m_bottomForType;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
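
The intended pattern, visible throughout this patch: queue insertions by index while iterating a block, then splice them all in with one execute() call (the predicate below is hypothetical):

    InsertionSet insertionSet(proc);
    for (unsigned index = 0; index < block->size(); ++index) {
        Value* value = block->at(index);
        if (wantsMaterializedZero(value)) // hypothetical predicate
            value->child(1) = insertionSet.insertIntConstant(index, value, 0);
    }
    insertionSet.execute(block); // performs all queued insertions in index order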
diff --git a/Source/JavaScriptCore/b3/B3InsertionSetInlines.h b/Source/JavaScriptCore/b3/B3InsertionSetInlines.h
new file mode 100644
index 000000000..c5b03df03
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InsertionSetInlines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3InsertionSet.h"
+#include "B3ProcedureInlines.h"
+
+namespace JSC { namespace B3 {
+
+template<typename ValueType, typename... Arguments>
+ValueType* InsertionSet::insert(size_t index, Arguments... arguments)
+{
+    return static_cast<ValueType*>(insertValue(index, m_procedure.add<ValueType>(arguments...)));
+}
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Kind.cpp b/Source/JavaScriptCore/b3/B3Kind.cpp
new file mode 100644
index 000000000..147ab23c6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Kind.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Kind.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 {
+
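+// Dumps the opcode followed by any extra bits in angle brackets. For example, given the
+// flags defined in B3Kind.h, chill(Mod) should print as "Mod<Chill>" and a trapping Load
+// as "Load<Traps>".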
+void Kind::dump(PrintStream& out) const
+{
+    out.print(m_opcode);
+    
+    CommaPrinter comma(", ", "<");
+    if (isChill())
+        out.print(comma, "Chill");
+    if (traps())
+        out.print(comma, "Traps");
+    if (comma.didPrint())
+        out.print(">");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Kind.h b/Source/JavaScriptCore/b3/B3Kind.h
new file mode 100644
index 000000000..268c8e766
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Kind.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef B3Kind_h
+#define B3Kind_h
+
+#if ENABLE(B3_JIT)
+
+#include "B3Opcode.h"
+#include <wtf/HashTable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// A Kind is a terse summary of what a Value does. There is a fixed number of possible
+// Kinds. Kind is a tuple of Opcode (see B3Opcode.h) and some extra bits. Most opcodes don't
+// get any extra bits, and those bits must remain zero if the Kind's opcode field is set to
+// one of those opcodes. The purpose of Kind is to be like an opcode in other IRs, but to
+// be multidimensional. For example, a Load has many dimensions of customization that we may
+// eventually implement. A Load can have different alignments, alignment failure modes,
+// temporality modes, trapping modes, ordering modes, etc. It's fine to put such flags into
+// subclasses of Value, but in some cases that would be overkill, particularly since if you
+// did that for a pure value then you'd also have to thread it through ValueKey. It's much
+// easier to put it in Kind, and then your extra bit will get carried around by everyone who
+// knows how to carry around Kinds. Most importantly, putting flags into Kind allows you to
+// use them as part of B3::Value's dynamic cast facility. For example we could have a
+// trapping Load that uses a Value subclass that has a stackmap while non-trapping Loads
+// continue to use the normal MemoryValue.
+//
+// Note that any code in the compiler that transcribes IR (like a strength reduction that
+// replaces an Add with a different Add, or even with a different opcode entirely) will
+// probably drop unknown bits by default. This is definitely not correct for many bits (like
+// isChill for Div/Mod and all of the envisioned Load/Store flags), so if you add a new bit
+// you will probably have to audit the compiler to make sure that phases that transcribe
+// your opcode do the right thing with your bit.
+
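+// For example (illustrative), both of these are plain Kinds, with no new Value subclass
+// required:
+//
+//     Kind a = chill(Mod);     // opcode Mod with the isChill bit set
+//     Kind b = trapping(Load); // opcode Load with the traps bit set
+//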
+class Kind {
+public:
+    Kind(Opcode opcode)
+        : m_opcode(opcode)
+        , m_isChill(false)
+        , m_traps(false)
+    {
+    }
+    
+    Kind()
+        : Kind(Oops)
+    {
+    }
+    
+    Opcode opcode() const { return m_opcode; }
+    void setOpcode(Opcode opcode) { m_opcode = opcode; }
+    
+    bool hasExtraBits() const { return m_isChill || m_traps; }
+    
+    // Chill bit. This applies to division-based arithmetic ops, which may trap on some
+    // platforms or exhibit bizarre behavior when passed certain inputs. The non-chill
+    // version will behave as unpredictably as it wants. For example, it's legal to
+    // constant-fold Div(x, 0) to any value or to replace it with any effectful operation.
+    // But when it's chill, that means that the semantics when it would have trapped are
+    // the JS semantics. For example, Div<Chill>(@a, @b) means:
+    //
+    //     ((a | 0) / (b | 0)) | 0
+    //
+    // And Mod<Chill>(@a, @b) means:
+    //
+    //     ((a | 0) % (b | 0)) | 0
+    //
+    // Note that Div<Chill> matches exactly how ARM handles integer division.
+    bool hasIsChill() const
+    {
+        switch (m_opcode) {
+        case Div:
+        case Mod:
+            return true;
+        default:
+            return false;
+        }
+    }
+    bool isChill() const
+    {
+        return m_isChill;
+    }
+    void setIsChill(bool isChill)
+    {
+        ASSERT(hasIsChill());
+        m_isChill = isChill;
+    }
+    
+    // Traps bit. This applies to memory access ops. It means that the instruction could
+    // trap as part of some check it performs, and that we mean to make this observable. This
+    // currently only applies to memory accesses (loads and stores). You don't get to find out where
+    // in the Procedure the trap happened. If you try to work it out using Origin, you'll have a bad
+    // time because the instruction selector is too sloppy with Origin().
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=162688
+    bool hasTraps() const
+    {
+        switch (m_opcode) {
+        case Load8Z:
+        case Load8S:
+        case Load16Z:
+        case Load16S:
+        case Load:
+        case Store8:
+        case Store16:
+        case Store:
+            return true;
+        default:
+            return false;
+        }
+    }
+    bool traps() const
+    {
+        return m_traps;
+    }
+    void setTraps(bool traps)
+    {
+        ASSERT(hasTraps());
+        m_traps = traps;
+    }
+    
+    // Rules for adding new properties:
+    // - Put the accessors here.
+    // - hasBlah() should check if the opcode allows for your property.
+    // - blah() returns a default value if !hasBlah()
+    // - setBlah() asserts if !hasBlah()
+    // - Try not to increase the size of Kind too much. But it wouldn't be the end of the
+    //   world if it bloated to 64 bits.
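+    //
+    // For instance, the isChill accessors above follow exactly this pattern: hasIsChill()
+    // switches on the opcode, isChill() just returns the bit, and setIsChill() asserts
+    // hasIsChill() before storing.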
+    
+    bool operator==(const Kind& other) const
+    {
+        return m_opcode == other.m_opcode
+            && m_isChill == other.m_isChill
+            && m_traps == other.m_traps;
+    }
+    
+    bool operator!=(const Kind& other) const
+    {
+        return !(*this == other);
+    }
+    
+    void dump(PrintStream&) const;
+    
+    unsigned hash() const
+    {
+        // It's almost certainly more important that this hash function is cheap to compute than
+        // anything else. We can live with some kind hash collisions.
+        return m_opcode + (static_cast<unsigned>(m_isChill) << 16) + (static_cast<unsigned>(m_traps) << 7);
+    }
+    
+    Kind(WTF::HashTableDeletedValueType)
+        : m_opcode(Oops)
+        , m_isChill(true)
+        , m_traps(false)
+    {
+    }
+    
+    bool isHashTableDeletedValue() const
+    {
+        return *this == Kind(WTF::HashTableDeletedValue);
+    }
+    
+private:
+    Opcode m_opcode;
+    bool m_isChill : 1;
+    bool m_traps : 1;
+};
+
+// For every flag 'foo' you add, it's customary to create a Kind B3::foo(Kind) function that makes
+// a kind with the flag set. For example, for chill, this lets us say:
+//
+//     block->appendNew<Value>(m_proc, chill(Mod), Origin(), a, b);
+//
+// I like to make the flag name fill in the sentence "Mod _____" (like "isChill" or "traps") while
+// the flag constructor fills in the phrase "_____ Mod" (like "chill" or "trapping").
+
+inline Kind chill(Kind kind)
+{
+    kind.setIsChill(true);
+    return kind;
+}
+
+inline Kind trapping(Kind kind)
+{
+    kind.setTraps(true);
+    return kind;
+}
+
+struct KindHash {
+    static unsigned hash(const Kind& key) { return key.hash(); }
+    static bool equal(const Kind& a, const Kind& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Kind> {
+    typedef JSC::B3::KindHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Kind> : public SimpleClassHashTraits<JSC::B3::Kind> {
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
+
+#endif // B3Kind_h
+
diff --git a/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp b/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp
new file mode 100644
index 000000000..8c17ff58e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3LegalizeMemoryOffsets.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LegalizeMemoryOffsets {
+public:
+    LegalizeMemoryOffsets(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    void run()
+    {
+        if (!isARM64())
+            return;
+
+        for (BasicBlock* block : m_proc) {
+            for (unsigned index = 0; index < block->size(); ++index) {
+                MemoryValue* memoryValue = block->at(index)->as<MemoryValue>();
+                if (!memoryValue)
+                    continue;
+
+                int32_t offset = memoryValue->offset();
+                Air::Arg::Width width = Air::Arg::widthForBytes(memoryValue->accessByteSize());
+                if (!Air::Arg::isValidAddrForm(offset, width)) {
+                    Value* base = memoryValue->lastChild();
+                    Value* offsetValue = m_insertionSet.insertIntConstant(index, memoryValue->origin(), pointerType(), offset);
+                    Value* resolvedAddress = m_proc.add<Value>(Add, memoryValue->origin(), base, offsetValue);
+                    m_insertionSet.insertValue(index, resolvedAddress);
+
+                    memoryValue->lastChild() = resolvedAddress;
+                    memoryValue->setOffset(0);
+                }
+            }
+            m_insertionSet.execute(block);
+        }
+    }
+
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void legalizeMemoryOffsets(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "legalizeMemoryOffsets");
+    LegalizeMemoryOffsets legalizeMemoryOffsets(proc);
+    legalizeMemoryOffsets.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h b/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h
new file mode 100644
index 000000000..c482ab230
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// If the offset of a MemoryValue cannot be represented in the target instruction set,
+// compute the address explicitly.
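+//
+// For example (illustrative), on ARM64 a load whose offset does not fit the addressing
+// mode is rewritten from Load(base, bigOffset) into:
+//
+//     address = Add(base, Const(bigOffset))
+//     Load(address, 0)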
+void legalizeMemoryOffsets(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3LowerMacros.cpp b/Source/JavaScriptCore/b3/B3LowerMacros.cpp
new file mode 100644
index 000000000..68415108d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerMacros.cpp
@@ -0,0 +1,500 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3LowerMacros.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3CCallValue.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3ConstPtrValue.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PatchpointValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "CCallHelpers.h"
+#include "LinkBuffer.h"
+#include <cmath>
+#include <wtf/BitVector.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LowerMacros {
+public:
+    LowerMacros(Procedure& proc)
+        : m_proc(proc)
+        , m_blockInsertionSet(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    bool run()
+    {
+        for (BasicBlock* block : m_proc) {
+            m_block = block;
+            processCurrentBlock();
+        }
+        m_changed |= m_blockInsertionSet.execute();
+        if (m_changed) {
+            m_proc.resetReachability();
+            m_proc.invalidateCFG();
+        }
+        return m_changed;
+    }
+    
+private:
+    void processCurrentBlock()
+    {
+        for (m_index = 0; m_index < m_block->size(); ++m_index) {
+            m_value = m_block->at(m_index);
+            m_origin = m_value->origin();
+            switch (m_value->opcode()) {
+            case Mod: {
+                if (m_value->isChill()) {
+                    if (isARM64()) {
+                        BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+                        BasicBlock* zeroDenCase = m_blockInsertionSet.insertBefore(m_block);
+                        BasicBlock* normalModCase = m_blockInsertionSet.insertBefore(m_block);
+
+                        before->replaceLastWithNew<Value>(m_proc, Branch, m_origin, m_value->child(1));
+                        before->setSuccessors(
+                            FrequentedBlock(normalModCase, FrequencyClass::Normal),
+                            FrequentedBlock(zeroDenCase, FrequencyClass::Rare));
+
+                        Value* divResult = normalModCase->appendNew<Value>(m_proc, chill(Div), m_origin, m_value->child(0), m_value->child(1));
+                        Value* multipliedBack = normalModCase->appendNew<Value>(m_proc, Mul, m_origin, divResult, m_value->child(1));
+                        Value* result = normalModCase->appendNew<Value>(m_proc, Sub, m_origin, m_value->child(0), multipliedBack);
+                        UpsilonValue* normalResult = normalModCase->appendNew<UpsilonValue>(m_proc, m_origin, result);
+                        normalModCase->appendNew<Value>(m_proc, Jump, m_origin);
+                        normalModCase->setSuccessors(FrequentedBlock(m_block));
+
+                        UpsilonValue* zeroResult = zeroDenCase->appendNew<UpsilonValue>(
+                            m_proc, m_origin,
+                            zeroDenCase->appendIntConstant(m_proc, m_value, 0));
+                        zeroDenCase->appendNew<Value>(m_proc, Jump, m_origin);
+                        zeroDenCase->setSuccessors(FrequentedBlock(m_block));
+
+                        Value* phi = m_insertionSet.insert<Value>(m_index, Phi, m_value->type(), m_origin);
+                        normalResult->setPhi(phi);
+                        zeroResult->setPhi(phi);
+                        m_value->replaceWithIdentity(phi);
+                        before->updatePredecessorsAfter();
+                        m_changed = true;
+                    } else
+                        makeDivisionChill(Mod);
+                    break;
+                }
+                
+                double (*fmodDouble)(double, double) = fmod;
+                if (m_value->type() == Double) {
+                    Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, fmodDouble);
+                    Value* result = m_insertionSet.insert<CCallValue>(m_index, Double, m_origin,
+                        Effects::none(),
+                        functionAddress,
+                        m_value->child(0),
+                        m_value->child(1));
+                    m_value->replaceWithIdentity(result);
+                    m_changed = true;
+                } else if (m_value->type() == Float) {
+                    Value* numeratorAsDouble = m_insertionSet.insert<Value>(m_index, FloatToDouble, m_origin, m_value->child(0));
+                    Value* denominatorAsDouble = m_insertionSet.insert<Value>(m_index, FloatToDouble, m_origin, m_value->child(1));
+                    Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, fmodDouble);
+                    Value* doubleMod = m_insertionSet.insert<CCallValue>(m_index, Double, m_origin,
+                        Effects::none(),
+                        functionAddress,
+                        numeratorAsDouble,
+                        denominatorAsDouble);
+                    Value* result = m_insertionSet.insert<Value>(m_index, DoubleToFloat, m_origin, doubleMod);
+                    m_value->replaceWithIdentity(result);
+                    m_changed = true;
+                } else if (isARM64()) {
+                    Value* divResult = m_insertionSet.insert<Value>(m_index, chill(Div), m_origin, m_value->child(0), m_value->child(1));
+                    Value* multipliedBack = m_insertionSet.insert<Value>(m_index, Mul, m_origin, divResult, m_value->child(1));
+                    Value* result = m_insertionSet.insert<Value>(m_index, Sub, m_origin, m_value->child(0), multipliedBack);
+                    m_value->replaceWithIdentity(result);
+                    m_changed = true;
+                }
+                break;
+            }
+
+            case UMod: {
+                if (isARM64()) {
+                    Value* divResult = m_insertionSet.insert<Value>(m_index, UDiv, m_origin, m_value->child(0), m_value->child(1));
+                    Value* multipliedBack = m_insertionSet.insert<Value>(m_index, Mul, m_origin, divResult, m_value->child(1));
+                    Value* result = m_insertionSet.insert<Value>(m_index, Sub, m_origin, m_value->child(0), multipliedBack);
+                    m_value->replaceWithIdentity(result);
+                    m_changed = true;
+                }
+                break;
+            }
+
+            case Div: {
+                if (m_value->isChill())
+                    makeDivisionChill(Div);
+                break;
+            }
+
+            case Switch: {
+                SwitchValue* switchValue = m_value->as<SwitchValue>();
+                Vector<SwitchCase> cases;
+                for (const SwitchCase& switchCase : switchValue->cases(m_block))
+                    cases.append(switchCase);
+                std::sort(
+                    cases.begin(), cases.end(),
+                    [] (const SwitchCase& left, const SwitchCase& right) {
+                        return left.caseValue() < right.caseValue();
+                    });
+                FrequentedBlock fallThrough = m_block->fallThrough();
+                m_block->values().removeLast();
+                recursivelyBuildSwitch(cases, fallThrough, 0, false, cases.size(), m_block);
+                m_proc.deleteValue(switchValue);
+                m_block->updatePredecessorsAfter();
+                m_changed = true;
+                break;
+            }
+
+            default:
+                break;
+            }
+        }
+        m_insertionSet.execute(m_block);
+    }
+
+    void makeDivisionChill(Opcode nonChillOpcode)
+    {
+        ASSERT(nonChillOpcode == Div || nonChillOpcode == Mod);
+
+        // ARM supports this instruction natively.
+        if (isARM64())
+            return;
+
+        // We implement "res = Div/Mod(num, den)" as follows:
+        //
+        //     if (den + 1 <=_unsigned 1) {
+        //         if (!den) {
+        //             res = 0;
+        //             goto done;
+        //         }
+        //         if (num == -2147483648) {
+        //             res = isDiv ? num : 0;
+        //             goto done;
+        //         }
+        //     }
+        //     res = num (/ or %) den;
+        // done:
+        m_changed = true;
+
+        Value* num = m_value->child(0);
+        Value* den = m_value->child(1);
+
+        Value* one = m_insertionSet.insertIntConstant(m_index, m_value, 1);
+        Value* isDenOK = m_insertionSet.insert<Value>(
+            m_index, Above, m_origin,
+            m_insertionSet.insert<Value>(m_index, Add, m_origin, den, one),
+            one);
+
+        BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+
+        BasicBlock* normalDivCase = m_blockInsertionSet.insertBefore(m_block);
+        BasicBlock* shadyDenCase = m_blockInsertionSet.insertBefore(m_block);
+        BasicBlock* zeroDenCase = m_blockInsertionSet.insertBefore(m_block);
+        BasicBlock* neg1DenCase = m_blockInsertionSet.insertBefore(m_block);
+        BasicBlock* intMinCase = m_blockInsertionSet.insertBefore(m_block);
+
+        before->replaceLastWithNew(m_proc, Branch, m_origin, isDenOK);
+        before->setSuccessors(
+            FrequentedBlock(normalDivCase, FrequencyClass::Normal),
+            FrequentedBlock(shadyDenCase, FrequencyClass::Rare));
+
+        UpsilonValue* normalResult = normalDivCase->appendNew<UpsilonValue>(
+            m_proc, m_origin,
+            normalDivCase->appendNew<Value>(m_proc, nonChillOpcode, m_origin, num, den));
+        normalDivCase->appendNew<Value>(m_proc, Jump, m_origin);
+        normalDivCase->setSuccessors(FrequentedBlock(m_block));
+
+        shadyDenCase->appendNew<Value>(m_proc, Branch, m_origin, den);
+        shadyDenCase->setSuccessors(
+            FrequentedBlock(neg1DenCase, FrequencyClass::Normal),
+            FrequentedBlock(zeroDenCase, FrequencyClass::Rare));
+
+        UpsilonValue* zeroResult = zeroDenCase->appendNew<UpsilonValue>(
+            m_proc, m_origin,
+            zeroDenCase->appendIntConstant(m_proc, m_value, 0));
+        zeroDenCase->appendNew<Value>(m_proc, Jump, m_origin);
+        zeroDenCase->setSuccessors(FrequentedBlock(m_block));
+
+        int64_t badNumeratorConst = 0;
+        switch (m_value->type()) {
+        case Int32:
+            badNumeratorConst = std::numeric_limits<int32_t>::min();
+            break;
+        case Int64:
+            badNumeratorConst = std::numeric_limits<int64_t>::min();
+            break;
+        default:
+            ASSERT_NOT_REACHED();
+            badNumeratorConst = 0;
+        }
+
+        Value* badNumerator =
+            neg1DenCase->appendIntConstant(m_proc, m_value, badNumeratorConst);
+
+        neg1DenCase->appendNew<Value>(
+            m_proc, Branch, m_origin,
+            neg1DenCase->appendNew<Value>(
+                m_proc, Equal, m_origin, num, badNumerator));
+        neg1DenCase->setSuccessors(
+            FrequentedBlock(intMinCase, FrequencyClass::Rare),
+            FrequentedBlock(normalDivCase, FrequencyClass::Normal));
+
+        Value* intMinResult = nonChillOpcode == Div ? badNumerator : intMinCase->appendIntConstant(m_proc, m_value, 0);
+        UpsilonValue* intMinResultUpsilon = intMinCase->appendNew<UpsilonValue>(
+            m_proc, m_origin, intMinResult);
+        intMinCase->appendNew<Value>(m_proc, Jump, m_origin);
+        intMinCase->setSuccessors(FrequentedBlock(m_block));
+
+        Value* phi = m_insertionSet.insert<Value>(
+            m_index, Phi, m_value->type(), m_origin);
+        normalResult->setPhi(phi);
+        zeroResult->setPhi(phi);
+        intMinResultUpsilon->setPhi(phi);
+
+        m_value->replaceWithIdentity(phi);
+        before->updatePredecessorsAfter();
+    }
+
+    void recursivelyBuildSwitch(
+        const Vector<SwitchCase>& cases, FrequentedBlock fallThrough, unsigned start, bool hardStart,
+        unsigned end, BasicBlock* before)
+    {
+        Value* child = m_value->child(0);
+        Type type = child->type();
+        
+        // It's a good idea to use a table-based switch in some cases: the number of cases has to be
+        // large enough and they have to be dense enough. This could probably be improved a lot. For
+        // example, we could still use a jump table in cases where the inputs are sparse so long as we
+        // shift off the uninteresting bits. On the other hand, it's not clear that this would
+        // actually be any better than what we have done here and it's not clear that it would be
+        // better than a binary switch.
+        const unsigned minCasesForTable = 7;
+        const unsigned densityLimit = 4;
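+        // Worked example (illustrative): 8 cases covering the values 0..20 give a density
+        // of (20 - 0 + 1) / 8 = 2 < densityLimit, so they take the jump-table path below;
+        // the same 8 cases spread over 0..100 would fall through to the binary switch.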
+        if (end - start >= minCasesForTable) {
+            int64_t firstValue = cases[start].caseValue();
+            int64_t lastValue = cases[end - 1].caseValue();
+            if ((lastValue - firstValue + 1) / (end - start) < densityLimit) {
+                BasicBlock* switchBlock = m_blockInsertionSet.insertAfter(m_block);
+                Value* index = before->appendNew<Value>(
+                    m_proc, Sub, m_origin, child,
+                    before->appendIntConstant(m_proc, m_origin, type, firstValue));
+                before->appendNew<Value>(
+                    m_proc, Branch, m_origin,
+                    before->appendNew<Value>(
+                        m_proc, Above, m_origin, index,
+                        before->appendIntConstant(m_proc, m_origin, type, lastValue - firstValue)));
+                before->setSuccessors(fallThrough, FrequentedBlock(switchBlock));
+                
+                size_t tableSize = lastValue - firstValue + 1;
+                
+                if (index->type() != pointerType() && index->type() == Int32)
+                    index = switchBlock->appendNew<Value>(m_proc, ZExt32, m_origin, index);
+                
+                PatchpointValue* patchpoint =
+                    switchBlock->appendNew<PatchpointValue>(m_proc, Void, m_origin);
+
+                // Even though this loads from the jump table, the jump table is immutable. For the
+                // purpose of alias analysis, reading something immutable is like reading nothing.
+                patchpoint->effects = Effects();
+                patchpoint->effects.terminal = true;
+                
+                patchpoint->appendSomeRegister(index);
+                patchpoint->numGPScratchRegisters++;
+                // Technically, we don't have to clobber macro registers on X86_64. This is probably
+                // OK though.
+                patchpoint->clobber(RegisterSet::macroScratchRegisters());
+                
+                BitVector handledIndices;
+                for (unsigned i = start; i < end; ++i) {
+                    FrequentedBlock block = cases[i].target();
+                    int64_t value = cases[i].caseValue();
+                    switchBlock->appendSuccessor(block);
+                    size_t index = value - firstValue;
+                    ASSERT(!handledIndices.get(index));
+                    handledIndices.set(index);
+                }
+                
+                bool hasUnhandledIndex = false;
+                for (unsigned i = 0; i < tableSize; ++i) {
+                    if (!handledIndices.get(i)) {
+                        hasUnhandledIndex = true;
+                        break;
+                    }
+                }
+                
+                if (hasUnhandledIndex)
+                    switchBlock->appendSuccessor(fallThrough);
+
+                patchpoint->setGenerator(
+                    [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                        AllowMacroScratchRegisterUsage allowScratch(jit);
+                        
+                        MacroAssemblerCodePtr* jumpTable = static_cast<MacroAssemblerCodePtr*>(
+                            params.proc().addDataSection(sizeof(MacroAssemblerCodePtr) * tableSize));
+                        
+                        GPRReg index = params[0].gpr();
+                        GPRReg scratch = params.gpScratch(0);
+                        
+                        jit.move(CCallHelpers::TrustedImmPtr(jumpTable), scratch);
+                        jit.jump(CCallHelpers::BaseIndex(scratch, index, CCallHelpers::timesPtr()));
+                        
+                        // These labels are guaranteed to be populated before either late paths or
+                        // link tasks run.
+                        Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+                        
+                        jit.addLinkTask(
+                            [=] (LinkBuffer& linkBuffer) {
+                                if (hasUnhandledIndex) {
+                                    MacroAssemblerCodePtr fallThrough =
+                                        linkBuffer.locationOf(*labels.last());
+                                    for (unsigned i = tableSize; i--;)
+                                        jumpTable[i] = fallThrough;
+                                }
+                                
+                                unsigned labelIndex = 0;
+                                for (unsigned tableIndex : handledIndices) {
+                                    jumpTable[tableIndex] =
+                                        linkBuffer.locationOf(*labels[labelIndex++]);
+                                }
+                            });
+                    });
+                return;
+            }
+        }
+        
+        // See comments in jit/BinarySwitch.cpp for a justification of this algorithm. The only
+        // thing we do differently is that we don't use randomness.
+
+        const unsigned leafThreshold = 3;
+
+        unsigned size = end - start;
+
+        if (size <= leafThreshold) {
+            bool allConsecutive = false;
+
+            if ((hardStart || (start && cases[start - 1].caseValue() == cases[start].caseValue() - 1))
+                && end < cases.size()
+                && cases[end - 1].caseValue() == cases[end].caseValue() - 1) {
+                allConsecutive = true;
+                for (unsigned i = 0; i < size - 1; ++i) {
+                    if (cases[start + i].caseValue() + 1 != cases[start + i + 1].caseValue()) {
+                        allConsecutive = false;
+                        break;
+                    }
+                }
+            }
+
+            unsigned limit = allConsecutive ? size - 1 : size;
+            
+            for (unsigned i = 0; i < limit; ++i) {
+                BasicBlock* nextCheck = m_blockInsertionSet.insertAfter(m_block);
+                before->appendNew<Value>(
+                    m_proc, Branch, m_origin,
+                    before->appendNew<Value>(
+                        m_proc, Equal, m_origin, child,
+                        before->appendIntConstant(
+                            m_proc, m_origin, type,
+                            cases[start + i].caseValue())));
+                before->setSuccessors(cases[start + i].target(), FrequentedBlock(nextCheck));
+
+                before = nextCheck;
+            }
+
+            before->appendNew<Value>(m_proc, Jump, m_origin);
+            if (allConsecutive)
+                before->setSuccessors(cases[end - 1].target());
+            else
+                before->setSuccessors(fallThrough);
+            return;
+        }
+
+        unsigned medianIndex = (start + end) / 2;
+
+        BasicBlock* left = m_blockInsertionSet.insertAfter(m_block);
+        BasicBlock* right = m_blockInsertionSet.insertAfter(m_block);
+
+        before->appendNew<Value>(
+            m_proc, Branch, m_origin,
+            before->appendNew<Value>(
+                m_proc, LessThan, m_origin, child,
+                before->appendIntConstant(
+                    m_proc, m_origin, type,
+                    cases[medianIndex].caseValue())));
+        before->setSuccessors(FrequentedBlock(left), FrequentedBlock(right));
+
+        recursivelyBuildSwitch(cases, fallThrough, start, hardStart, medianIndex, left);
+        recursivelyBuildSwitch(cases, fallThrough, medianIndex, true, end, right);
+    }
+    
+    Procedure& m_proc;
+    BlockInsertionSet m_blockInsertionSet;
+    InsertionSet m_insertionSet;
+    BasicBlock* m_block;
+    unsigned m_index;
+    Value* m_value;
+    Origin m_origin;
+    bool m_changed { false };
+};
+
+bool lowerMacrosImpl(Procedure& proc)
+{
+    LowerMacros lowerMacros(proc);
+    return lowerMacros.run();
+}
+
+} // anonymous namespace
+
+bool lowerMacros(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "lowerMacros");
+    bool result = lowerMacrosImpl(proc);
+    if (shouldValidateIR())
+        RELEASE_ASSERT(!lowerMacrosImpl(proc));
+    return result;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3LowerMacros.h b/Source/JavaScriptCore/b3/B3LowerMacros.h
new file mode 100644
index 000000000..f9649e2c8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerMacros.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Lowers high-level operations that are easier to deal with once they are broken up.
+// Currently this includes Switch and chill Div/Mod.
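+//
+// For example (see the .cpp): a dense Switch becomes a jump-table patchpoint, a sparse
+// Switch becomes a binary tree of Branches, and chill Div/Mod gets explicit checks for
+// zero and INT_MIN/-1 operands around the plain operation.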
+
+bool lowerMacros(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.cpp b/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.cpp
new file mode 100644
index 000000000..dbe158b5c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3LowerMacrosAfterOptimizations.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3CCallValue.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstFloatValue.h"
+#include "B3ConstPtrValue.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LowerMacros {
+public:
+    LowerMacros(Procedure& proc)
+        : m_proc(proc)
+        , m_blockInsertionSet(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    bool run()
+    {
+        for (BasicBlock* block : m_proc) {
+            m_block = block;
+            processCurrentBlock();
+        }
+        m_changed |= m_blockInsertionSet.execute();
+        if (m_changed) {
+            m_proc.resetReachability();
+            m_proc.invalidateCFG();
+        }
+        return m_changed;
+    }
+    
+private:
+    void processCurrentBlock()
+    {
+        for (m_index = 0; m_index < m_block->size(); ++m_index) {
+            m_value = m_block->at(m_index);
+            m_origin = m_value->origin();
+            switch (m_value->opcode()) {
+            case Abs: {
+                // ARM supports this instruction natively.
+                if (isARM64())
+                    break;
+
+                Value* mask = nullptr;
+                if (m_value->type() == Double)
+                    mask = m_insertionSet.insert<ConstDoubleValue>(m_index, m_origin, bitwise_cast<double>(~(1ll << 63)));
+                else if (m_value->type() == Float)
+                    mask = m_insertionSet.insert<ConstFloatValue>(m_index, m_origin, bitwise_cast<float>(~(1 << 31)));
+                else
+                    RELEASE_ASSERT_NOT_REACHED();
+                Value* result = m_insertionSet.insert<Value>(m_index, BitAnd, m_origin, m_value->child(0), mask);
+                m_value->replaceWithIdentity(result);
+                break;
+            }
+            case Ceil: {
+                if (MacroAssembler::supportsFloatingPointRounding())
+                    break;
+
+                Value* functionAddress = nullptr;
+                if (m_value->type() == Double) {
+                    double (*ceilDouble)(double) = ceil;
+                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, ceilDouble);
+                } else if (m_value->type() == Float)
+                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, ceilf);
+                else
+                    RELEASE_ASSERT_NOT_REACHED();
+
+                Value* result = m_insertionSet.insert<CCallValue>(m_index,
+                    m_value->type(),
+                    m_origin,
+                    Effects::none(),
+                    functionAddress,
+                    m_value->child(0));
+                m_value->replaceWithIdentity(result);
+                break;
+            }
+            case Floor: {
+                if (MacroAssembler::supportsFloatingPointRounding())
+                    break;
+
+                Value* functionAddress = nullptr;
+                if (m_value->type() == Double) {
+                    double (*floorDouble)(double) = floor;
+                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, floorDouble);
+                } else if (m_value->type() == Float)
+                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, floorf);
+                else
+                    RELEASE_ASSERT_NOT_REACHED();
+
+                Value* result = m_insertionSet.insert<CCallValue>(m_index,
+                    m_value->type(),
+                    m_origin,
+                    Effects::none(),
+                    functionAddress,
+                    m_value->child(0));
+                m_value->replaceWithIdentity(result);
+                break;
+            }
+            case Neg: {
+                if (!isFloat(m_value->type()))
+                    break;
+                
+                // X86 is odd in that it requires this.
+                if (!isX86())
+                    break;
+
+                Value* mask = nullptr;
+                if (m_value->type() == Double)
+                    mask = m_insertionSet.insert<ConstDoubleValue>(m_index, m_origin, -0.0);
+                else {
+                    RELEASE_ASSERT(m_value->type() == Float);
+                    mask = m_insertionSet.insert<ConstFloatValue>(m_index, m_origin, -0.0f);
+                }
+
+                Value* result = m_insertionSet.insert<Value>(
+                    m_index, BitXor, m_origin, m_value->child(0), mask);
+                m_value->replaceWithIdentity(result);
+                break;
+            }
+
+            case RotL: {
+                // ARM64 doesn't have a rotate left.
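+                // RotL(x, shift) produces the same bits as RotR(x, -shift), so negate the shift.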
+                if (isARM64()) {
+                    Value* newShift = m_insertionSet.insert<Value>(m_index, Neg, m_value->origin(), m_value->child(1));
+                    Value* rotate = m_insertionSet.insert<Value>(m_index, RotR, m_value->origin(), m_value->child(0), newShift);
+                    m_value->replaceWithIdentity(rotate);
+                }
+                break;
+            }
+            default:
+                break;
+            }
+        }
+        m_insertionSet.execute(m_block);
+    }
+    
+    Procedure& m_proc;
+    BlockInsertionSet m_blockInsertionSet;
+    InsertionSet m_insertionSet;
+    BasicBlock* m_block;
+    unsigned m_index;
+    Value* m_value;
+    Origin m_origin;
+    bool m_changed { false };
+};
+
+bool lowerMacrosImpl(Procedure& proc)
+{
+    LowerMacros lowerMacros(proc);
+    return lowerMacros.run();
+}
+
+} // anonymous namespace
+
+bool lowerMacrosAfterOptimizations(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "lowerMacrosAfterOptimizations");
+    bool result = lowerMacrosImpl(proc);
+    if (shouldValidateIR())
+        RELEASE_ASSERT(!lowerMacrosImpl(proc));
+    return result;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.h b/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.h
new file mode 100644
index 000000000..f7b653665
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Lower certain high-level opcodes to lower-level opcodes to help code generation.
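+//
+// For example (see the .cpp): without native support, Abs becomes BitAnd with a
+// sign-clearing mask, Ceil/Floor become C calls to ceil/floor, float Neg on x86 becomes
+// BitXor with -0.0, and RotL on ARM64 becomes RotR with a negated shift amount.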
+
+bool lowerMacrosAfterOptimizations(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3LowerToAir.cpp b/Source/JavaScriptCore/b3/B3LowerToAir.cpp
new file mode 100644
index 000000000..29a4379dc
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerToAir.cpp
@@ -0,0 +1,2899 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3LowerToAir.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirStackSlot.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BlockWorklist.h"
+#include "B3CCallValue.h"
+#include "B3CheckSpecial.h"
+#include "B3Commutativity.h"
+#include "B3Dominators.h"
+#include "B3FenceValue.h"
+#include "B3MemoryValue.h"
+#include "B3PatchpointSpecial.h"
+#include "B3PatchpointValue.h"
+#include "B3PhaseScope.h"
+#include "B3PhiChildren.h"
+#include "B3Procedure.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "B3WasmAddressValue.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+#include <wtf/ListDump.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+namespace {
+
+const bool verbose = false;
+
+class LowerToAir {
+public:
+    LowerToAir(Procedure& procedure)
+        : m_valueToTmp(procedure.values().size())
+        , m_phiToTmp(procedure.values().size())
+        , m_blockToBlock(procedure.size())
+        , m_useCounts(procedure)
+        , m_phiChildren(procedure)
+        , m_dominators(procedure.dominators())
+        , m_procedure(procedure)
+        , m_code(procedure.code())
+    {
+    }
+
+    void run()
+    {
+        for (B3::BasicBlock* block : m_procedure)
+            m_blockToBlock[block] = m_code.addBlock(block->frequency());
+        
+        for (Value* value : m_procedure.values()) {
+            switch (value->opcode()) {
+            case Phi: {
+                m_phiToTmp[value] = m_code.newTmp(Arg::typeForB3Type(value->type()));
+                if (verbose)
+                    dataLog("Phi tmp for ", *value, ": ", m_phiToTmp[value], "\n");
+                break;
+            }
+            default:
+                break;
+            }
+        }
+
+        for (B3::StackSlot* stack : m_procedure.stackSlots())
+            m_stackToStack.add(stack, m_code.addStackSlot(stack));
+        for (Variable* variable : m_procedure.variables())
+            m_variableToTmp.add(variable, m_code.newTmp(Arg::typeForB3Type(variable->type())));
+
+        // Figure out which blocks are not rare.
+        m_fastWorklist.push(m_procedure[0]);
+        while (B3::BasicBlock* block = m_fastWorklist.pop()) {
+            for (B3::FrequentedBlock& successor : block->successors()) {
+                if (!successor.isRare())
+                    m_fastWorklist.push(successor.block());
+            }
+        }
+
+        m_procedure.resetValueOwners(); // Used by crossesInterference().
+
+        // Lower defs before uses on a global level. This is a good heuristic to lock down a
+        // hoisted address expression before we duplicate it back into the loop.
+        for (B3::BasicBlock* block : m_procedure.blocksInPreOrder()) {
+            m_block = block;
+            // Reset some state.
+            m_insts.resize(0);
+
+            m_isRare = !m_fastWorklist.saw(block);
+
+            if (verbose)
+                dataLog("Lowering Block ", *block, ":\n");
+            
+            // Process blocks in reverse order so we see uses before defs. That's what allows us
+            // to match patterns effectively.
+            for (unsigned i = block->size(); i--;) {
+                m_index = i;
+                m_value = block->at(i);
+                if (m_locked.contains(m_value))
+                    continue;
+                m_insts.append(Vector<Inst>());
+                if (verbose)
+                    dataLog("Lowering ", deepDump(m_procedure, m_value), ":\n");
+                lower();
+                if (verbose) {
+                    for (Inst& inst : m_insts.last())
+                        dataLog("    ", inst, "\n");
+                }
+            }
+
+            // Now append the instructions. m_insts contains them in reverse order, so we process
+            // it in reverse.
+            for (unsigned i = m_insts.size(); i--;) {
+                for (Inst& inst : m_insts[i])
+                    m_blockToBlock[block]->appendInst(WTFMove(inst));
+            }
+
+            // Make sure that the successors are set up correctly.
+            for (B3::FrequentedBlock successor : block->successors()) {
+                m_blockToBlock[block]->successors().append(
+                    Air::FrequentedBlock(m_blockToBlock[successor.block()], successor.frequency()));
+            }
+        }
+
+        Air::InsertionSet insertionSet(m_code);
+        for (Inst& inst : m_prologue)
+            insertionSet.insertInst(0, WTFMove(inst));
+        insertionSet.execute(m_code[0]);
+    }
+
+private:
+    bool shouldCopyPropagate(Value* value)
+    {
+        switch (value->opcode()) {
+        case Trunc:
+        case Identity:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    class ArgPromise {
+        WTF_MAKE_NONCOPYABLE(ArgPromise);
+    public:
+        ArgPromise() { }
+
+        ArgPromise(const Arg& arg, Value* valueToLock = nullptr)
+            : m_arg(arg)
+            , m_value(valueToLock)
+        {
+        }
+        
+        void swap(ArgPromise& other)
+        {
+            std::swap(m_arg, other.m_arg);
+            std::swap(m_value, other.m_value);
+            std::swap(m_wasConsumed, other.m_wasConsumed);
+            std::swap(m_wasWrapped, other.m_wasWrapped);
+            std::swap(m_traps, other.m_traps);
+        }
+        
+        ArgPromise(ArgPromise&& other)
+        {
+            swap(other);
+        }
+        
+        ArgPromise& operator=(ArgPromise&& other)
+        {
+            swap(other);
+            return *this;
+        }
+        
+        ~ArgPromise()
+        {
+            if (m_wasConsumed)
+                RELEASE_ASSERT(m_wasWrapped);
+        }
+        
+        void setTraps(bool value)
+        {
+            m_traps = value;
+        }
+
+        static ArgPromise tmp(Value* value)
+        {
+            ArgPromise result;
+            result.m_value = value;
+            return result;
+        }
+
+        explicit operator bool() const { return m_arg || m_value; }
+
+        Arg::Kind kind() const
+        {
+            if (!m_arg && m_value)
+                return Arg::Tmp;
+            return m_arg.kind();
+        }
+
+        const Arg& peek() const
+        {
+            return m_arg;
+        }
+
+        Arg consume(LowerToAir& lower)
+        {
+            m_wasConsumed = true;
+            if (!m_arg && m_value)
+                return lower.tmp(m_value);
+            if (m_value)
+                lower.commitInternal(m_value);
+            return m_arg;
+        }
+        
+        template<typename... Args>
+        Inst inst(Args&&... args)
+        {
+            Inst result(std::forward<Args>(args)...);
+            result.kind.traps |= m_traps;
+            m_wasWrapped = true;
+            return result;
+        }
+        
+    private:
+        // Four forms:
+        // Everything null: invalid.
+        // Arg non-null, value null: just use the arg, nothing special.
+        // Arg null, value non-null: it's a tmp, pin it when necessary.
+        // Arg non-null, value non-null: use the arg, lock the value.
+        Arg m_arg;
+        Value* m_value { nullptr };
+        bool m_wasConsumed { false };
+        bool m_wasWrapped { false };
+        bool m_traps { false };
+    };
+
+    // Consider using tmpPromise() in cases where you aren't sure that you want to pin the value yet.
+    // Here are three canonical ways of using tmp() and tmpPromise():
+    //
+    // Idiom #1: You know that you want a tmp() and you know that it will be valid for the
+    // instruction you're emitting.
+    //
+    //     append(Foo, tmp(bar));
+    //
+    // Idiom #2: You don't know if you want to use a tmp() because you haven't determined if the
+    // instruction will accept it, so you query first. Note that the call to tmp() happens only after
+    // you are sure that you will use it.
+    //
+    //     if (isValidForm(Foo, Arg::Tmp))
+    //         append(Foo, tmp(bar))
+    //
+    // Idiom #3: Same as Idiom #2, but using tmpPromise. Notice that this calls consume() only after
+    // it's sure it will use the tmp. That's deliberate. Also note that you're required to pass any
+    // Inst you create with consumed promises through that promise's inst() function.
+    //
+    //     ArgPromise promise = tmpPromise(bar);
+    //     if (isValidForm(Foo, promise.kind()))
+    //         append(promise.inst(Foo, promise.consume(*this)))
+    //
+    // In both idiom #2 and idiom #3, we don't pin the value to a temporary except when we actually
+    // emit the instruction. Both tmp() and tmpPromise().consume(*this) will pin it. Pinning means
+    // that we will henceforth require that the value of 'bar' is generated as a separate
+    // instruction. We don't want to pin the value to a temporary if we might change our minds, and
+    // pass an address operand representing 'bar' to Foo instead.
+    //
+    // Because tmp() pins, the following is not an idiom you should use:
+    //
+    //     Tmp tmp = this->tmp(bar);
+    //     if (isValidForm(Foo, tmp.kind()))
+    //         append(Foo, tmp);
+    //
+    // That's because if isValidForm() returns false, you will have already pinned the 'bar' to a
+    // temporary. You might later want to try to do something like loadPromise(), and that will fail.
+    // This arises in operations that have both an Addr,Tmp form and a Tmp,Addr form. The following code
+    // seems right, but will actually fail to ever match the Tmp,Addr form because by then, the right
+    // value is already pinned.
+    //
+    //     auto tryThings = [this] (const Arg& left, const Arg& right) {
+    //         if (isValidForm(Foo, left.kind(), right.kind()))
+    //             return Inst(Foo, m_value, left, right);
+    //         return Inst();
+    //     };
+    //     if (Inst result = tryThings(loadAddr(left), tmp(right)))
+    //         return result;
+    //     if (Inst result = tryThings(tmp(left), loadAddr(right))) // this never succeeds.
+    //         return result;
+    //     return Inst(Foo, m_value, tmp(left), tmp(right));
+    //
+    // If you imagine that loadAddr(value) is just loadPromise(value).consume(*this), then this code
+    // will run correctly - it will generate OK code - but the second form is never matched.
+    // loadAddr(right) will never succeed because it will observe that 'right' is already pinned.
+    // Of course, it's exactly because of the risky nature of such code that we don't have a
+    // loadAddr() helper and require you to balance ArgPromise's in code like this. Such code will
+    // work fine if written as:
+    //
+    //     auto tryThings = [this] (ArgPromise& left, ArgPromise& right) {
+    //         if (isValidForm(Foo, left.kind(), right.kind()))
+    //             return left.inst(right.inst(Foo, m_value, left.consume(*this), right.consume(*this)));
+    //         return Inst();
+    //     };
+    //     if (Inst result = tryThings(loadPromise(left), tmpPromise(right)))
+    //         return result;
+    //     if (Inst result = tryThings(tmpPromise(left), loadPromise(right)))
+    //         return result;
+    //     return Inst(Foo, m_value, tmp(left), tmp(right));
+    //
+    // Notice that we did use tmp in the fall-back case at the end, because by then, we know for sure
+    // that we want a tmp. But using tmpPromise in the tryThings() calls ensures that doing so
+    // doesn't prevent us from trying loadPromise on the same value.
+    Tmp tmp(Value* value)
+    {
+        Tmp& tmp = m_valueToTmp[value];
+        if (!tmp) {
+            while (shouldCopyPropagate(value))
+                value = value->child(0);
+
+            if (value->opcode() == FramePointer)
+                return Tmp(GPRInfo::callFrameRegister);
+
+            Tmp& realTmp = m_valueToTmp[value];
+            if (!realTmp) {
+                realTmp = m_code.newTmp(Arg::typeForB3Type(value->type()));
+                if (m_procedure.isFastConstant(value->key()))
+                    m_code.addFastTmp(realTmp);
+                if (verbose)
+                    dataLog("Tmp for ", *value, ": ", realTmp, "\n");
+            }
+            tmp = realTmp;
+        }
+        return tmp;
+    }
+
+    ArgPromise tmpPromise(Value* value)
+    {
+        return ArgPromise::tmp(value);
+    }
+
+    bool canBeInternal(Value* value)
+    {
+        // If one of the internal things has already been computed, then we don't want to cause
+        // it to be recomputed again.
+        if (m_valueToTmp[value])
+            return false;
+        
+        // We require internals to have only one use - us. It's not clear if this should be numUses() or
+        // numUsingInstructions(). Ideally, it would be numUsingInstructions(), except that it's not clear
+        // if we'd actually do the right thing when matching over such a DAG pattern. For now, it simply
+        // doesn't matter because we don't implement patterns that would trigger this.
+        if (m_useCounts.numUses(value) != 1)
+            return false;
+
+        return true;
+    }
+
+    // If you ask canBeInternal() and then construct something from that, and you commit to emitting
+    // that code, then you must commitInternal() on that value. This is tricky, and you only need to
+    // do it if you're pattern matching by hand rather than using the patterns language. Long story
+    // short, you should avoid this by using the pattern matcher to match patterns.
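+    //
+    // A minimal sketch of the hand-matching discipline (Foo is a placeholder opcode, not a
+    // rule taken from this file):
+    //
+    //     if (canBeInternal(loadValue)) {
+    //         if (Arg fusedAddr = addr(loadValue)) {
+    //             append(Foo, fusedAddr, tmp(m_value));
+    //             commitInternal(loadValue); // the emitted Foo subsumes loadValue
+    //             return;
+    //         }
+    //     }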
+    void commitInternal(Value* value)
+    {
+        if (value)
+            m_locked.add(value);
+    }
+
+    bool crossesInterference(Value* value)
+    {
+        // If it's in a foreign block, then be conservative. We could handle this if we were
+        // willing to do heavier analysis. For example, if we had liveness, then we could label
+        // values as "crossing interference" if they interfere with anything that they are live
+        // across. But, it's not clear how useful this would be.
+        if (value->owner != m_value->owner)
+            return true;
+
+        Effects effects = value->effects();
+
+        for (unsigned i = m_index; i--;) {
+            Value* otherValue = m_block->at(i);
+            if (otherValue == value)
+                return false;
+            if (effects.interferes(otherValue->effects()))
+                return true;
+        }
+
+        ASSERT_NOT_REACHED();
+        return true;
+    }
+    
+    std::optional<unsigned> scaleForShl(Value* shl, int32_t offset, std::optional<Arg::Width> width = std::nullopt)
+    {
+        if (shl->opcode() != Shl)
+            return std::nullopt;
+        if (!shl->child(1)->hasInt32())
+            return std::nullopt;
+        unsigned logScale = shl->child(1)->asInt32();
+        if (shl->type() == Int32)
+            logScale &= 31;
+        else
+            logScale &= 63;
+        // Use 64-bit math to perform the shift so that <<32 does the right thing, but then switch
+        // to signed since that's what all of our APIs want.
+        int64_t bigScale = static_cast<int64_t>(1) << static_cast<int64_t>(logScale);
+        if (!isRepresentableAs<int32_t>(bigScale))
+            return std::nullopt;
+        unsigned scale = static_cast<unsigned>(bigScale);
+        if (!Arg::isValidIndexForm(scale, offset, width))
+            return std::nullopt;
+        return scale;
+    }
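+
+    // Illustrative example (assuming Arg::isValidIndexForm(4, offset, width) holds on the
+    // target): for shl = Shl(@i, Const32(2)), scaleForShl() returns 4, which lets
+    // effectiveAddr() below turn Add(@p, Shl(@i, 2)) into the index form base + 4 * index
+    // + offset.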
+
+    // This turns the given operand into an address.
+    Arg effectiveAddr(Value* address, int32_t offset, Arg::Width width)
+    {
+        ASSERT(Arg::isValidAddrForm(offset, width));
+
+        auto fallback = [&] () -> Arg {
+            return Arg::addr(tmp(address), offset);
+        };
+        
+        static const unsigned lotsOfUses = 10; // This is arbitrary and we should tune it eventually.
+
+        // Only match if the address value isn't used in some large number of places.
+        if (m_useCounts.numUses(address) > lotsOfUses)
+            return fallback();
+        
+        switch (address->opcode()) {
+        case Add: {
+            Value* left = address->child(0);
+            Value* right = address->child(1);
+
+            auto tryIndex = [&] (Value* index, Value* base) -> Arg {
+                std::optional<unsigned> scale = scaleForShl(index, offset, width);
+                if (!scale)
+                    return Arg();
+                if (m_locked.contains(index->child(0)) || m_locked.contains(base))
+                    return Arg();
+                return Arg::index(tmp(base), tmp(index->child(0)), *scale, offset);
+            };
+
+            if (Arg result = tryIndex(left, right))
+                return result;
+            if (Arg result = tryIndex(right, left))
+                return result;
+
+            if (m_locked.contains(left) || m_locked.contains(right)
+                || !Arg::isValidIndexForm(1, offset, width))
+                return fallback();
+            
+            return Arg::index(tmp(left), tmp(right), 1, offset);
+        }
+
+        case Shl: {
+            Value* left = address->child(0);
+
+            // We'll never see child(1)->isInt32(0), since that would have been reduced. If the shift
+            // amount is greater than 1, then there isn't really anything smart that we could do here.
+            // We avoid using baseless indexes because their encoding isn't particularly efficient.
+            if (m_locked.contains(left) || !address->child(1)->isInt32(1)
+                || !Arg::isValidIndexForm(1, offset, width))
+                return fallback();
+
+            return Arg::index(tmp(left), tmp(left), 1, offset);
+        }
+
+        case FramePointer:
+            return Arg::addr(Tmp(GPRInfo::callFrameRegister), offset);
+
+        case SlotBase:
+            return Arg::stack(m_stackToStack.get(address->as<SlotBaseValue>()->slot()), offset);
+
+        case WasmAddress: {
+            WasmAddressValue* wasmAddress = address->as<WasmAddressValue>();
+            Value* pointer = wasmAddress->child(0);
+            ASSERT(Arg::isValidIndexForm(1, offset, width));
+            if (m_locked.contains(pointer))
+                return fallback();
+
+            // FIXME: We should support ARM64 LDR 32-bit addressing, which will
+            // allow us to fuse a Shl ptr, 2 into the address. Additionally, and
+            // perhaps more importantly, it would allow us to avoid a truncating
+            // move. See: https://bugs.webkit.org/show_bug.cgi?id=163465
+
+            return Arg::index(Tmp(wasmAddress->pinnedGPR()), tmp(pointer), 1, offset);
+        }
+
+        default:
+            return fallback();
+        }
+    }
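+
+    // Worked example (illustrative, assuming the index form is valid on the target): for
+    // address = Add(@base, Shl(@index, Const32(3))) with width = Width64, the Add case above
+    // yields Arg::index(tmp(@base), tmp(@index), 8, offset), i.e. base + 8 * index + offset.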
+
+    // This gives you the address of the given Load or Store. If it's not a Load or Store, then
+    // it returns Arg().
+    Arg addr(Value* memoryValue)
+    {
+        MemoryValue* value = memoryValue->as<MemoryValue>();
+        if (!value)
+            return Arg();
+
+        int32_t offset = value->offset();
+        Arg::Width width = Arg::widthForBytes(value->accessByteSize());
+
+        Arg result = effectiveAddr(value->lastChild(), offset, width);
+        ASSERT(result.isValidForm(width));
+
+        return result;
+    }
+    
+    template<typename... Args>
+    Inst trappingInst(bool traps, Args&&... args)
+    {
+        Inst result(std::forward<Args>(args)...);
+        result.kind.traps |= traps;
+        return result;
+    }
+    
+    template<typename... Args>
+    Inst trappingInst(Value* value, Args&&... args)
+    {
+        return trappingInst(value->traps(), std::forward<Args>(args)...);
+    }
+    
+    ArgPromise loadPromiseAnyOpcode(Value* loadValue)
+    {
+        if (!canBeInternal(loadValue))
+            return Arg();
+        if (crossesInterference(loadValue))
+            return Arg();
+        ArgPromise result(addr(loadValue), loadValue);
+        if (loadValue->traps())
+            result.setTraps(true);
+        return result;
+    }
+
+    ArgPromise loadPromise(Value* loadValue, B3::Opcode loadOpcode)
+    {
+        if (loadValue->opcode() != loadOpcode)
+            return Arg();
+        return loadPromiseAnyOpcode(loadValue);
+    }
+
+    ArgPromise loadPromise(Value* loadValue)
+    {
+        return loadPromise(loadValue, Load);
+    }
+
+    Arg imm(int64_t intValue)
+    {
+        if (Arg::isValidImmForm(intValue))
+            return Arg::imm(intValue);
+        return Arg();
+    }
+
+    Arg imm(Value* value)
+    {
+        if (value->hasInt())
+            return imm(value->asInt());
+        return Arg();
+    }
+
+    Arg bitImm(Value* value)
+    {
+        if (value->hasInt()) {
+            int64_t intValue = value->asInt();
+            if (Arg::isValidBitImmForm(intValue))
+                return Arg::bitImm(intValue);
+        }
+        return Arg();
+    }
+
+    Arg bitImm64(Value* value)
+    {
+        if (value->hasInt()) {
+            int64_t intValue = value->asInt();
+            if (Arg::isValidBitImm64Form(intValue))
+                return Arg::bitImm64(intValue);
+        }
+        return Arg();
+    }
+
+    Arg immOrTmp(Value* value)
+    {
+        if (Arg result = imm(value))
+            return result;
+        return tmp(value);
+    }
+
+    // By convention, we use Oops to mean "I don't know".
+    Air::Opcode tryOpcodeForType(
+        Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type)
+    {
+        Air::Opcode opcode;
+        switch (type) {
+        case Int32:
+            opcode = opcode32;
+            break;
+        case Int64:
+            opcode = opcode64;
+            break;
+        case Float:
+            opcode = opcodeFloat;
+            break;
+        case Double:
+            opcode = opcodeDouble;
+            break;
+        default:
+            opcode = Air::Oops;
+            break;
+        }
+
+        return opcode;
+    }
+
+    Air::Opcode tryOpcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
+    {
+        return tryOpcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
+    }
+
+    Air::Opcode opcodeForType(
+        Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type)
+    {
+        Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, type);
+        RELEASE_ASSERT(opcode != Air::Oops);
+        return opcode;
+    }
+
+    Air::Opcode opcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
+    {
+        return opcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble = Air::Oops, Air::Opcode opcodeFloat = Air::Oops>
+    void appendUnOp(Value* value)
+    {
+        Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, value->type());
+        
+        Tmp result = tmp(m_value);
+
+        // Two operand forms like:
+        //     Op a, b
+        // mean something like:
+        //     b = Op a
+
+        ArgPromise addr = loadPromise(value);
+        if (isValidForm(opcode, addr.kind(), Arg::Tmp)) {
+            append(addr.inst(opcode, m_value, addr.consume(*this), result));
+            return;
+        }
+
+        if (isValidForm(opcode, Arg::Tmp, Arg::Tmp)) {
+            append(opcode, tmp(value), result);
+            return;
+        }
+
+        ASSERT(value->type() == m_value->type());
+        append(relaxedMoveForType(m_value->type()), tmp(value), result);
+        append(opcode, result);
+    }
+
+    // Call this method when doing two-operand lowering of a commutative operation. You have a choice of
+    // which incoming Value is moved into the result. This will select which one is likely to be most
+    // profitable to use as the result. Doing the right thing can have big performance consequences in tight
+    // kernels.
+    bool preferRightForResult(Value* left, Value* right)
+    {
+        // The default is to move left into result, because that's required for non-commutative instructions.
+        // The value that we want to move into result position is the one that dies here. So, if we're
+        // compiling a commutative operation and we know that actually right is the one that dies right here,
+        // then we can flip things around to help coalescing, which then kills the move instruction.
+        //
+        // But it's more complicated:
+        // - Used-once is a bad estimate of whether the variable dies here.
+        // - A child might be a candidate for coalescing with this value.
+        //
+        // Currently, we have machinery in place to recognize super obvious forms of the latter issue.
+        
+        // We recognize when a child is a Phi that has this value as one of its children. We're very
+        // conservative about this; for example we don't even consider transitive Phi children.
+        bool leftIsPhiWithThis = m_phiChildren[left].transitivelyUses(m_value);
+        bool rightIsPhiWithThis = m_phiChildren[right].transitivelyUses(m_value);
+
+        if (leftIsPhiWithThis != rightIsPhiWithThis)
+            return rightIsPhiWithThis;
+
+        if (m_useCounts.numUsingInstructions(right) != 1)
+            return false;
+        
+        if (m_useCounts.numUsingInstructions(left) != 1)
+            return true;
+
+        // The use count might be 1 if the variable is live around a loop. We can guarantee that we
+        // pick the variable that is least likely to suffer this problem if we pick the one that
+        // is closest to us in an idom walk. By convention, we slightly bias this in favor of
+        // returning true.
+
+        // We cannot prefer right if right is further away in an idom walk.
+        if (m_dominators.strictlyDominates(right->owner, left->owner))
+            return false;
+
+        return true;
+    }
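+
+    // For example (illustrative): when lowering the commutative @r = Add32(@x, @y) to a
+    // two-operand form where @y is used only here while @x stays live, this returns true, so
+    // appendBinOp() emits Move @y, @r; Add32 @x, @r and lets coalescing kill the Move.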
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Commutativity commutativity = NotCommutative>
+    void appendBinOp(Value* left, Value* right)
+    {
+        Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, left->type());
+        
+        Tmp result = tmp(m_value);
+        
+        // Three-operand forms like:
+        //     Op a, b, c
+        // mean something like:
+        //     c = a Op b
+
+        if (isValidForm(opcode, Arg::Imm, Arg::Tmp, Arg::Tmp)) {
+            if (commutativity == Commutative) {
+                if (imm(right)) {
+                    append(opcode, imm(right), tmp(left), result);
+                    return;
+                }
+            } else {
+                // A non-commutative operation could have an immediate in left.
+                if (imm(left)) {
+                    append(opcode, imm(left), tmp(right), result);
+                    return;
+                }
+            }
+        }
+
+        if (isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) {
+            if (commutativity == Commutative) {
+                if (Arg rightArg = bitImm(right)) {
+                    append(opcode, rightArg, tmp(left), result);
+                    return;
+                }
+            } else {
+                // A non-commutative operation could have an immediate in left.
+                if (Arg leftArg = bitImm(left)) {
+                    append(opcode, leftArg, tmp(right), result);
+                    return;
+                }
+            }
+        }
+
+        if (isValidForm(opcode, Arg::BitImm64, Arg::Tmp, Arg::Tmp)) {
+            if (commutativity == Commutative) {
+                if (Arg rightArg = bitImm64(right)) {
+                    append(opcode, rightArg, tmp(left), result);
+                    return;
+                }
+            } else {
+                // A non-commutative operation could have an immediate in left.
+                if (Arg leftArg = bitImm64(left)) {
+                    append(opcode, leftArg, tmp(right), result);
+                    return;
+                }
+            }
+        }
+
+        if (imm(right) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+            append(opcode, tmp(left), imm(right), result);
+            return;
+        }
+
+        // Note that no extant architecture has a three-operand form of binary operations that also
+        // load from memory. If such an abomination did exist, we would handle it somewhere around
+        // here.
+
+        // Two-operand forms like:
+        //     Op a, b
+        // mean something like:
+        //     b = b Op a
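+        //
+        // For example, on x86-64 a two-operand Add32 assembles to something like
+        //     addl %a, %b
+        // which overwrites b; that is why the fallback paths below first Move one operand
+        // into the result tmp.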
+
+        // At this point, we prefer versions of the operation that have a fused load or an immediate
+        // over three operand forms.
+
+        if (left != right) {
+            ArgPromise leftAddr = loadPromise(left);
+            if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp, Arg::Tmp)) {
+                append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), tmp(right), result));
+                return;
+            }
+
+            if (commutativity == Commutative) {
+                if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp)) {
+                    append(relaxedMoveForType(m_value->type()), tmp(right), result);
+                    append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), result));
+                    return;
+                }
+            }
+
+            ArgPromise rightAddr = loadPromise(right);
+            if (isValidForm(opcode, Arg::Tmp, rightAddr.kind(), Arg::Tmp)) {
+                append(rightAddr.inst(opcode, m_value, tmp(left), rightAddr.consume(*this), result));
+                return;
+            }
+
+            if (commutativity == Commutative) {
+                if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp, Arg::Tmp)) {
+                    append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), tmp(left), result));
+                    return;
+                }
+            }
+
+            if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp)) {
+                append(relaxedMoveForType(m_value->type()), tmp(left), result);
+                append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), result));
+                return;
+            }
+        }
+
+        if (imm(right) && isValidForm(opcode, Arg::Imm, Arg::Tmp)) {
+            append(relaxedMoveForType(m_value->type()), tmp(left), result);
+            append(opcode, imm(right), result);
+            return;
+        }
+
+        if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+            append(opcode, tmp(left), tmp(right), result);
+            return;
+        }
+
+        if (commutativity == Commutative && preferRightForResult(left, right)) {
+            append(relaxedMoveForType(m_value->type()), tmp(right), result);
+            append(opcode, tmp(left), result);
+            return;
+        }
+        
+        append(relaxedMoveForType(m_value->type()), tmp(left), result);
+        append(opcode, tmp(right), result);
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative>
+    void appendBinOp(Value* left, Value* right)
+    {
+        appendBinOp<opcode32, opcode64, Air::Oops, Air::Oops, commutativity>(left, right);
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64>
+    void appendShift(Value* value, Value* amount)
+    {
+        Air::Opcode opcode = opcodeForType(opcode32, opcode64, value->type());
+        
+        if (imm(amount)) {
+            if (isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+                append(opcode, tmp(value), imm(amount), tmp(m_value));
+                return;
+            }
+            if (isValidForm(opcode, Arg::Imm, Arg::Tmp)) {
+                append(Move, tmp(value), tmp(m_value));
+                append(opcode, imm(amount), tmp(m_value));
+                return;
+            }
+        }
+
+        if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+            append(opcode, tmp(value), tmp(amount), tmp(m_value));
+            return;
+        }
+
+#if CPU(X86) || CPU(X86_64)
+        append(Move, tmp(value), tmp(m_value));
+        append(Move, tmp(amount), Tmp(X86Registers::ecx));
+        append(opcode, Tmp(X86Registers::ecx), tmp(m_value));
+#endif
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64>
+    bool tryAppendStoreUnOp(Value* value)
+    {
+        Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, value->type());
+        if (opcode == Air::Oops)
+            return false;
+        
+        Arg storeAddr = addr(m_value);
+        ASSERT(storeAddr);
+
+        ArgPromise loadPromise = this->loadPromise(value);
+        if (loadPromise.peek() != storeAddr)
+            return false;
+
+        if (!isValidForm(opcode, storeAddr.kind()))
+            return false;
+        
+        loadPromise.consume(*this);
+        append(trappingInst(m_value, loadPromise.inst(opcode, m_value, storeAddr)));
+        return true;
+    }
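+
+    // For example (illustrative, target-permitting): Store(Neg(Load(@p)), @p) matches here
+    // because the load's address equals the store's, so the pair can fuse into a single
+    // read-modify-write instruction, roughly negl (%p) on x86-64.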
+
+    template<
+        Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative>
+    bool tryAppendStoreBinOp(Value* left, Value* right)
+    {
+        Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, left->type());
+        if (opcode == Air::Oops)
+            return false;
+        
+        Arg storeAddr = addr(m_value);
+        ASSERT(storeAddr);
+
+        auto getLoadPromise = [&] (Value* load) -> ArgPromise {
+            switch (m_value->opcode()) {
+            case B3::Store:
+                if (load->opcode() != B3::Load)
+                    return ArgPromise();
+                break;
+            case B3::Store8:
+                if (load->opcode() != B3::Load8Z && load->opcode() != B3::Load8S)
+                    return ArgPromise();
+                break;
+            case B3::Store16:
+                if (load->opcode() != B3::Load16Z && load->opcode() != B3::Load16S)
+                    return ArgPromise();
+                break;
+            default:
+                return ArgPromise();
+            }
+            return loadPromiseAnyOpcode(load);
+        };
+        
+        ArgPromise loadPromise;
+        Value* otherValue = nullptr;
+
+        loadPromise = getLoadPromise(left);
+        if (loadPromise.peek() == storeAddr)
+            otherValue = right;
+        else if (commutativity == Commutative) {
+            loadPromise = getLoadPromise(right);
+            if (loadPromise.peek() == storeAddr)
+                otherValue = left;
+        }
+
+        if (!otherValue)
+            return false;
+
+        if (isValidForm(opcode, Arg::Imm, storeAddr.kind()) && imm(otherValue)) {
+            loadPromise.consume(*this);
+            append(trappingInst(m_value, loadPromise.inst(opcode, m_value, imm(otherValue), storeAddr)));
+            return true;
+        }
+
+        if (!isValidForm(opcode, Arg::Tmp, storeAddr.kind()))
+            return false;
+
+        loadPromise.consume(*this);
+        append(trappingInst(m_value, loadPromise.inst(opcode, m_value, tmp(otherValue), storeAddr)));
+        return true;
+    }
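+
+    // For example (illustrative, target-permitting): with m_value = Store(Add(Load(@p),
+    // Const32(42)), @p), the left load matches storeAddr and otherValue is the constant, so
+    // the Arg::Imm path above emits roughly addl $42, (%p) on x86-64.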
+
+    Inst createStore(Air::Opcode move, Value* value, const Arg& dest)
+    {
+        if (imm(value) && isValidForm(move, Arg::Imm, dest.kind()))
+            return Inst(move, m_value, imm(value), dest);
+
+        return Inst(move, m_value, tmp(value), dest);
+    }
+
+    Inst createStore(Value* value, const Arg& dest)
+    {
+        Air::Opcode moveOpcode = moveForType(value->type());
+        return createStore(moveOpcode, value, dest);
+    }
+
+    template<typename... Args>
+    void appendStore(Args&&... args)
+    {
+        append(trappingInst(m_value, createStore(std::forward<Args>(args)...)));
+    }
+
+    Air::Opcode moveForType(Type type)
+    {
+        switch (type) {
+        case Int32:
+            return Move32;
+        case Int64:
+            RELEASE_ASSERT(is64Bit());
+            return Move;
+        case Float:
+            return MoveFloat;
+        case Double:
+            return MoveDouble;
+        case Void:
+            break;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return Air::Oops;
+    }
+
+    Air::Opcode relaxedMoveForType(Type type)
+    {
+        switch (type) {
+        case Int32:
+        case Int64:
+            // For Int32, we could return Move or Move32. It's a trade-off.
+            //
+            // Move32: Using Move32 guarantees that we use the narrower move, but in cases where the
+            //     register allocator can't prove that the variables involved are 32-bit, this will
+            //     disable coalescing.
+            //
+            // Move: Using Move guarantees that the register allocator can coalesce normally, but in
+            //     cases where it can't prove that the variables are 32-bit and it doesn't coalesce,
+            //     this will force us to use a full 64-bit Move instead of the slightly cheaper
+            //     32-bit Move32.
+            //
+            // Coalescing is a lot more profitable than turning Move into Move32. So, it's better to
+            // use Move here because in cases where the register allocator cannot prove that
+            // everything is 32-bit, we still get coalescing.
+            return Move;
+        case Float:
+            // MoveFloat is always coalescable and we never convert MoveDouble to MoveFloat, so we
+            // should use MoveFloat when we know that the temporaries involved are 32-bit.
+            return MoveFloat;
+        case Double:
+            return MoveDouble;
+        case Void:
+            break;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return Air::Oops;
+    }
+
+    template<typename... Arguments>
+    void append(Air::Opcode opcode, Arguments&&... arguments)
+    {
+        m_insts.last().append(Inst(opcode, m_value, std::forward<Arguments>(arguments)...));
+    }
+    
+    void append(Inst&& inst)
+    {
+        m_insts.last().append(WTFMove(inst));
+    }
+    void append(const Inst& inst)
+    {
+        m_insts.last().append(inst);
+    }
+
+    template<typename T, typename... Arguments>
+    T* ensureSpecial(T*& field, Arguments&&... arguments)
+    {
+        if (!field) {
+            field = static_cast<T*>(
+                m_code.addSpecial(std::make_unique<T>(std::forward<Arguments>(arguments)...)));
+        }
+        return field;
+    }
+
+    template<typename... Arguments>
+    CheckSpecial* ensureCheckSpecial(Arguments&&... arguments)
+    {
+        CheckSpecial::Key key(std::forward<Arguments>(arguments)...);
+        auto result = m_checkSpecials.add(key, nullptr);
+        return ensureSpecial(result.iterator->value, key);
+    }
+
+    void fillStackmap(Inst& inst, StackmapValue* stackmap, unsigned numSkipped)
+    {
+        for (unsigned i = numSkipped; i < stackmap->numChildren(); ++i) {
+            ConstrainedValue value = stackmap->constrainedChild(i);
+
+            Arg arg;
+            switch (value.rep().kind()) {
+            case ValueRep::WarmAny:
+            case ValueRep::ColdAny:
+            case ValueRep::LateColdAny:
+                if (imm(value.value()))
+                    arg = imm(value.value());
+                else if (value.value()->hasInt64())
+                    arg = Arg::bigImm(value.value()->asInt64());
+                else if (value.value()->hasDouble() && canBeInternal(value.value())) {
+                    commitInternal(value.value());
+                    arg = Arg::bigImm(bitwise_cast<int64_t>(value.value()->asDouble()));
+                } else
+                    arg = tmp(value.value());
+                break;
+            case ValueRep::SomeRegister:
+                arg = tmp(value.value());
+                break;
+            case ValueRep::LateRegister:
+            case ValueRep::Register:
+                stackmap->earlyClobbered().clear(value.rep().reg());
+                arg = Tmp(value.rep().reg());
+                append(relaxedMoveForType(value.value()->type()), immOrTmp(value.value()), arg);
+                break;
+            case ValueRep::StackArgument:
+                arg = Arg::callArg(value.rep().offsetFromSP());
+                appendStore(value.value(), arg);
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+            inst.args.append(arg);
+        }
+    }
+    
+    // Create an Inst to do the comparison specified by the given value.
+    template<typename CompareFunctor, typename TestFunctor, typename CompareDoubleFunctor, typename CompareFloatFunctor>
+    Inst createGenericCompare(
+        Value* value,
+        const CompareFunctor& compare, // Signature: (Arg::Width, Arg relCond, Arg, Arg) -> Inst
+        const TestFunctor& test, // Signature: (Arg::Width, Arg resCond, Arg, Arg) -> Inst
+        const CompareDoubleFunctor& compareDouble, // Signature: (Arg doubleCond, Arg, Arg) -> Inst
+        const CompareFloatFunctor& compareFloat, // Signature: (Arg doubleCond, Arg, Arg) -> Inst
+        bool inverted = false)
+    {
+        // NOTE: This is totally happy to match comparisons that have already been computed elsewhere
+        // since on most architectures, the cost of branching on a previously computed comparison
+        // result is almost always higher than just doing another fused compare/branch. The only time
+        // it could be worse is if we have a binary comparison and both operands are variables (not
+        // constants), and we encounter register pressure. Even in this case, duplicating the compare
+        // so that we can fuse it to the branch will be more efficient most of the time, since
+        // register pressure is not *that* common. For this reason, this algorithm will always
+        // duplicate the comparison.
+        //
+        // However, we cannot duplicate loads. The canBeInternal() on a load will assume that we
+        // already validated canBeInternal() on all of the values that got us to the load. So, even
+        // if we are sharing a value, we still need to call canBeInternal() for the purpose of
+        // tracking whether we are still in good shape to fuse loads.
+        //
+        // We could even have a chain of compare values that we fuse, and any member of the chain
+        // could be shared. Once any of them are shared, then the shared one's transitive children
+        // cannot be locked (i.e. commitInternal()). But if none of them are shared, then we want to
+        // lock all of them because that's a prerequisite to fusing the loads so that the loads don't
+        // get duplicated. For example, we might have: 
+        //
+        //     @tmp1 = LessThan(@a, @b)
+        //     @tmp2 = Equal(@tmp1, 0)
+        //     Branch(@tmp2)
+        //
+        // If either @a or @b are loads, then we want to have locked @tmp1 and @tmp2 so that they
+        // don't emit the loads a second time. But if we had another use of @tmp2, then we cannot
+        // lock @tmp1 (or @a or @b) because then we'll get into trouble when the other values that
+        // try to share @tmp1 with us try to do their lowering.
+        //
+        // There's one more wrinkle. If we don't lock an internal value, then this internal value may
+        // have already separately locked its children. So, if we're not locking a value then we need
+        // to make sure that its children aren't locked. We encapsulate this in two ways:
+        //
+        // canCommitInternal: This variable tells us if the values that we've fused so far are
+        // locked. This means that we're not sharing any of them with anyone. This permits us to fuse
+        // loads. If it's false, then we cannot fuse loads and we also need to ensure that the
+        // children of any values we try to fuse-by-sharing are not already locked. You don't have to
+        // worry about the children locking thing if you use prepareToFuse() before trying to fuse a
+        // sharable value. But, you do need to guard any load fusion by checking if canCommitInternal
+        // is true.
+        //
+        // FusionResult prepareToFuse(value): Call this when you think that you would like to fuse
+        // some value and that value is not a load. It will automatically handle the shared-or-locked
+        // issues and it will clear canCommitInternal if necessary. This will return CannotFuse
+        // (which acts like false) if the value cannot be locked and its children are locked. That's
+        // rare, but you just need to make sure that you do smart things when this happens (i.e. just
+        // use the value rather than trying to fuse it). After you call prepareToFuse(), you can
+        // still change your mind about whether you will actually fuse the value. If you do fuse it,
+        // you need to call commitFusion(value, fusionResult).
+        //
+        // commitFusion(value, fusionResult): Handles calling commitInternal(value) if fusionResult
+        // is FuseAndCommit.
+        
+        bool canCommitInternal = true;
+
+        enum FusionResult {
+            CannotFuse,
+            FuseAndCommit,
+            Fuse
+        };
+        auto prepareToFuse = [&] (Value* value) -> FusionResult {
+            if (value == m_value) {
+                // It's not actually internal. It's the root value. We're good to go.
+                return Fuse;
+            }
+
+            if (canCommitInternal && canBeInternal(value)) {
+                // We are the only users of this value. This also means that the value's children
+                // could not have been locked, since we have now proved that m_value dominates value
+                // in the data flow graph. The only other way to reach value is from a user of m_value. If
+                // value's children are shared with others, then they could not have been locked
+                // because their use count is greater than 1. If they are only used from value, then
+                // in order for value's children to be locked, value would also have to be locked,
+                // and we just proved that it wasn't.
+                return FuseAndCommit;
+            }
+
+            // We're going to try to share value with others. It's possible that some other basic
+            // block had already emitted code for value and then matched over its children and then
+            // locked them, in which case we just want to use value instead of duplicating it. So, we
+            // validate the children. Note that this only arises in linear chains like:
+            //
+            //     BB#1:
+            //         @1 = Foo(...)
+            //         @2 = Bar(@1)
+            //         Jump(#2)
+            //     BB#2:
+            //         @3 = Baz(@2)
+            //
+            // Notice how we could start by generating code for BB#1 and then decide to lock @1 when
+            // generating code for @2, if we have some way of fusing Bar and Foo into a single
+            // instruction. This is legal, since indeed @1 only has one user. But because @2 now
+            // has a tmp (i.e. @2 is pinned), canBeInternal(@2) will return false, which brings us
+            // here. In that case, we cannot match over @2 because then we'd hit a hazard if we end
+            // up deciding not to fuse Foo into the fused Baz/Bar.
+            //
+            // Happily, this kind of child validation only happens in the two places that admit
+            // sharing: this rule and effectiveAddr().
+            //
+            // N.B. We could probably avoid the need to do value locking if we committed to a well
+            // chosen code generation order. For example, if we guaranteed that all of the users of
+            // a value get generated before that value, then there's no way for the lowering of @3 to
+            // see @1 locked. But we don't want to do that, since this is a greedy instruction
+            // selector and so we want to be able to play with order.
+            for (Value* child : value->children()) {
+                if (m_locked.contains(child))
+                    return CannotFuse;
+            }
+
+            // It's safe to share value, but since we're sharing, it means that we aren't locking it.
+            // If we don't lock it, then fusing loads is off limits and all of value's children will
+            // have to go through the sharing path as well.
+            canCommitInternal = false;
+            
+            return Fuse;
+        };
+
+        auto commitFusion = [&] (Value* value, FusionResult result) {
+            if (result == FuseAndCommit)
+                commitInternal(value);
+        };
+        
+        // Chew through any inversions. This loop isn't necessary for comparisons and branches, but
+        // we do need at least one iteration of it for Check.
+        for (;;) {
+            bool shouldInvert =
+                (value->opcode() == BitXor && value->child(1)->hasInt() && (value->child(1)->asInt() & 1) && value->child(0)->returnsBool())
+                || (value->opcode() == Equal && value->child(1)->isInt(0));
+            if (!shouldInvert)
+                break;
+
+            FusionResult fusionResult = prepareToFuse(value);
+            if (fusionResult == CannotFuse)
+                break;
+            commitFusion(value, fusionResult);
+            
+            value = value->child(0);
+            inverted = !inverted;
+        }
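+
+        // For example (walking the loop above): Branch(Equal(LessThan(@a, @b), 0)) strips the
+        // Equal, flips 'inverted', and leaves value = LessThan(@a, @b); the fused
+        // compare-and-branch created below then uses the inverted condition, i.e.
+        // GreaterThanOrEqual.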
+
+        auto createRelCond = [&] (
+            MacroAssembler::RelationalCondition relationalCondition,
+            MacroAssembler::DoubleCondition doubleCondition) {
+            Arg relCond = Arg::relCond(relationalCondition).inverted(inverted);
+            Arg doubleCond = Arg::doubleCond(doubleCondition).inverted(inverted);
+            Value* left = value->child(0);
+            Value* right = value->child(1);
+
+            if (isInt(value->child(0)->type())) {
+                // FIXME: We wouldn't have to worry about leftImm if we canonicalized integer
+                // comparisons.
+                // https://bugs.webkit.org/show_bug.cgi?id=150958
+                
+                Arg leftImm = imm(left);
+                Arg rightImm = imm(right);
+
+                auto tryCompare = [&] (
+                    Arg::Width width, ArgPromise&& left, ArgPromise&& right) -> Inst {
+                    if (Inst result = compare(width, relCond, left, right))
+                        return result;
+                    if (Inst result = compare(width, relCond.flipped(), right, left))
+                        return result;
+                    return Inst();
+                };
+
+                auto tryCompareLoadImm = [&] (
+                    Arg::Width width, B3::Opcode loadOpcode, Arg::Signedness signedness) -> Inst {
+                    if (rightImm && rightImm.isRepresentableAs(width, signedness)) {
+                        if (Inst result = tryCompare(width, loadPromise(left, loadOpcode), rightImm)) {
+                            commitInternal(left);
+                            return result;
+                        }
+                    }
+                    if (leftImm && leftImm.isRepresentableAs(width, signedness)) {
+                        if (Inst result = tryCompare(width, leftImm, loadPromise(right, loadOpcode))) {
+                            commitInternal(right);
+                            return result;
+                        }
+                    }
+                    return Inst();
+                };
+
+                Arg::Width width = Arg::widthForB3Type(value->child(0)->type());
+                
+                if (canCommitInternal) {
+                    // First handle compares that involve fewer bits than B3's type system supports.
+                    // This is pretty important. For example, we want this to be a single
+                    // instruction:
+                    //
+                    //     @1 = Load8S(...)
+                    //     @2 = Const32(...)
+                    //     @3 = LessThan(@1, @2)
+                    //     Branch(@3)
+                
+                    if (relCond.isSignedCond()) {
+                        if (Inst result = tryCompareLoadImm(Arg::Width8, Load8S, Arg::Signed))
+                            return result;
+                    }
+                
+                    if (relCond.isUnsignedCond()) {
+                        if (Inst result = tryCompareLoadImm(Arg::Width8, Load8Z, Arg::Unsigned))
+                            return result;
+                    }
+
+                    if (relCond.isSignedCond()) {
+                        if (Inst result = tryCompareLoadImm(Arg::Width16, Load16S, Arg::Signed))
+                            return result;
+                    }
+                
+                    if (relCond.isUnsignedCond()) {
+                        if (Inst result = tryCompareLoadImm(Arg::Width16, Load16Z, Arg::Unsigned))
+                            return result;
+                    }
+
+                    // Now handle compares that involve a load and an immediate.
+
+                    if (Inst result = tryCompareLoadImm(width, Load, Arg::Signed))
+                        return result;
+
+                    // Now handle compares that involve a load. It's not obvious that it's better to
+                    // handle this before the immediate cases or not. Probably doesn't matter.
+
+                    if (Inst result = tryCompare(width, loadPromise(left), tmpPromise(right))) {
+                        commitInternal(left);
+                        return result;
+                    }
+                
+                    if (Inst result = tryCompare(width, tmpPromise(left), loadPromise(right))) {
+                        commitInternal(right);
+                        return result;
+                    }
+                }
+
+                // Now handle compares that involve an immediate and a tmp.
+                
+                if (leftImm && leftImm.isRepresentableAs<int32_t>()) {
+                    if (Inst result = tryCompare(width, leftImm, tmpPromise(right)))
+                        return result;
+                }
+                
+                if (rightImm && rightImm.isRepresentableAs<int32_t>()) {
+                    if (Inst result = tryCompare(width, tmpPromise(left), rightImm))
+                        return result;
+                }
+
+                // Finally, handle comparison between tmps.
+                ArgPromise leftPromise = tmpPromise(left);
+                ArgPromise rightPromise = tmpPromise(right);
+                return compare(width, relCond, leftPromise, rightPromise);
+            }
+
+            // Floating point comparisons can't really do anything smart.
+            ArgPromise leftPromise = tmpPromise(left);
+            ArgPromise rightPromise = tmpPromise(right);
+            if (value->child(0)->type() == Float)
+                return compareFloat(doubleCond, leftPromise, rightPromise);
+            return compareDouble(doubleCond, leftPromise, rightPromise);
+        };
+
+        Arg::Width width = Arg::widthForB3Type(value->type());
+        Arg resCond = Arg::resCond(MacroAssembler::NonZero).inverted(inverted);
+        
+        auto tryTest = [&] (
+            Arg::Width width, ArgPromise&& left, ArgPromise&& right) -> Inst {
+            if (Inst result = test(width, resCond, left, right))
+                return result;
+            if (Inst result = test(width, resCond, right, left))
+                return result;
+            return Inst();
+        };
+
+        auto attemptFused = [&] () -> Inst {
+            switch (value->opcode()) {
+            case NotEqual:
+                return createRelCond(MacroAssembler::NotEqual, MacroAssembler::DoubleNotEqualOrUnordered);
+            case Equal:
+                return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqual);
+            case LessThan:
+                return createRelCond(MacroAssembler::LessThan, MacroAssembler::DoubleLessThan);
+            case GreaterThan:
+                return createRelCond(MacroAssembler::GreaterThan, MacroAssembler::DoubleGreaterThan);
+            case LessEqual:
+                return createRelCond(MacroAssembler::LessThanOrEqual, MacroAssembler::DoubleLessThanOrEqual);
+            case GreaterEqual:
+                return createRelCond(MacroAssembler::GreaterThanOrEqual, MacroAssembler::DoubleGreaterThanOrEqual);
+            case EqualOrUnordered:
+                // The integer condition is never used in this case.
+                return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqualOrUnordered);
+            case Above:
+                // We use a bogus double condition because these integer comparisons won't go down that
+                // path anyway.
+                return createRelCond(MacroAssembler::Above, MacroAssembler::DoubleEqual);
+            case Below:
+                return createRelCond(MacroAssembler::Below, MacroAssembler::DoubleEqual);
+            case AboveEqual:
+                return createRelCond(MacroAssembler::AboveOrEqual, MacroAssembler::DoubleEqual);
+            case BelowEqual:
+                return createRelCond(MacroAssembler::BelowOrEqual, MacroAssembler::DoubleEqual);
+            case BitAnd: {
+                Value* left = value->child(0);
+                Value* right = value->child(1);
+
+                bool hasRightConst;
+                int64_t rightConst;
+                Arg rightImm;
+                Arg rightImm64;
+
+                hasRightConst = right->hasInt();
+                if (hasRightConst) {
+                    rightConst = right->asInt();
+                    rightImm = bitImm(right);
+                    rightImm64 = bitImm64(right);
+                }
+                
+                auto tryTestLoadImm = [&] (Arg::Width width, Arg::Signedness signedness, B3::Opcode loadOpcode) -> Inst {
+                    if (!hasRightConst)
+                        return Inst();
+                    // Signed loads will create high bits, so if the immediate has high bits
+                    // then we cannot proceed. Consider BitAnd(Load8S(ptr), 0x101). This cannot
+                    // be turned into testb (ptr), $1, since if the high bit within that byte
+                    // was set then it would be extended to include 0x100. The handling below
+                    // won't anticipate this, so we need to catch it here.
+                    if (signedness == Arg::Signed
+                        && !Arg::isRepresentableAs(width, Arg::Unsigned, rightConst))
+                        return Inst();
+                    
+                    // FIXME: If this is unsigned then we can chop things off of the immediate.
+                    // This might make the immediate more legal. Perhaps that's a job for
+                    // strength reduction?
+                    
+                    if (rightImm) {
+                        if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm)) {
+                            commitInternal(left);
+                            return result;
+                        }
+                    }
+                    if (rightImm64) {
+                        if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm64)) {
+                            commitInternal(left);
+                            return result;
+                        }
+                    }
+                    return Inst();
+                };
+
+                if (canCommitInternal) {
+                    // First handle tests that involve fewer bits than B3's type system supports.
+
+                    if (Inst result = tryTestLoadImm(Arg::Width8, Arg::Unsigned, Load8Z))
+                        return result;
+                    
+                    if (Inst result = tryTestLoadImm(Arg::Width8, Arg::Signed, Load8S))
+                        return result;
+                    
+                    if (Inst result = tryTestLoadImm(Arg::Width16, Arg::Unsigned, Load16Z))
+                        return result;
+                    
+                    if (Inst result = tryTestLoadImm(Arg::Width16, Arg::Signed, Load16S))
+                        return result;
+
+                    // This allows us to use a 32-bit test for 64-bit BitAnd if the immediate is
+                    // representable as an unsigned 32-bit value. The logic involved is the same
+                    // as if we were pondering using a 32-bit test for
+                    // BitAnd(SExt(Load(ptr)), const), in the sense that in both cases we have
+                    // to worry about high bits. So, we use the "Signed" version of this helper.
+                    if (Inst result = tryTestLoadImm(Arg::Width32, Arg::Signed, Load))
+                        return result;
+                    
+                    // This is needed to handle 32-bit test for arbitrary 32-bit immediates.
+                    if (Inst result = tryTestLoadImm(width, Arg::Unsigned, Load))
+                        return result;
+                    
+                    // Now handle tests that involve a load.
+                    
+                    Arg::Width width = Arg::widthForB3Type(value->child(0)->type());
+                    if (Inst result = tryTest(width, loadPromise(left), tmpPromise(right))) {
+                        commitInternal(left);
+                        return result;
+                    }
+                    
+                    if (Inst result = tryTest(width, tmpPromise(left), loadPromise(right))) {
+                        commitInternal(right);
+                        return result;
+                    }
+                }
+
+                // Now handle tests that involve an immediate and a tmp.
+
+                if (hasRightConst) {
+                    if ((width == Arg::Width32 && rightConst == 0xffffffff)
+                        || (width == Arg::Width64 && rightConst == -1)) {
+                        if (Inst result = tryTest(width, tmpPromise(left), tmpPromise(left)))
+                            return result;
+                    }
+                    if (isRepresentableAs<uint32_t>(rightConst)) {
+                        if (Inst result = tryTest(Arg::Width32, tmpPromise(left), rightImm))
+                            return result;
+                        if (Inst result = tryTest(Arg::Width32, tmpPromise(left), rightImm64))
+                            return result;
+                    }
+                    if (Inst result = tryTest(width, tmpPromise(left), rightImm))
+                        return result;
+                    if (Inst result = tryTest(width, tmpPromise(left), rightImm64))
+                        return result;
+                }
+
+                // Finally, just do tmp's.
+                return tryTest(width, tmpPromise(left), tmpPromise(right));
+            }
+            default:
+                return Inst();
+            }
+        };
+
+        if (FusionResult fusionResult = prepareToFuse(value)) {
+            if (Inst result = attemptFused()) {
+                commitFusion(value, fusionResult);
+                return result;
+            }
+        }
+
+        if (Arg::isValidBitImmForm(-1)) {
+            if (canCommitInternal && value->as()) {
+                // Handle things like Branch(Load8Z(value))
+
+                if (Inst result = tryTest(Arg::Width8, loadPromise(value, Load8Z), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+
+                if (Inst result = tryTest(Arg::Width8, loadPromise(value, Load8S), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+
+                if (Inst result = tryTest(Arg::Width16, loadPromise(value, Load16Z), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+
+                if (Inst result = tryTest(Arg::Width16, loadPromise(value, Load16S), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+
+                if (Inst result = tryTest(width, loadPromise(value), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+            }
+
+            ArgPromise leftPromise = tmpPromise(value);
+            ArgPromise rightPromise = Arg::bitImm(-1);
+            if (Inst result = test(width, resCond, leftPromise, rightPromise))
+                return result;
+        }
+        
+        // Sometimes this is the only form of test available. We prefer not to use this because
+        // it's less canonical.
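+        // Concretely this emits something like "test %x, %x", which sets the flags
+        // the same way as testing @x against an all-ones mask.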
+        ArgPromise leftPromise = tmpPromise(value);
+        ArgPromise rightPromise = tmpPromise(value);
+        return test(width, resCond, leftPromise, rightPromise);
+    }
+
+    Inst createBranch(Value* value, bool inverted = false)
+    {
+        return createGenericCompare(
+            value,
+            [this] (
+                Arg::Width width, const Arg& relCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                    if (isValidForm(Branch8, Arg::RelCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            Branch8, m_value, relCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    if (isValidForm(Branch32, Arg::RelCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            Branch32, m_value, relCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                case Arg::Width64:
+                    if (isValidForm(Branch64, Arg::RelCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            Branch64, m_value, relCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [this] (
+                Arg::Width width, const Arg& resCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                    if (isValidForm(BranchTest8, Arg::ResCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            BranchTest8, m_value, resCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    if (isValidForm(BranchTest32, Arg::ResCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            BranchTest32, m_value, resCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                case Arg::Width64:
+                    if (isValidForm(BranchTest64, Arg::ResCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            BranchTest64, m_value, resCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                if (isValidForm(BranchDouble, Arg::DoubleCond, left.kind(), right.kind())) {
+                    return left.inst(right.inst(
+                        BranchDouble, m_value, doubleCond,
+                        left.consume(*this), right.consume(*this)));
+                }
+                return Inst();
+            },
+            [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                if (isValidForm(BranchFloat, Arg::DoubleCond, left.kind(), right.kind())) {
+                    return left.inst(right.inst(
+                        BranchFloat, m_value, doubleCond,
+                        left.consume(*this), right.consume(*this)));
+                }
+                return Inst();
+            },
+            inverted);
+    }
+
+    Inst createCompare(Value* value, bool inverted = false)
+    {
+        return createGenericCompare(
+            value,
+            [this] (
+                Arg::Width width, const Arg& relCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    if (isValidForm(Compare32, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) {
+                        return left.inst(right.inst(
+                            Compare32, m_value, relCond,
+                            left.consume(*this), right.consume(*this), tmp(m_value)));
+                    }
+                    return Inst();
+                case Arg::Width64:
+                    if (isValidForm(Compare64, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) {
+                        return left.inst(right.inst(
+                            Compare64, m_value, relCond,
+                            left.consume(*this), right.consume(*this), tmp(m_value)));
+                    }
+                    return Inst();
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [this] (
+                Arg::Width width, const Arg& resCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    if (isValidForm(Test32, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) {
+                        return left.inst(right.inst(
+                            Test32, m_value, resCond,
+                            left.consume(*this), right.consume(*this), tmp(m_value)));
+                    }
+                    return Inst();
+                case Arg::Width64:
+                    if (isValidForm(Test64, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) {
+                        return left.inst(right.inst(
+                            Test64, m_value, resCond,
+                            left.consume(*this), right.consume(*this), tmp(m_value)));
+                    }
+                    return Inst();
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                if (isValidForm(CompareDouble, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) {
+                    return left.inst(right.inst(
+                        CompareDouble, m_value, doubleCond,
+                        left.consume(*this), right.consume(*this), tmp(m_value)));
+                }
+                return Inst();
+            },
+            [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                if (isValidForm(CompareFloat, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) {
+                    return left.inst(right.inst(
+                        CompareFloat, m_value, doubleCond,
+                        left.consume(*this), right.consume(*this), tmp(m_value)));
+                }
+                return Inst();
+            },
+            inverted);
+    }
+
+    struct MoveConditionallyConfig {
+        Air::Opcode moveConditionally32;
+        Air::Opcode moveConditionally64;
+        Air::Opcode moveConditionallyTest32;
+        Air::Opcode moveConditionallyTest64;
+        Air::Opcode moveConditionallyDouble;
+        Air::Opcode moveConditionallyFloat;
+    };
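+    // A sketch of what this produces (illustrative, not verbatim Air): for
+    // Select(LessThan(@a, @b), @x, @y) on a target with a six-operand form we would
+    // emit roughly:
+    //
+    //     MoveConditionally32 LessThan, %a, %b, %x, %y, %result
+    //
+    // On targets that only have the five-operand form, we first move the else-case
+    // into the result and conditionally overwrite it, as the fallback path below does.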
+    Inst createSelect(const MoveConditionallyConfig& config)
+    {
+        auto createSelectInstruction = [&] (Air::Opcode opcode, const Arg& condition, ArgPromise& left, ArgPromise& right) -> Inst {
+            if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                Tmp result = tmp(m_value);
+                Tmp thenCase = tmp(m_value->child(1));
+                Tmp elseCase = tmp(m_value->child(2));
+                return left.inst(right.inst(
+                    opcode, m_value, condition,
+                    left.consume(*this), right.consume(*this), thenCase, elseCase, result));
+            }
+            if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp)) {
+                Tmp result = tmp(m_value);
+                Tmp source = tmp(m_value->child(1));
+                append(relaxedMoveForType(m_value->type()), tmp(m_value->child(2)), result);
+                return left.inst(right.inst(
+                    opcode, m_value, condition,
+                    left.consume(*this), right.consume(*this), source, result));
+            }
+            return Inst();
+        };
+
+        return createGenericCompare(
+            m_value->child(0),
+            [&] (
+                Arg::Width width, const Arg& relCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                    // FIXME: Support these things.
+                    // https://bugs.webkit.org/show_bug.cgi?id=151504
+                    return Inst();
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    return createSelectInstruction(config.moveConditionally32, relCond, left, right);
+                case Arg::Width64:
+                    return createSelectInstruction(config.moveConditionally64, relCond, left, right);
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [&] (
+                Arg::Width width, const Arg& resCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                    // FIXME: Support more things.
+                    // https://bugs.webkit.org/show_bug.cgi?id=151504
+                    return Inst();
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    return createSelectInstruction(config.moveConditionallyTest32, resCond, left, right);
+                case Arg::Width64:
+                    return createSelectInstruction(config.moveConditionallyTest64, resCond, left, right);
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                return createSelectInstruction(config.moveConditionallyDouble, doubleCond, left, right);
+            },
+            [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                return createSelectInstruction(config.moveConditionallyFloat, doubleCond, left, right);
+            },
+            false);
+    }
+    
+    bool tryAppendLea()
+    {
+        Air::Opcode leaOpcode = tryOpcodeForType(Lea32, Lea64, m_value->type());
+        if (!isValidForm(leaOpcode, Arg::Index, Arg::Tmp))
+            return false;
+        
+        // This lets us turn things like this:
+        //
+        //     Add(Add(@x, Shl(@y, $2)), $100)
+        //
+        // Into this:
+        //
+        //     lea 100(%rdi,%rsi,4), %rax
+        //
+        // We have a choice here between committing the internal bits of an index or sharing
+        // them. There are solid arguments for both.
+        //
+        // Sharing: The word on the street is that the cost of a lea is one cycle no matter
+        // what it does. Every experiment I've ever seen seems to confirm this. So, sharing
+        // helps us in situations where Wasm input did this:
+        //
+        //     x = a[i].x;
+        //     y = a[i].y;
+        //
+        // With sharing we would do:
+        //
+        //     leal (%a,%i,4), %tmp
+        //     cmp (%size, %tmp)
+        //     ja _fail
+        //     movl (%base, %tmp), %x
+        //     leal 4(%a,%i,4), %tmp
+        //     cmp (%size, %tmp)
+        //     ja _fail
+        //     movl (%base, %tmp), %y
+        //
+        // In the absence of sharing, we may find ourselves needing separate registers for
+        // the innards of the index. That's relatively unlikely to be a thing due to other
+        // optimizations that we already have, but it could happen.
+        //
+        // Committing: The worst case is that there is a complicated graph of additions and
+        // shifts, where each value has multiple uses. In that case, it's better to compute
+        // each one separately from the others since that way, each calculation will use a
+        // relatively nearby tmp as its input. That seems uncommon, but in those cases,
+        // committing is a clear winner: it would result in a simple interference graph
+        // while sharing would result in a complex one. Interference sucks because it means
+        // more time in IRC and it means worse code.
+        //
+        // It's not super clear if any of these corner cases would ever arise. Committing
+        // has the benefit that it's easier to reason about, and protects a much darker
+        // corner case (more interference).
+                
+        // Here are the things we want to match:
+        // Add(Add(@x, @y), $c)
+        // Add(Shl(@x, $c), @y)
+        // Add(@x, Shl(@y, $c))
+        // Add(Add(@x, Shl(@y, $c)), $d)
+        // Add(Add(Shl(@x, $c), @y), $d)
+        //
+        // Note that if you do Add(Shl(@x, $c), $d) then we will treat $d as a non-constant and
+        // force it to materialize. You'll get something like this:
+        //
+        // movl $d, %tmp
+        // leal (%tmp,%x,1<<c), %result
+        //
+        // Which is fine, but it's no better than what we would have gotten anyway. So, we
+        // don't bother.
+
+        Value* innerAdd = nullptr;
+        Value* value = m_value;
+        int32_t offset = 0;
+        if (value->child(1)->isRepresentableAs<int32_t>()
+            && canBeInternal(value->child(0))
+            && value->child(0)->opcode() == Add) {
+            innerAdd = value->child(0);
+            offset = static_cast<int32_t>(value->child(1)->asInt());
+            value = value->child(0);
+        }
+        
+        auto tryShl = [&] (Value* shl, Value* other) -> bool {
+            std::optional<unsigned> scale = scaleForShl(shl, offset);
+            if (!scale)
+                return false;
+            if (!canBeInternal(shl))
+                return false;
+            
+            ASSERT(!m_locked.contains(shl->child(0)));
+            ASSERT(!m_locked.contains(other));
+            
+            append(leaOpcode, Arg::index(tmp(other), tmp(shl->child(0)), *scale, offset), tmp(m_value));
+            commitInternal(innerAdd);
+            commitInternal(shl);
+            return true;
+        };
+        
+        if (tryShl(value->child(0), value->child(1)))
+            return true;
+        if (tryShl(value->child(1), value->child(0)))
+            return true;
+        
+        // The remaining pattern is just:
+        // Add(@x, @y) (only if offset != 0)
+        if (!offset)
+            return false;
+        ASSERT(!m_locked.contains(value->child(0)));
+        ASSERT(!m_locked.contains(value->child(1)));
+        append(leaOpcode, Arg::index(tmp(value->child(0)), tmp(value->child(1)), 1, offset), tmp(m_value));
+        commitInternal(innerAdd);
+        return true;
+    }
+
+    void lower()
+    {
+        switch (m_value->opcode()) {
+        case B3::Nop: {
+            // Yes, we will totally see Nops because some phases will replaceWithNop() instead of
+            // properly removing things.
+            return;
+        }
+            
+        case Load: {
+            append(trappingInst(m_value, moveForType(m_value->type()), m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+            
+        case Load8S: {
+            append(trappingInst(m_value, Load8SignedExtendTo32, m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+
+        case Load8Z: {
+            append(trappingInst(m_value, Load8, m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+
+        case Load16S: {
+            append(trappingInst(m_value, Load16SignedExtendTo32, m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+
+        case Load16Z: {
+            append(trappingInst(m_value, Load16, m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+
+        case Add: {
+            if (tryAppendLea())
+                return;
+            
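+            // If one operand is a multiply with no other users, fuse
+            // Add(Mul(@a, @b), @c) into a single MultiplyAdd (e.g. madd on ARM64).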
+            Air::Opcode multiplyAddOpcode = tryOpcodeForType(MultiplyAdd32, MultiplyAdd64, m_value->type());
+            if (isValidForm(multiplyAddOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                Value* left = m_value->child(0);
+                Value* right = m_value->child(1);
+                if (!imm(right) || m_valueToTmp[right]) {
+                    auto tryAppendMultiplyAdd = [&] (Value* left, Value* right) -> bool {
+                        if (left->opcode() != Mul || !canBeInternal(left))
+                            return false;
+
+                        Value* multiplyLeft = left->child(0);
+                        Value* multiplyRight = left->child(1);
+                        if (canBeInternal(multiplyLeft) || canBeInternal(multiplyRight))
+                            return false;
+
+                        append(multiplyAddOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(right), tmp(m_value));
+                        commitInternal(left);
+
+                        return true;
+                    };
+
+                    if (tryAppendMultiplyAdd(left, right))
+                        return;
+                    if (tryAppendMultiplyAdd(right, left))
+                        return;
+                }
+            }
+
+            appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Sub: {
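+            // Similarly, fuse Sub(@a, Mul(@b, @c)) into a MultiplySub (e.g. msub on
+            // ARM64) when the multiply can be consumed here.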
+            Air::Opcode multiplySubOpcode = tryOpcodeForType(MultiplySub32, MultiplySub64, m_value->type());
+            if (multiplySubOpcode != Air::Oops
+                && isValidForm(multiplySubOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                Value* left = m_value->child(0);
+                Value* right = m_value->child(1);
+                if (!imm(right) || m_valueToTmp[right]) {
+                    auto tryAppendMultiplySub = [&] () -> bool {
+                        if (right->opcode() != Mul || !canBeInternal(right))
+                            return false;
+
+                        Value* multiplyLeft = right->child(0);
+                        Value* multiplyRight = right->child(1);
+                        if (m_locked.contains(multiplyLeft) || m_locked.contains(multiplyRight))
+                            return false;
+
+                        append(multiplySubOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(left), tmp(m_value));
+                        commitInternal(right);
+
+                        return true;
+                    };
+
+                    if (tryAppendMultiplySub())
+                        return;
+                }
+            }
+
+            appendBinOp<Sub32, Sub64, SubDouble, SubFloat>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Neg: {
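+            // And fuse Neg(Mul(@a, @b)) into a MultiplyNeg (e.g. mneg on ARM64).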
+            Air::Opcode multiplyNegOpcode = tryOpcodeForType(MultiplyNeg32, MultiplyNeg64, m_value->type());
+            if (multiplyNegOpcode != Air::Oops
+                && isValidForm(multiplyNegOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)
+                && m_value->child(0)->opcode() == Mul
+                && canBeInternal(m_value->child(0))) {
+                Value* multiplyOperation = m_value->child(0);
+                Value* multiplyLeft = multiplyOperation->child(0);
+                Value* multiplyRight = multiplyOperation->child(1);
+                if (!m_locked.contains(multiplyLeft) && !m_locked.contains(multiplyRight)) {
+                    append(multiplyNegOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(m_value));
+                    commitInternal(multiplyOperation);
+                    return;
+                }
+            }
+
+            appendUnOp<Neg32, Neg64, NegateDouble, NegateFloat>(m_value->child(0));
+            return;
+        }
+
+        case Mul: {
+            appendBinOp<Mul32, Mul64, MulDouble, MulFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Div: {
+            if (m_value->isChill())
+                RELEASE_ASSERT(isARM64());
+            if (isInt(m_value->type()) && isX86()) {
+                lowerX86Div(Div);
+                return;
+            }
+            ASSERT(!isX86() || isFloat(m_value->type()));
+
+            appendBinOp<Div32, Div64, DivDouble, DivFloat>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case UDiv: {
+            if (isInt(m_value->type()) && isX86()) {
+                lowerX86UDiv(UDiv);
+                return;
+            }
+
+            ASSERT(!isX86() && !isFloat(m_value->type()));
+
+            appendBinOp<UDiv32, UDiv64, Air::Oops, Air::Oops>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Mod: {
+            RELEASE_ASSERT(isX86());
+            RELEASE_ASSERT(!m_value->isChill());
+            lowerX86Div(Mod);
+            return;
+        }
+
+        case UMod: {
+            RELEASE_ASSERT(isX86());
+            lowerX86UDiv(UMod);
+            return;
+        }
+
+        case BitAnd: {
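+            // BitAnd with 0xff, 0xffff, or 0xffffffff is just a zero extension, so
+            // lower it to the corresponding extend/move instead of a real And.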
+            if (m_value->child(1)->isInt(0xff)) {
+                appendUnOp<ZeroExtend8To32, ZeroExtend8To32>(m_value->child(0));
+                return;
+            }
+            
+            if (m_value->child(1)->isInt(0xffff)) {
+                appendUnOp<ZeroExtend16To32, ZeroExtend16To32>(m_value->child(0));
+                return;
+            }
+
+            if (m_value->child(1)->isInt(0xffffffff)) {
+                appendUnOp<Move32, Move32>(m_value->child(0));
+                return;
+            }
+            
+            appendBinOp<And32, And64, AndDouble, AndFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case BitOr: {
+            appendBinOp<Or32, Or64, OrDouble, OrFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case BitXor: {
+            // FIXME: If canBeInternal(child), we should generate this using the comparison path.
+            // https://bugs.webkit.org/show_bug.cgi?id=152367
+            
+            if (m_value->child(1)->isInt(-1)) {
+                appendUnOp<Not32, Not64>(m_value->child(0));
+                return;
+            }
+            appendBinOp<Xor32, Xor64, XorDouble, XorFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Shl: {
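+            // Shl by 1 computes the same thing as Add(@x, @x), and the commutative
+            // Add tends to give instruction selection more freedom.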
+            if (m_value->child(1)->isInt32(1)) {
+                appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(m_value->child(0), m_value->child(0));
+                return;
+            }
+            
+            appendShift<Lshift32, Lshift64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case SShr: {
+            appendShift<Rshift32, Rshift64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case ZShr: {
+            appendShift<Urshift32, Urshift64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case RotR: {
+            appendShift<RotateRight32, RotateRight64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case RotL: {
+            appendShift<RotateLeft32, RotateLeft64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Clz: {
+            appendUnOp<CountLeadingZeros32, CountLeadingZeros64>(m_value->child(0));
+            return;
+        }
+
+        case Abs: {
+            RELEASE_ASSERT_WITH_MESSAGE(!isX86(), "Abs is not supported natively on x86. It must be replaced before generation.");
+            appendUnOp<Air::Oops, Air::Oops, AbsDouble, AbsFloat>(m_value->child(0));
+            return;
+        }
+
+        case Ceil: {
+            appendUnOp<Air::Oops, Air::Oops, CeilDouble, CeilFloat>(m_value->child(0));
+            return;
+        }
+
+        case Floor: {
+            appendUnOp<Air::Oops, Air::Oops, FloorDouble, FloorFloat>(m_value->child(0));
+            return;
+        }
+
+        case Sqrt: {
+            appendUnOp<Air::Oops, Air::Oops, SqrtDouble, SqrtFloat>(m_value->child(0));
+            return;
+        }
+
+        case BitwiseCast: {
+            appendUnOp<Move32ToFloat, Move64ToDouble, MoveDoubleTo64, MoveFloatTo32>(m_value->child(0));
+            return;
+        }
+
+        case Store: {
+            Value* valueToStore = m_value->child(0);
+            if (canBeInternal(valueToStore)) {
+                bool matched = false;
+                switch (valueToStore->opcode()) {
+                case Add:
+                    matched = tryAppendStoreBinOp<Add32, Add64, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                case Sub:
+                    if (valueToStore->child(0)->isInt(0)) {
+                        matched = tryAppendStoreUnOp<Neg32, Neg64>(valueToStore->child(1));
+                        break;
+                    }
+                    matched = tryAppendStoreBinOp<Sub32, Sub64>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                case BitAnd:
+                    matched = tryAppendStoreBinOp<And32, And64, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                case BitXor:
+                    if (valueToStore->child(1)->isInt(-1)) {
+                        matched = tryAppendStoreUnOp<Not32, Not64>(valueToStore->child(0));
+                        break;
+                    }
+                    matched = tryAppendStoreBinOp<Xor32, Xor64, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                default:
+                    break;
+                }
+                if (matched) {
+                    commitInternal(valueToStore);
+                    return;
+                }
+            }
+
+            appendStore(valueToStore, addr(m_value));
+            return;
+        }
+
+        case B3::Store8: {
+            Value* valueToStore = m_value->child(0);
+            if (canBeInternal(valueToStore)) {
+                bool matched = false;
+                switch (valueToStore->opcode()) {
+                case Add:
+                    matched = tryAppendStoreBinOp<Add8, Air::Oops, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                default:
+                    break;
+                }
+                if (matched) {
+                    commitInternal(valueToStore);
+                    return;
+                }
+            }
+            appendStore(Air::Store8, valueToStore, addr(m_value));
+            return;
+        }
+
+        case B3::Store16: {
+            Value* valueToStore = m_value->child(0);
+            if (canBeInternal(valueToStore)) {
+                bool matched = false;
+                switch (valueToStore->opcode()) {
+                case Add:
+                    matched = tryAppendStoreBinOp<Add16, Air::Oops, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                default:
+                    break;
+                }
+                if (matched) {
+                    commitInternal(valueToStore);
+                    return;
+                }
+            }
+            appendStore(Air::Store16, valueToStore, addr(m_value));
+            return;
+        }
+
+        case WasmAddress: {
+            WasmAddressValue* address = m_value->as<WasmAddressValue>();
+
+            append(Add64, Arg(address->pinnedGPR()), tmp(address));
+            return;
+        }
+
+        case Fence: {
+            FenceValue* fence = m_value->as<FenceValue>();
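+            // A fence that neither reads nor writes only constrains compiler
+            // reordering, so it lowers to no Air instruction at all.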
+            if (!fence->write && !fence->read)
+                return;
+            if (!fence->write) {
+                // A fence that reads but does not write is for protecting motion of stores.
+                append(StoreFence);
+                return;
+            }
+            if (!fence->read) {
+                // A fence that writes but does not read is for protecting motion of loads.
+                append(LoadFence);
+                return;
+            }
+            append(MemoryFence);
+            return;
+        }
+
+        case Trunc: {
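+            // Trunc reuses its child's tmp (the low 32 bits of a 64-bit tmp are the
+            // truncated value), so there is nothing to emit.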
+            ASSERT(tmp(m_value->child(0)) == tmp(m_value));
+            return;
+        }
+
+        case SExt8: {
+            appendUnOp<SignExtend8To32, Air::Oops>(m_value->child(0));
+            return;
+        }
+
+        case SExt16: {
+            appendUnOp<SignExtend16To32, Air::Oops>(m_value->child(0));
+            return;
+        }
+
+        case ZExt32: {
+            appendUnOp<Move32, Air::Oops>(m_value->child(0));
+            return;
+        }
+
+        case SExt32: {
+            // FIXME: We should have support for movsbq/movswq
+            // https://bugs.webkit.org/show_bug.cgi?id=152232
+            
+            appendUnOp<SignExtend32ToPtr, Air::Oops>(m_value->child(0));
+            return;
+        }
+
+        case FloatToDouble: {
+            appendUnOp<Air::Oops, Air::Oops, Air::Oops, ConvertFloatToDouble>(m_value->child(0));
+            return;
+        }
+
+        case DoubleToFloat: {
+            appendUnOp<Air::Oops, Air::Oops, ConvertDoubleToFloat>(m_value->child(0));
+            return;
+        }
+
+        case ArgumentReg: {
+            m_prologue.append(Inst(
+                moveForType(m_value->type()), m_value,
+                Tmp(m_value->as<ArgumentRegValue>()->argumentReg()),
+                tmp(m_value)));
+            return;
+        }
+
+        case Const32:
+        case Const64: {
+            if (imm(m_value))
+                append(Move, imm(m_value), tmp(m_value));
+            else
+                append(Move, Arg::bigImm(m_value->asInt()), tmp(m_value));
+            return;
+        }
+
+        case ConstDouble:
+        case ConstFloat: {
+            // We expect that the moveConstants() phase has run, and any doubles referenced from
+            // stackmaps get fused.
+            RELEASE_ASSERT(m_value->opcode() == ConstFloat || isIdentical(m_value->asDouble(), 0.0));
+            RELEASE_ASSERT(m_value->opcode() == ConstDouble || isIdentical(m_value->asFloat(), 0.0f));
+            append(MoveZeroToDouble, tmp(m_value));
+            return;
+        }
+
+        case FramePointer: {
+            ASSERT(tmp(m_value) == Tmp(GPRInfo::callFrameRegister));
+            return;
+        }
+
+        case SlotBase: {
+            append(
+                pointerType() == Int64 ? Lea64 : Lea32,
+                Arg::stack(m_stackToStack.get(m_value->as<SlotBaseValue>()->slot())),
+                tmp(m_value));
+            return;
+        }
+
+        case Equal:
+        case NotEqual:
+        case LessThan:
+        case GreaterThan:
+        case LessEqual:
+        case GreaterEqual:
+        case Above:
+        case Below:
+        case AboveEqual:
+        case BelowEqual:
+        case EqualOrUnordered: {
+            m_insts.last().append(createCompare(m_value));
+            return;
+        }
+
+        case Select: {
+            MoveConditionallyConfig config;
+            if (isInt(m_value->type())) {
+                config.moveConditionally32 = MoveConditionally32;
+                config.moveConditionally64 = MoveConditionally64;
+                config.moveConditionallyTest32 = MoveConditionallyTest32;
+                config.moveConditionallyTest64 = MoveConditionallyTest64;
+                config.moveConditionallyDouble = MoveConditionallyDouble;
+                config.moveConditionallyFloat = MoveConditionallyFloat;
+            } else {
+                // FIXME: it's not obvious that these are particularly efficient.
+                config.moveConditionally32 = MoveDoubleConditionally32;
+                config.moveConditionally64 = MoveDoubleConditionally64;
+                config.moveConditionallyTest32 = MoveDoubleConditionallyTest32;
+                config.moveConditionallyTest64 = MoveDoubleConditionallyTest64;
+                config.moveConditionallyDouble = MoveDoubleConditionallyDouble;
+                config.moveConditionallyFloat = MoveDoubleConditionallyFloat;
+            }
+            
+            m_insts.last().append(createSelect(config));
+            return;
+        }
+
+        case IToD: {
+            appendUnOp<ConvertInt32ToDouble, ConvertInt64ToDouble>(m_value->child(0));
+            return;
+        }
+
+        case IToF: {
+            appendUnOp<ConvertInt32ToFloat, ConvertInt64ToFloat>(m_value->child(0));
+            return;
+        }
+
+        case B3::CCall: {
+            CCallValue* cCall = m_value->as<CCallValue>();
+
+            Inst inst(m_isRare ? Air::ColdCCall : Air::CCall, cCall);
+
+            // We have a ton of flexibility regarding the callee argument, but currently, we don't
+            // use it yet. It gets weird for reasons:
+            // 1) We probably will never take advantage of this. We don't have C calls to locations
+            //    loaded from addresses. We have JS calls like that, but those use Patchpoints.
+            // 2) On X86_64 we still don't support call with BaseIndex.
+            // 3) On non-X86, we don't natively support any kind of loading from address.
+            // 4) We don't have an isValidForm() for the CCallSpecial so we have no smart way to
+            //    decide.
+            // FIXME: https://bugs.webkit.org/show_bug.cgi?id=151052
+            inst.args.append(tmp(cCall->child(0)));
+
+            if (cCall->type() != Void)
+                inst.args.append(tmp(cCall));
+
+            for (unsigned i = 1; i < cCall->numChildren(); ++i)
+                inst.args.append(immOrTmp(cCall->child(i)));
+
+            m_insts.last().append(WTFMove(inst));
+            return;
+        }
+
+        case Patchpoint: {
+            PatchpointValue* patchpointValue = m_value->as<PatchpointValue>();
+            ensureSpecial(m_patchpointSpecial);
+            
+            Inst inst(Patch, patchpointValue, Arg::special(m_patchpointSpecial));
+
+            Vector<Inst, 1> after;
+            if (patchpointValue->type() != Void) {
+                switch (patchpointValue->resultConstraint.kind()) {
+                case ValueRep::WarmAny:
+                case ValueRep::ColdAny:
+                case ValueRep::LateColdAny:
+                case ValueRep::SomeRegister:
+                case ValueRep::SomeEarlyRegister:
+                    inst.args.append(tmp(patchpointValue));
+                    break;
+                case ValueRep::Register: {
+                    Tmp reg = Tmp(patchpointValue->resultConstraint.reg());
+                    inst.args.append(reg);
+                    after.append(Inst(
+                        relaxedMoveForType(patchpointValue->type()), m_value, reg, tmp(patchpointValue)));
+                    break;
+                }
+                case ValueRep::StackArgument: {
+                    Arg arg = Arg::callArg(patchpointValue->resultConstraint.offsetFromSP());
+                    inst.args.append(arg);
+                    after.append(Inst(
+                        moveForType(patchpointValue->type()), m_value, arg, tmp(patchpointValue)));
+                    break;
+                }
+                default:
+                    RELEASE_ASSERT_NOT_REACHED();
+                    break;
+                }
+            }
+            
+            fillStackmap(inst, patchpointValue, 0);
+            
+            if (patchpointValue->resultConstraint.isReg())
+                patchpointValue->lateClobbered().clear(patchpointValue->resultConstraint.reg());
+
+            for (unsigned i = patchpointValue->numGPScratchRegisters; i--;)
+                inst.args.append(m_code.newTmp(Arg::GP));
+            for (unsigned i = patchpointValue->numFPScratchRegisters; i--;)
+                inst.args.append(m_code.newTmp(Arg::FP));
+            
+            m_insts.last().append(WTFMove(inst));
+            m_insts.last().appendVector(after);
+            return;
+        }
+
+        case CheckAdd:
+        case CheckSub:
+        case CheckMul: {
+            CheckValue* checkValue = m_value->as<CheckValue>();
+
+            Value* left = checkValue->child(0);
+            Value* right = checkValue->child(1);
+
+            Tmp result = tmp(m_value);
+
+            // Handle checked negation.
+            if (checkValue->opcode() == CheckSub && left->isInt(0)) {
+                append(Move, tmp(right), result);
+
+                Air::Opcode opcode =
+                    opcodeForType(BranchNeg32, BranchNeg64, checkValue->type());
+                CheckSpecial* special = ensureCheckSpecial(opcode, 2);
+
+                Inst inst(Patch, checkValue, Arg::special(special));
+                inst.args.append(Arg::resCond(MacroAssembler::Overflow));
+                inst.args.append(result);
+
+                fillStackmap(inst, checkValue, 2);
+
+                m_insts.last().append(WTFMove(inst));
+                return;
+            }
+
+            Air::Opcode opcode = Air::Oops;
+            Commutativity commutativity = NotCommutative;
+            StackmapSpecial::RoleMode stackmapRole = StackmapSpecial::SameAsRep;
+            switch (m_value->opcode()) {
+            case CheckAdd:
+                opcode = opcodeForType(BranchAdd32, BranchAdd64, m_value->type());
+                stackmapRole = StackmapSpecial::ForceLateUseUnlessRecoverable;
+                commutativity = Commutative;
+                break;
+            case CheckSub:
+                opcode = opcodeForType(BranchSub32, BranchSub64, m_value->type());
+                break;
+            case CheckMul:
+                opcode = opcodeForType(BranchMul32, BranchMul64, checkValue->type());
+                stackmapRole = StackmapSpecial::ForceLateUse;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+
+            // FIXME: It would be great to fuse Loads into these. We currently don't do it because the
+            // rule for stackmaps is that all addresses are just stack addresses. Maybe we could relax
+            // this rule here.
+            // https://bugs.webkit.org/show_bug.cgi?id=151228
+
+            Vector<Arg, 2> sources;
+            if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+                sources.append(tmp(left));
+                sources.append(imm(right));
+            } else if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Imm, Arg::Tmp)) {
+                sources.append(imm(right));
+                append(Move, tmp(left), result);
+            } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                sources.append(tmp(left));
+                sources.append(tmp(right));
+            } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp)) {
+                if (commutativity == Commutative && preferRightForResult(left, right)) {
+                    sources.append(tmp(left));
+                    append(Move, tmp(right), result);
+                } else {
+                    sources.append(tmp(right));
+                    append(Move, tmp(left), result);
+                }
+            } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                sources.append(tmp(left));
+                sources.append(tmp(right));
+                sources.append(m_code.newTmp(Arg::typeForB3Type(m_value->type())));
+                sources.append(m_code.newTmp(Arg::typeForB3Type(m_value->type())));
+            }
+
+            // There is a really hilarious case that arises when we do BranchAdd32(%x, %x). We won't emit
+            // such code, but the coalescing in our register allocator also does copy propagation, so
+            // although we emit:
+            //
+            //     Move %tmp1, %tmp2
+            //     BranchAdd32 %tmp1, %tmp2
+            //
+            // The register allocator may turn this into:
+            //
+            //     BranchAdd32 %rax, %rax
+            //
+            // Currently we handle this by ensuring that even this kind of addition can be undone. We can
+            // undo it by using the carry flag. It's tempting to get rid of that code and just "fix" this
+            // here by forcing LateUse on the stackmap. If we did that unconditionally, we'd lose a lot of
+            // performance. So it's tempting to do it only if left == right. But that creates an awkward
+            // constraint on Air: it means that Air would not be allowed to do any copy propagation.
+            // Notice that the %rax,%rax situation happened after Air copy-propagated the Move we are
+            // emitting. We know that copy-propagating over that Move causes add-to-self. But what if we
+            // emit something like a Move - or even do other kinds of copy-propagation on tmp's -
+            // somewhere else in this code. The add-to-self situation may only emerge after some other Air
+            // optimizations remove other Move's or identity-like operations. That's why we don't use
+            // LateUse here to take care of add-to-self.
+            
+            CheckSpecial* special = ensureCheckSpecial(opcode, 2 + sources.size(), stackmapRole);
+            
+            Inst inst(Patch, checkValue, Arg::special(special));
+
+            inst.args.append(Arg::resCond(MacroAssembler::Overflow));
+
+            inst.args.appendVector(sources);
+            inst.args.append(result);
+
+            fillStackmap(inst, checkValue, 2);
+
+            m_insts.last().append(WTFMove(inst));
+            return;
+        }
+
+        case Check: {
+            Inst branch = createBranch(m_value->child(0));
+
+            CheckSpecial* special = ensureCheckSpecial(branch);
+            
+            CheckValue* checkValue = m_value->as<CheckValue>();
+            
+            Inst inst(Patch, checkValue, Arg::special(special));
+            inst.args.appendVector(branch.args);
+            
+            fillStackmap(inst, checkValue, 1);
+            
+            m_insts.last().append(WTFMove(inst));
+            return;
+        }
+
+        case B3::WasmBoundsCheck: {
+            WasmBoundsCheckValue* value = m_value->as<WasmBoundsCheckValue>();
+
+            Value* ptr = value->child(0);
+
+            Arg temp = m_code.newTmp(Arg::GP);
+            append(Inst(Move32, value, tmp(ptr), temp));
+            if (value->offset()) {
+                if (imm(value->offset()))
+                    append(Add64, imm(value->offset()), temp);
+                else {
+                    Arg bigImm = m_code.newTmp(Arg::GP);
+                    append(Move, Arg::bigImm(value->offset()), bigImm);
+                    append(Add64, bigImm, temp);
+                }
+            }
+            append(Inst(Air::WasmBoundsCheck, value, temp, Arg(value->pinnedGPR())));
+            return;
+        }
+
+        case Upsilon: {
+            Value* value = m_value->child(0);
+            append(
+                relaxedMoveForType(value->type()), immOrTmp(value),
+                m_phiToTmp[m_value->as<UpsilonValue>()->phi()]);
+            return;
+        }
+
+        case Phi: {
+            // Snapshot the value of the Phi. It may change under us because you could do:
+            // a = Phi()
+            // Upsilon(@x, ^a)
+            // @a => this should get the value of the Phi before the Upsilon, i.e. not @x.
+
+            append(relaxedMoveForType(m_value->type()), m_phiToTmp[m_value], tmp(m_value));
+            return;
+        }
+
+        case Set: {
+            Value* value = m_value->child(0);
+            append(
+                relaxedMoveForType(value->type()), immOrTmp(value),
+                m_variableToTmp.get(m_value->as<VariableValue>()->variable()));
+            return;
+        }
+
+        case Get: {
+            append(
+                relaxedMoveForType(m_value->type()),
+                m_variableToTmp.get(m_value->as<VariableValue>()->variable()), tmp(m_value));
+            return;
+        }
+
+        case Branch: {
+            m_insts.last().append(createBranch(m_value->child(0)));
+            return;
+        }
+
+        case B3::Jump: {
+            append(Air::Jump);
+            return;
+        }
+            
+        case Identity: {
+            ASSERT(tmp(m_value->child(0)) == tmp(m_value));
+            return;
+        }
+
+        case Return: {
+            if (!m_value->numChildren()) {
+                append(RetVoid);
+                return;
+            }
+            Value* value = m_value->child(0);
+            Tmp returnValueGPR = Tmp(GPRInfo::returnValueGPR);
+            Tmp returnValueFPR = Tmp(FPRInfo::returnValueFPR);
+            switch (value->type()) {
+            case Void:
+                // It's impossible for a void value to be used as a child. If we did want to have a
+                // void return, we'd introduce a different opcode, like ReturnVoid.
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            case Int32:
+                append(Move, immOrTmp(value), returnValueGPR);
+                append(Ret32, returnValueGPR);
+                break;
+            case Int64:
+                append(Move, immOrTmp(value), returnValueGPR);
+                append(Ret64, returnValueGPR);
+                break;
+            case Float:
+                append(MoveFloat, tmp(value), returnValueFPR);
+                append(RetFloat, returnValueFPR);
+                break;
+            case Double:
+                append(MoveDouble, tmp(value), returnValueFPR);
+                append(RetDouble, returnValueFPR);
+                break;
+            }
+            return;
+        }
+
+        case B3::Oops: {
+            append(Air::Oops);
+            return;
+        }
+            
+        case B3::EntrySwitch: {
+            append(Air::EntrySwitch);
+            return;
+        }
+
+        default:
+            break;
+        }
+
+        dataLog("FATAL: could not lower ", deepDump(m_procedure, m_value), "\n");
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    void lowerX86Div(B3::Opcode op)
+    {
+#if CPU(X86) || CPU(X86_64)
+        Tmp eax = Tmp(X86Registers::eax);
+        Tmp edx = Tmp(X86Registers::edx);
+
+        Air::Opcode convertToDoubleWord;
+        Air::Opcode div;
+        switch (m_value->type()) {
+        case Int32:
+            convertToDoubleWord = X86ConvertToDoubleWord32;
+            div = X86Div32;
+            break;
+        case Int64:
+            convertToDoubleWord = X86ConvertToQuadWord64;
+            div = X86Div64;
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return;
+        }
+
+        ASSERT(op == Div || op == Mod);
+        X86Registers::RegisterID result = op == Div ? X86Registers::eax : X86Registers::edx;
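+        // idiv leaves the quotient in eax and the remainder in edx, so Div and Mod
+        // differ only in which register is copied out below.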
+
+        append(Move, tmp(m_value->child(0)), eax);
+        append(convertToDoubleWord, eax, edx);
+        append(div, eax, edx, tmp(m_value->child(1)));
+        append(Move, Tmp(result), tmp(m_value));
+
+#else
+        UNUSED_PARAM(op);
+        UNREACHABLE_FOR_PLATFORM();
+#endif
+    }
+
+    void lowerX86UDiv(B3::Opcode op)
+    {
+#if CPU(X86) || CPU(X86_64)
+        Tmp eax = Tmp(X86Registers::eax);
+        Tmp edx = Tmp(X86Registers::edx);
+
+        Air::Opcode div = m_value->type() == Int32 ? X86UDiv32 : X86UDiv64;
+
+        ASSERT(op == UDiv || op == UMod);
+        X86Registers::RegisterID result = op == UDiv ? X86Registers::eax : X86Registers::edx;
+
+        append(Move, tmp(m_value->child(0)), eax);
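+        // Unsigned division needs the high half (edx) zeroed rather than
+        // sign-extended, hence the xor below instead of cdq/cqo.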
+        append(Xor64, edx, edx);
+        append(div, eax, edx, tmp(m_value->child(1)));
+        append(Move, Tmp(result), tmp(m_value));
+#else
+        UNUSED_PARAM(op);
+        UNREACHABLE_FOR_PLATFORM();
+#endif
+    }
+
+    IndexSet<Value> m_locked; // These are values that will have no Tmp in Air.
+    IndexMap<Value, Tmp> m_valueToTmp; // These are values that must have a Tmp in Air. We say that a Value* with a non-null Tmp is "pinned".
+    IndexMap<Value, Tmp> m_phiToTmp; // Each Phi gets its own Tmp.
+    IndexMap<B3::BasicBlock, Air::BasicBlock*> m_blockToBlock;
+    HashMap<B3::StackSlot*, Air::StackSlot*> m_stackToStack;
+    HashMap<Variable*, Tmp> m_variableToTmp;
+
+    UseCounts m_useCounts;
+    PhiChildren m_phiChildren;
+    BlockWorklist m_fastWorklist;
+    Dominators& m_dominators;
+
+    Vector<Vector<Inst, 4>> m_insts;
+    Vector<Inst> m_prologue;
+
+    B3::BasicBlock* m_block;
+    bool m_isRare;
+    unsigned m_index;
+    Value* m_value;
+
+    PatchpointSpecial* m_patchpointSpecial { nullptr };
+    HashMap<CheckSpecial::Key, CheckSpecial*> m_checkSpecials;
+
+    Procedure& m_procedure;
+    Code& m_code;
+};
+
+} // anonymous namespace
+
+void lowerToAir(Procedure& procedure)
+{
+    PhaseScope phaseScope(procedure, "lowerToAir");
+    LowerToAir lowerToAir(procedure);
+    lowerToAir.run();
+}
+
+} } // namespace JSC::B3
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3LowerToAir.h b/Source/JavaScriptCore/b3/B3LowerToAir.h
new file mode 100644
index 000000000..a66837613
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerToAir.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+namespace Air { class Code; }
+
+// This lowers the current B3 procedure to an Air code.
+
+JS_EXPORT_PRIVATE void lowerToAir(Procedure&);
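+//
+// Usage sketch (assumed, for illustration; see Procedure and Air::Code for the
+// real API):
+//
+//     Procedure proc;
+//     // ... build B3 IR into proc ...
+//     lowerToAir(proc);
+//     // proc.code() now holds the Air translation.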
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3MathExtras.cpp b/Source/JavaScriptCore/b3/B3MathExtras.cpp
new file mode 100644
index 000000000..1c99379ba
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MathExtras.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3MathExtras.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CCallValue.h"
+#include "B3Const32Value.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstPtrValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "MathCommon.h"
+
+namespace JSC { namespace B3 {
+
+std::pair<BasicBlock*, Value*> powDoubleInt32(Procedure& procedure, BasicBlock* start, Origin origin, Value* x, Value* y)
+{
+    BasicBlock* functionCallCase = procedure.addBlock();
+    BasicBlock* loopPreHeaderCase = procedure.addBlock();
+    BasicBlock* loopTestForEvenCase = procedure.addBlock();
+    BasicBlock* loopOdd = procedure.addBlock();
+    BasicBlock* loopEvenOdd = procedure.addBlock();
+    BasicBlock* continuation = procedure.addBlock();
+
+    Value* shouldGoSlowPath = start->appendNew<Value>(procedure, Above, origin,
+        y,
+        start->appendNew<Const32Value>(procedure, origin, maxExponentForIntegerMathPow));
+    start->appendNew<Value>(procedure, Branch, origin, shouldGoSlowPath);
+    start->setSuccessors(FrequentedBlock(functionCallCase), FrequentedBlock(loopPreHeaderCase));
+
+    // Function call.
+    Value* yAsDouble = functionCallCase->appendNew<Value>(procedure, IToD, origin, y);
+    double (*powDouble)(double, double) = pow;
+    Value* powResult = functionCallCase->appendNew<CCallValue>(
+        procedure, Double, origin,
+        functionCallCase->appendNew<ConstPtrValue>(procedure, origin, bitwise_cast<void*>(powDouble)),
+        x, yAsDouble);
+    UpsilonValue* powResultUpsilon = functionCallCase->appendNew<UpsilonValue>(procedure, origin, powResult);
+    functionCallCase->appendNew<Value>(procedure, Jump, origin);
+    functionCallCase->setSuccessors(FrequentedBlock(continuation));
+
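+    // The fast path below is exponentiation by squaring, built directly as B3 IR.
+    // As a C sketch (on this path y is non-negative and small):
+    //
+    //     double result = 1;
+    //     double squaredInput = x;
+    //     int32_t counter = y;
+    //     do {
+    //         if (counter & 1)
+    //             result *= squaredInput;
+    //         squaredInput *= squaredInput;
+    //         counter = static_cast<int32_t>(static_cast<uint32_t>(counter) >> 1);
+    //     } while (counter);
+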
+    // Loop pre-header.
+    Value* initialResult = loopPreHeaderCase->appendNew<ConstDoubleValue>(procedure, origin, 1.);
+    UpsilonValue* initialLoopValue = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, initialResult);
+    UpsilonValue* initialResultValue = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, initialResult);
+    UpsilonValue* initialSquaredInput = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, x);
+    UpsilonValue* initialLoopCounter = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, y);
+    loopPreHeaderCase->appendNew<Value>(procedure, Jump, origin);
+    loopPreHeaderCase->setSuccessors(FrequentedBlock(loopTestForEvenCase));
+
+    // Test if what is left of the counter is even.
+    Value* inLoopCounter = loopTestForEvenCase->appendNew<Value>(procedure, Phi, Int32, origin);
+    Value* inLoopSquaredInput = loopTestForEvenCase->appendNew<Value>(procedure, Phi, Double, origin);
+    Value* lastCounterBit = loopTestForEvenCase->appendNew<Value>(procedure, BitAnd, origin,
+        inLoopCounter,
+        loopTestForEvenCase->appendNew<Const32Value>(procedure, origin, 1));
+    loopTestForEvenCase->appendNew<Value>(procedure, Branch, origin, lastCounterBit);
+    loopTestForEvenCase->setSuccessors(FrequentedBlock(loopOdd), FrequentedBlock(loopEvenOdd));
+
+    // Counter is odd.
+    Value* inLoopResult = loopOdd->appendNew<Value>(procedure, Phi, Double, origin);
+    Value* updatedResult = loopOdd->appendNew<Value>(procedure, Mul, origin, inLoopResult, inLoopSquaredInput);
+    UpsilonValue* updatedLoopResultUpsilon = loopOdd->appendNew<UpsilonValue>(procedure, origin, updatedResult);
+    initialLoopValue->setPhi(inLoopResult);
+    updatedLoopResultUpsilon->setPhi(inLoopResult);
+    UpsilonValue* updatedLoopResult = loopOdd->appendNew<UpsilonValue>(procedure, origin, updatedResult);
+
+    loopOdd->appendNew<Value>(procedure, Jump, origin);
+    loopOdd->setSuccessors(FrequentedBlock(loopEvenOdd));
+
+    // Even value and following the Odd.
+    Value* squaredInput = loopEvenOdd->appendNew<Value>(procedure, Mul, origin, inLoopSquaredInput, inLoopSquaredInput);
+    UpsilonValue* squaredInputUpsilon = loopEvenOdd->appendNew<UpsilonValue>(procedure, origin, squaredInput);
+    initialSquaredInput->setPhi(inLoopSquaredInput);
+    squaredInputUpsilon->setPhi(inLoopSquaredInput);
+
+    Value* updatedCounter = loopEvenOdd->appendNew<Value>(procedure, ZShr, origin,
+        inLoopCounter,
+        loopEvenOdd->appendNew<Const32Value>(procedure, origin, 1));
+    UpsilonValue* updatedCounterUpsilon = loopEvenOdd->appendNew<UpsilonValue>(procedure, origin, updatedCounter);
+    initialLoopCounter->setPhi(inLoopCounter);
+    updatedCounterUpsilon->setPhi(inLoopCounter);
+
+    loopEvenOdd->appendNew(procedure, Branch, origin, updatedCounter);
+    loopEvenOdd->setSuccessors(FrequentedBlock(loopTestForEvenCase), FrequentedBlock(continuation));
+
+    // Continuation: merge the slow-path call result with the inline loop result.
+    Value* finalResultPhi = continuation->appendNew<Value>(procedure, Phi, Double, origin);
+    powResultUpsilon->setPhi(finalResultPhi);
+    initialResultValue->setPhi(finalResultPhi);
+    updatedLoopResult->setPhi(finalResultPhi);
+    return std::make_pair(continuation, finalResultPhi);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
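The IR constructed above is exponentiation by squaring, restricted to the fast path: the unsigned Above compare already sent negative or oversized exponents to the pow() call. The following standalone C++ sketch mirrors the same algorithm, with comments naming the corresponding blocks; it is illustrative only and not part of the patch:

    #include <cstdint>
    #include <cstdio>

    static double powDoubleInt32Reference(double x, int32_t y)
    {
        // loopPreHeaderCase: result = 1, squaredInput = x, counter = y.
        double result = 1.0;
        double squaredInput = x;
        uint32_t counter = static_cast<uint32_t>(y);

        while (counter) {
            if (counter & 1)              // loopTestForEvenCase, then loopOdd
                result *= squaredInput;
            squaredInput *= squaredInput; // loopEvenOdd: square the input...
            counter >>= 1;                // ...and ZShr the counter by 1.
        }
        return result;
    }

    int main()
    {
        std::printf("%g\n", powDoubleInt32Reference(2.0, 10)); // prints 1024
    }
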
diff --git a/Source/JavaScriptCore/b3/B3MathExtras.h b/Source/JavaScriptCore/b3/B3MathExtras.h
new file mode 100644
index 000000000..b6bddea65
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MathExtras.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Procedure;
+class Value;
+
+// Raises "x" (a Double) to the power "y" (an Int32).
+// Returns a new block continuing the flow, and the value representing the result.
+JS_EXPORT_PRIVATE std::pair<BasicBlock*, Value*> powDoubleInt32(Procedure&, BasicBlock*, Origin, Value* x, Value* y);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3MemoryValue.cpp b/Source/JavaScriptCore/b3/B3MemoryValue.cpp
new file mode 100644
index 000000000..3764b7445
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MemoryValue.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3MemoryValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+MemoryValue::~MemoryValue()
+{
+}
+
+size_t MemoryValue::accessByteSize() const
+{
+    switch (opcode()) {
+    case Load8Z:
+    case Load8S:
+    case Store8:
+        return 1;
+    case Load16Z:
+    case Load16S:
+    case Store16:
+        return 2;
+    case Load:
+        return sizeofType(type());
+    case Store:
+        return sizeofType(child(0)->type());
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return 0;
+    }
+}
+
+void MemoryValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    if (m_offset)
+        out.print(comma, "offset = ", m_offset);
+    if ((isLoad() && effects().reads != range())
+        || (isStore() && effects().writes != range()))
+        out.print(comma, "range = ", range());
+}
+
+Value* MemoryValue::cloneImpl() const
+{
+    return new MemoryValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3MemoryValue.h b/Source/JavaScriptCore/b3/B3MemoryValue.h
new file mode 100644
index 000000000..9a0504f98
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MemoryValue.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+// FIXME: We want to allow fenced memory accesses on ARM.
+// https://bugs.webkit.org/show_bug.cgi?id=162349
+
+class JS_EXPORT_PRIVATE MemoryValue : public Value {
+public:
+    static bool accepts(Kind kind)
+    {
+        switch (kind.opcode()) {
+        case Load8Z:
+        case Load8S:
+        case Load16Z:
+        case Load16S:
+        case Load:
+        case Store8:
+        case Store16:
+        case Store:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static bool isStore(Kind kind)
+    {
+        switch (kind.opcode()) {
+        case Store8:
+        case Store16:
+        case Store:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static bool isLoad(Kind kind)
+    {
+        return accepts(kind) && !isStore(kind);
+    }
+
+    ~MemoryValue();
+
+    int32_t offset() const { return m_offset; }
+    void setOffset(int32_t offset) { m_offset = offset; }
+
+    const HeapRange& range() const { return m_range; }
+    void setRange(const HeapRange& range) { m_range = range; }
+
+    bool isStore() const { return type() == Void; }
+    bool isLoad() const { return type() != Void; }
+
+    size_t accessByteSize() const;
+
+protected:
+    void dumpMeta(CommaPrinter& comma, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    // Use this form for Load (but not Load8Z, Load8S, or any of the Loads that have a suffix that
+    // describes the returned type).
+    MemoryValue(Kind kind, Type type, Origin origin, Value* pointer, int32_t offset = 0)
+        : Value(CheckedOpcode, kind, type, origin, pointer)
+        , m_offset(offset)
+        , m_range(HeapRange::top())
+    {
+        if (!ASSERT_DISABLED) {
+            switch (kind.opcode()) {
+            case Load:
+                break;
+            case Load8Z:
+            case Load8S:
+            case Load16Z:
+            case Load16S:
+                ASSERT(type == Int32);
+                break;
+            case Store8:
+            case Store16:
+            case Store:
+                ASSERT(type == Void);
+                break;
+            default:
+                ASSERT_NOT_REACHED();
+            }
+        }
+    }
+
+    // Use this form for loads where the return type is implied.
+    MemoryValue(Kind kind, Origin origin, Value* pointer, int32_t offset = 0)
+        : MemoryValue(kind, Int32, origin, pointer, offset)
+    {
+    }
+
+    // Use this form for stores.
+    MemoryValue(Kind kind, Origin origin, Value* value, Value* pointer, int32_t offset = 0)
+        : Value(CheckedOpcode, kind, Void, origin, value, pointer)
+        , m_offset(offset)
+        , m_range(HeapRange::top())
+    {
+        if (!ASSERT_DISABLED) {
+            switch (kind.opcode()) {
+            case Store8:
+            case Store16:
+            case Store:
+                break;
+            default:
+                ASSERT_NOT_REACHED();
+                break;
+            }
+        }
+    }
+
+    int32_t m_offset { 0 };
+    HeapRange m_range;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
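Since the constructors are private and Procedure is a friend, MemoryValue nodes are created through Procedure::add<MemoryValue>() or BasicBlock::appendNew<MemoryValue>(). A hedged sketch of the three call shapes, in the style of the B3 tests; proc, root, ptr and val are illustrative names, not part of this header:

    // Load form: the returned type is given explicitly.
    Value* word = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), ptr);

    // Sub-word load form: the returned type is implied (always Int32).
    Value* byte = root->appendNew<MemoryValue>(proc, Load8Z, Origin(), ptr);

    // Store form: the value to store comes first, then the pointer, then an optional offset.
    root->appendNew<MemoryValue>(proc, Store, Origin(), val, ptr, 4);
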
diff --git a/Source/JavaScriptCore/b3/B3MoveConstants.cpp b/Source/JavaScriptCore/b3/B3MoveConstants.cpp
new file mode 100644
index 000000000..0d987738e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MoveConstants.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3MoveConstants.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class MoveConstants {
+public:
+    MoveConstants(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    void run()
+    {
+        hoistConstants(
+            [&] (const ValueKey& key) -> bool {
+                return key.opcode() == ConstFloat || key.opcode() == ConstDouble;
+            });
+        
+        lowerFPConstants();
+        
+        hoistConstants(
+            [&] (const ValueKey& key) -> bool {
+                return key.opcode() == Const32 || key.opcode() == Const64 || key.opcode() == ArgumentReg;
+            });
+    }
+
+private:
+    template<typename Filter>
+    void hoistConstants(const Filter& filter)
+    {
+        Dominators& dominators = m_proc.dominators();
+        HashMap<ValueKey, Value*> valueForConstant;
+        IndexMap<BasicBlock, Vector<Value*>> materializations(m_proc.size());
+
+        // We determine where things get materialized based on where they are used.
+        for (BasicBlock* block : m_proc) {
+            for (Value* value : *block) {
+                for (Value*& child : value->children()) {
+                    ValueKey key = child->key();
+                    if (!filter(key))
+                        continue;
+
+                    auto result = valueForConstant.add(key, child);
+                    if (result.isNewEntry) {
+                        // Assume that this block is where we want to materialize the value.
+                        child->owner = block;
+                        continue;
+                    }
+
+                    // Make 'value' use the canonical constant rather than the one it was using.
+                    child = result.iterator->value;
+
+                    // Determine the least common dominator. That's the lowest place in the CFG where
+                    // we could materialize the constant while still having only one materialization
+                    // in the resulting code.
+                    while (!dominators.dominates(child->owner, block))
+                        child->owner = dominators.idom(child->owner);
+                }
+            }
+        }
+
+        // Make sure that each basic block knows what to materialize. This also refines the
+        // materialization block based on execution frequency. It finds the minimum block frequency
+        // of all of its dominators, and selects the closest block amongst those that are tied for
+        // lowest frequency.
+        for (auto& entry : valueForConstant) {
+            Value* value = entry.value;
+            for (BasicBlock* block = value->owner; block; block = dominators.idom(block)) {
+                if (block->frequency() < value->owner->frequency())
+                    value->owner = block;
+            }
+            materializations[value->owner].append(value);
+        }
+
+        // Get rid of Values that are fast constants but aren't canonical. Also remove the canonical
+        // ones from the CFG, since we're going to reinsert them elsewhere.
+        for (BasicBlock* block : m_proc) {
+            for (Value*& value : *block) {
+                ValueKey key = value->key();
+                if (!filter(key))
+                    continue;
+
+                if (valueForConstant.get(key) == value)
+                    value = m_proc.add<Value>(Nop, value->origin());
+                else
+                    value->replaceWithNopIgnoringType();
+            }
+        }
+
+        // Now make sure that we move constants to where they are supposed to go. Again, we do this
+        // based on uses.
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                Value* value = block->at(valueIndex);
+                
+                // This finds the outermost (best) block last. So, the functor overrides the result
+                // each time it finds something acceptable.
+                auto findBestConstant = [&] (const auto& predicate) -> Value* {
+                    Value* result = nullptr;
+                    dominators.forAllDominatorsOf(
+                        block,
+                        [&] (BasicBlock* dominator) {
+                            for (Value* value : materializations[dominator]) {
+                                if (predicate(value)) {
+                                    result = value;
+                                    break;
+                                }
+                            }
+                        });
+                    return result;
+                };
+                
+                // We call this when we have found a constant that we'd like to use. It's possible that
+                // we have computed that the constant should be materialized in this block, but we
+                // haven't inserted it yet. This inserts the constant if necessary.
+                auto materialize = [&] (Value* child) {
+                    ValueKey key = child->key();
+                    if (!filter(key))
+                        return;
+
+                    // If we encounter a fast constant, then it must be canonical, since we already
+                    // got rid of the non-canonical ones.
+                    ASSERT(valueForConstant.get(key) == child);
+
+                    if (child->owner != block) {
+                        // This constant isn't our problem. It's going to be materialized in another
+                        // block.
+                        return;
+                    }
+                    
+                    // We're supposed to materialize this constant in this block, and we haven't
+                    // done it yet.
+                    m_insertionSet.insertValue(valueIndex, child);
+                    child->owner = nullptr;
+                };
+                
+                if (MemoryValue* memoryValue = value->as<MemoryValue>()) {
+                    Value* pointer = memoryValue->lastChild();
+                    if (pointer->hasIntPtr() && filter(pointer->key())) {
+                        auto desiredOffset = [&] (Value* otherPointer) -> intptr_t {
+                            // We would turn this:
+                            //
+                            //     Load(@p, offset = c)
+                            //
+                            // into this:
+                            //
+                            //     Load(@q, offset = ?)
+                            //
+                            // The offset should be c + @p - @q, because then we're loading from:
+                            //
+                            //     @q + c + @p - @q
+                            uintptr_t c = static_cast<uintptr_t>(static_cast<intptr_t>(memoryValue->offset()));
+                            uintptr_t p = pointer->asIntPtr();
+                            uintptr_t q = otherPointer->asIntPtr();
+                            return c + p - q;
+                        };
+                        
+                        Value* bestPointer = findBestConstant(
+                            [&] (Value* candidatePointer) -> bool {
+                                if (!candidatePointer->hasIntPtr())
+                                    return false;
+                                
+                                intptr_t offset = desiredOffset(candidatePointer);
+                                if (!B3::isRepresentableAs<int32_t>(static_cast<int64_t>(offset)))
+                                    return false;
+                                return Air::Arg::isValidAddrForm(
+                                    static_cast<int32_t>(offset),
+                                    Air::Arg::widthForBytes(memoryValue->accessByteSize()));
+                            });
+                        
+                        if (bestPointer) {
+                            memoryValue->lastChild() = bestPointer;
+                            memoryValue->setOffset(desiredOffset(bestPointer));
+                        }
+                    }
+                } else {
+                    switch (value->opcode()) {
+                    case Add:
+                    case Sub: {
+                        Value* addend = value->child(1);
+                        if (!addend->hasInt() || !filter(addend->key()))
+                            break;
+                        int64_t addendConst = addend->asInt();
+                        Value* bestAddend = findBestConstant(
+                            [&] (Value* candidateAddend) -> bool {
+                                if (candidateAddend->type() != addend->type())
+                                    return false;
+                                if (!candidateAddend->hasInt())
+                                    return false;
+                                return candidateAddend == addend
+                                    || candidateAddend->asInt() == -addendConst;
+                            });
+                        if (!bestAddend || bestAddend == addend)
+                            break;
+                        materialize(value->child(0));
+                        materialize(bestAddend);
+                        value->replaceWithIdentity(
+                            m_insertionSet.insert<Value>(
+                                valueIndex, value->opcode() == Add ? Sub : Add, value->origin(),
+                                value->child(0), bestAddend));
+                        break;
+                    }
+                    default:
+                        break;
+                    }
+                }
+                
+                for (Value* child : value->children())
+                    materialize(child);
+            }
+
+            // We may have some constants that need to be materialized right at the end of this
+            // block.
+            for (Value* value : materializations[block]) {
+                if (!value->owner) {
+                    // It's already materialized in this block.
+                    continue;
+                }
+
+                m_insertionSet.insertValue(block->size() - 1, value);
+            }
+            m_insertionSet.execute(block);
+        }
+    }
+
+    void lowerFPConstants()
+    {
+        for (Value* value : m_proc.values()) {
+            ValueKey key = value->key();
+            if (goesInTable(key))
+                m_constTable.add(key, m_constTable.size());
+        }
+        
+        m_dataSection = static_cast<int64_t*>(m_proc.addDataSection(m_constTable.size() * sizeof(int64_t)));
+        for (auto& entry : m_constTable)
+            m_dataSection[entry.value] = entry.key.value();
+
+        IndexSet offLimits;
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                StackmapValue* value = block->at(valueIndex)->as<StackmapValue>();
+                if (!value)
+                    continue;
+
+                for (unsigned childIndex = 0; childIndex < value->numChildren(); ++childIndex) {
+                    if (!value->constrainedChild(childIndex).rep().isAny())
+                        continue;
+                    
+                    Value*& child = value->child(childIndex);
+                    ValueKey key = child->key();
+                    if (!goesInTable(key))
+                        continue;
+
+                    child = m_insertionSet.insertValue(
+                        valueIndex, key.materialize(m_proc, value->origin()));
+                    offLimits.add(child);
+                }
+            }
+
+            m_insertionSet.execute(block);
+        }
+
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                Value* value = block->at(valueIndex);
+                ValueKey key = value->key();
+                if (!goesInTable(key))
+                    continue;
+                if (offLimits.contains(value))
+                    continue;
+
+                Value* tableBase = m_insertionSet.insertIntConstant(
+                    valueIndex, value->origin(), pointerType(),
+                    bitwise_cast<intptr_t>(m_dataSection));
+                Value* result = m_insertionSet.insert<MemoryValue>(
+                    valueIndex, Load, value->type(), value->origin(), tableBase,
+                    sizeof(int64_t) * m_constTable.get(key));
+                value->replaceWithIdentity(result);
+            }
+
+            m_insertionSet.execute(block);
+        }
+    }
+
+    bool goesInTable(const ValueKey& key)
+    {
+        return (key.opcode() == ConstDouble && key != doubleZero())
+            || (key.opcode() == ConstFloat && key != floatZero());
+    }
+
+    static ValueKey doubleZero()
+    {
+        return ValueKey(ConstDouble, Double, 0.0);
+    }
+
+    static ValueKey floatZero()
+    {
+        return ValueKey(ConstFloat, Float, 0.0);
+    }
+
+    Procedure& m_proc;
+    Vector<Value*> m_toRemove;
+    HashMap<ValueKey, unsigned> m_constTable;
+    int64_t* m_dataSection;
+    HashMap<ValueKey, Value*> m_constants;
+    InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void moveConstants(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "moveConstants");
+    MoveConstants moveConstants(proc);
+    moveConstants.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
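The placement loop above computes, for each constant, the lowest block that dominates every use: it repeatedly hoists the candidate block to its immediate dominator until the candidate dominates the current use. A standalone sketch of that walk over a precomputed immediate-dominator array (illustrative; B3's Dominators class provides dominates() and idom() directly):

    #include <cstdio>
    #include <vector>

    // idom[b] is the immediate dominator of block b; idom[0] == 0 for the root.
    static bool dominates(const std::vector<int>& idom, int a, int b)
    {
        while (b != a && b != idom[b])
            b = idom[b]; // walk up the dominator tree
        return b == a;
    }

    int main()
    {
        // Root 0 dominates 1 and 2; block 1 dominates 3.
        std::vector<int> idom { 0, 0, 0, 1 };
        int placement = 3; // block of the first use of the constant
        int use = 2;       // block of another use
        while (!dominates(idom, placement, use))
            placement = idom[placement];
        std::printf("materialize in block %d\n", placement); // prints: block 0
    }
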
diff --git a/Source/JavaScriptCore/b3/B3MoveConstants.h b/Source/JavaScriptCore/b3/B3MoveConstants.h
new file mode 100644
index 000000000..b9f92ffe1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MoveConstants.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Moves large constants around, with the goal of placing them at optimal points in the program.
+
+JS_EXPORT_PRIVATE void moveConstants(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3OpaqueByproduct.h b/Source/JavaScriptCore/b3/B3OpaqueByproduct.h
new file mode 100644
index 000000000..35a2a06a4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OpaqueByproduct.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class OpaqueByproduct {
+    WTF_MAKE_NONCOPYABLE(OpaqueByproduct);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    OpaqueByproduct() { }
+    virtual ~OpaqueByproduct() { }
+
+    virtual void dump(PrintStream&) const = 0;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp b/Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp
new file mode 100644
index 000000000..f89f8bfed
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3OpaqueByproducts.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+OpaqueByproducts::OpaqueByproducts()
+{
+}
+
+OpaqueByproducts::~OpaqueByproducts()
+{
+}
+
+void OpaqueByproducts::add(std::unique_ptr<OpaqueByproduct> byproduct)
+{
+    m_byproducts.append(WTFMove(byproduct));
+}
+
+void OpaqueByproducts::dump(PrintStream& out) const
+{
+    out.print("Byproducts:\n");
+    if (m_byproducts.isEmpty()) {
+        out.print("    <empty>\n");
+        return;
+    }
+    for (auto& byproduct : m_byproducts)
+        out.print("    ", *byproduct, "\n");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3OpaqueByproducts.h b/Source/JavaScriptCore/b3/B3OpaqueByproducts.h
new file mode 100644
index 000000000..e8eec113f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OpaqueByproducts.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproduct.h"
+#include <memory>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class OpaqueByproducts {
+    WTF_MAKE_NONCOPYABLE(OpaqueByproducts);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    OpaqueByproducts();
+    JS_EXPORT_PRIVATE ~OpaqueByproducts();
+
+    size_t count() const { return m_byproducts.size(); }
+    
+    void add(std::unique_ptr<OpaqueByproduct>);
+
+    void dump(PrintStream&) const;
+
+private:
+    Vector<std::unique_ptr<OpaqueByproduct>> m_byproducts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
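OpaqueByproducts is an owning bag of heap objects that must stay alive as long as the compiled code, with dump() support for diagnostics. A minimal standalone analogue, using std::ostream in place of WTF::PrintStream (illustrative only; the type names here are invented):

    #include <iostream>
    #include <memory>
    #include <vector>

    struct Byproduct {
        virtual ~Byproduct() = default;
        virtual void dump(std::ostream&) const = 0;
    };

    struct NamedByproduct : Byproduct {
        explicit NamedByproduct(const char* name) : name(name) { }
        void dump(std::ostream& out) const override { out << name; }
        const char* name;
    };

    int main()
    {
        // The container owns its byproducts; they die with it.
        std::vector<std::unique_ptr<Byproduct>> byproducts;
        byproducts.push_back(std::make_unique<NamedByproduct>("data section"));

        std::cout << "Byproducts:\n";
        for (auto& byproduct : byproducts) {
            std::cout << "    ";
            byproduct->dump(std::cout);
            std::cout << "\n";
        }
    }
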
diff --git a/Source/JavaScriptCore/b3/B3Opcode.cpp b/Source/JavaScriptCore/b3/B3Opcode.cpp
new file mode 100644
index 000000000..a0aa5a990
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Opcode.cpp
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Opcode.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+std::optional<Opcode> invertedCompare(Opcode opcode, Type type)
+{
+    switch (opcode) {
+    case Equal:
+        return NotEqual;
+    case NotEqual:
+        return Equal;
+    case LessThan:
+        if (isInt(type))
+            return GreaterEqual;
+        return std::nullopt;
+    case GreaterThan:
+        if (isInt(type))
+            return LessEqual;
+        return std::nullopt;
+    case LessEqual:
+        if (isInt(type))
+            return GreaterThan;
+        return std::nullopt;
+    case GreaterEqual:
+        if (isInt(type))
+            return LessThan;
+        return std::nullopt;
+    case Above:
+        return BelowEqual;
+    case Below:
+        return AboveEqual;
+    case AboveEqual:
+        return Below;
+    case BelowEqual:
+        return Above;
+    default:
+        return std::nullopt;
+    }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Opcode opcode)
+{
+    switch (opcode) {
+    case Nop:
+        out.print("Nop");
+        return;
+    case Identity:
+        out.print("Identity");
+        return;
+    case Const32:
+        out.print("Const32");
+        return;
+    case Const64:
+        out.print("Const64");
+        return;
+    case ConstDouble:
+        out.print("ConstDouble");
+        return;
+    case ConstFloat:
+        out.print("ConstFloat");
+        return;
+    case Get:
+        out.print("Get");
+        return;
+    case Set:
+        out.print("Set");
+        return;
+    case SlotBase:
+        out.print("SlotBase");
+        return;
+    case ArgumentReg:
+        out.print("ArgumentReg");
+        return;
+    case FramePointer:
+        out.print("FramePointer");
+        return;
+    case Add:
+        out.print("Add");
+        return;
+    case Sub:
+        out.print("Sub");
+        return;
+    case Mul:
+        out.print("Mul");
+        return;
+    case Div:
+        out.print("Div");
+        return;
+    case UDiv:
+        out.print("UDiv");
+        return;
+    case Mod:
+        out.print("Mod");
+        return;
+    case UMod:
+        out.print("UMod");
+        return;
+    case Neg:
+        out.print("Neg");
+        return;
+    case BitAnd:
+        out.print("BitAnd");
+        return;
+    case BitOr:
+        out.print("BitOr");
+        return;
+    case BitXor:
+        out.print("BitXor");
+        return;
+    case Shl:
+        out.print("Shl");
+        return;
+    case SShr:
+        out.print("SShr");
+        return;
+    case ZShr:
+        out.print("ZShr");
+        return;
+    case RotR:
+        out.print("RotR");
+        return;
+    case RotL:
+        out.print("RotL");
+        return;
+    case Clz:
+        out.print("Clz");
+        return;
+    case Abs:
+        out.print("Abs");
+        return;
+    case Ceil:
+        out.print("Ceil");
+        return;
+    case Floor:
+        out.print("Floor");
+        return;
+    case Sqrt:
+        out.print("Sqrt");
+        return;
+    case BitwiseCast:
+        out.print("BitwiseCast");
+        return;
+    case SExt8:
+        out.print("SExt8");
+        return;
+    case SExt16:
+        out.print("SExt16");
+        return;
+    case SExt32:
+        out.print("SExt32");
+        return;
+    case ZExt32:
+        out.print("ZExt32");
+        return;
+    case Trunc:
+        out.print("Trunc");
+        return;
+    case IToD:
+        out.print("IToD");
+        return;
+    case IToF:
+        out.print("IToF");
+        return;
+    case FloatToDouble:
+        out.print("FloatToDouble");
+        return;
+    case DoubleToFloat:
+        out.print("DoubleToFloat");
+        return;
+    case Equal:
+        out.print("Equal");
+        return;
+    case NotEqual:
+        out.print("NotEqual");
+        return;
+    case LessThan:
+        out.print("LessThan");
+        return;
+    case GreaterThan:
+        out.print("GreaterThan");
+        return;
+    case LessEqual:
+        out.print("LessEqual");
+        return;
+    case GreaterEqual:
+        out.print("GreaterEqual");
+        return;
+    case Above:
+        out.print("Above");
+        return;
+    case Below:
+        out.print("Below");
+        return;
+    case AboveEqual:
+        out.print("AboveEqual");
+        return;
+    case BelowEqual:
+        out.print("BelowEqual");
+        return;
+    case EqualOrUnordered:
+        out.print("EqualOrUnordered");
+        return;
+    case Select:
+        out.print("Select");
+        return;
+    case Load8Z:
+        out.print("Load8Z");
+        return;
+    case Load8S:
+        out.print("Load8S");
+        return;
+    case Load16Z:
+        out.print("Load16Z");
+        return;
+    case Load16S:
+        out.print("Load16S");
+        return;
+    case Load:
+        out.print("Load");
+        return;
+    case Store8:
+        out.print("Store8");
+        return;
+    case Store16:
+        out.print("Store16");
+        return;
+    case Store:
+        out.print("Store");
+        return;
+    case WasmAddress:
+        out.print("WasmAddress");
+        return;
+    case Fence:
+        out.print("Fence");
+        return;
+    case CCall:
+        out.print("CCall");
+        return;
+    case Patchpoint:
+        out.print("Patchpoint");
+        return;
+    case CheckAdd:
+        out.print("CheckAdd");
+        return;
+    case CheckSub:
+        out.print("CheckSub");
+        return;
+    case CheckMul:
+        out.print("CheckMul");
+        return;
+    case Check:
+        out.print("Check");
+        return;
+    case WasmBoundsCheck:
+        out.print("WasmBoundsCheck");
+        return;
+    case Upsilon:
+        out.print("Upsilon");
+        return;
+    case Phi:
+        out.print("Phi");
+        return;
+    case Jump:
+        out.print("Jump");
+        return;
+    case Branch:
+        out.print("Branch");
+        return;
+    case Switch:
+        out.print("Switch");
+        return;
+    case EntrySwitch:
+        out.print("EntrySwitch");
+        return;
+    case Return:
+        out.print("Return");
+        return;
+    case Oops:
+        out.print("Oops");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Opcode.h b/Source/JavaScriptCore/b3/B3Opcode.h
new file mode 100644
index 000000000..956dba99a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Opcode.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Type.h"
+#include <wtf/Optional.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC { namespace B3 {
+
+// Warning: In B3, an Opcode is just one part of a Kind. Kind is used the way that an opcode
+// would be used in simple IRs. See B3Kind.h.
+
+enum Opcode : int16_t {
+    // A no-op that returns Void, useful for when you want to remove a value.
+    Nop,
+    
+    // Polymorphic identity, usable with any value type.
+    Identity,
+
+    // Constants. Use the ConstValue* classes. Constants exist in the control flow, so that we can
+    // reason about where we would construct them. Large constants are expensive to create.
+    Const32,
+    Const64,
+    ConstDouble,
+    ConstFloat,
+
+    // B3 supports non-SSA variables. These are accessed using Get and Set opcodes. Use the
+    // VariableValue class. It's a good idea to run fixSSA() to turn these into SSA. The
+    // optimizer will do that eventually, but if your input tends to use these opcodes, you
+    // should run fixSSA() directly before launching the optimizer.
+    Set,
+    Get,
+
+    // Gets the base address of a StackSlot.
+    SlotBase,
+
+    // The magical argument register. This is viewed as executing at the top of the program
+    // regardless of where in control flow you put it, and the compiler takes care to ensure that we
+    // don't clobber the value by register allocation or calls (either by saving the argument to the
+    // stack or preserving it in a callee-save register). Use the ArgumentRegValue class. The return
+    // type is either pointer() (for GPRs) or Double (for FPRs).
+    ArgumentReg,
+
+    // The frame pointer. You can put this anywhere in control flow but it will always yield the
+    // frame pointer, with a caveat: if our compiler changes the frame pointer temporarily for some
+    // silly reason, the FramePointer intrinsic will return where the frame pointer *should* be not
+    // where it happens to be right now.
+    FramePointer,
+
+    // Polymorphic math, usable with any value type.
+    Add,
+    Sub,
+    Mul,
+    Div, // All bets are off as to what will happen when you execute this for -2^31/-1 and x/0.
+    UDiv,
+    Mod, // All bets are off as to what will happen when you execute this for -2^31%-1 and x%0.
+    UMod,
+
+    // Polymorphic negation. Note that we only need this for floating point, since integer negation
+    // is exactly like Sub(0, x). But that's not true for floating point. Sub(0, 0) is 0, while
+    // Neg(0) is -0. Also, we canonicalize Sub(0, x) into Neg(x) in case of integers.
+    Neg,
+
+    // Integer math.
+    BitAnd,
+    BitOr,
+    BitXor,
+    Shl,
+    SShr, // Arithmetic Shift.
+    ZShr, // Logical Shift.
+    RotR, // Rotate Right.
+    RotL, // Rotate Left.
+    Clz, // Count leading zeros.
+
+    // Floating point math.
+    Abs,
+    Ceil,
+    Floor,
+    Sqrt,
+
+    // Casts and such.
+    // Bitwise Cast of Double->Int64 or Int64->Double
+    BitwiseCast,
+    // Takes and returns Int32:
+    SExt8,
+    SExt16,
+    // Takes Int32 and returns Int64:
+    SExt32,
+    ZExt32,
+    // Does a bitwise truncation of Int64->Int32 and Double->Float:
+    Trunc,
+    // Takes ints and returns floating point value. Note that we don't currently provide the opposite operation,
+    // because double-to-int conversions have weirdly different semantics on different platforms. Use
+    // a patchpoint if you need to do that.
+    IToD,
+    IToF,
+    // Convert between double and float.
+    FloatToDouble,
+    DoubleToFloat,
+
+    // Polymorphic comparisons, usable with any value type. Returns int32 0 or 1. Note that "Not"
+    // is just Equal(x, 0), and "ToBoolean" is just NotEqual(x, 0).
+    Equal,
+    NotEqual,
+    LessThan,
+    GreaterThan,
+    LessEqual,
+    GreaterEqual,
+
+    // Integer comparisons. Returns int32 0 or 1.
+    Above,
+    Below,
+    AboveEqual,
+    BelowEqual,
+
+    // Unordered floating point compare: values are equal or either one is NaN.
+    EqualOrUnordered,
+
+    // SSA form of conditional move. The first child is evaluated for truthiness. If true, the second child
+    // is returned. Otherwise, the third child is returned.
+    Select,
+
+    // Memory loads. Opcode indicates how we load and the loaded type. These use MemoryValue.
+    // These return Int32:
+    Load8Z,
+    Load8S,
+    Load16Z,
+    Load16S,
+    // This returns whatever the return type is:
+    Load,
+
+    // Memory stores. Opcode indicates how the value is stored. These use MemoryValue.
+    // These take an Int32 value:
+    Store8,
+    Store16,
+    // This is a polymorphic store for Int32, Int64, Float, and Double.
+    Store,
+
+    // This is used to compute the actual address of a Wasm memory operation. It takes an IntPtr
+    // and a pinned register then computes the appropriate IntPtr address. For the use-case of
+    // Wasm it is important that the first child initially be a ZExt32 so the top bits are cleared.
+    // We do WasmAddress(ZExt32(ptr), ...) so that we can avoid generating extraneous moves in Air.
+    WasmAddress,
+    
+    // This is used to represent standalone fences - i.e. fences that are not part of other
+    // instructions. It's expressive enough to expose mfence on x86 and dmb ish/ishst on ARM. On
+    // x86, it also acts as a compiler store-store fence in those cases where it would have been a
+    // dmb ishst on ARM.
+    Fence,
+
+    // This is an ordinary C function call, using the system C calling convention. Make sure
+    // that the arguments are passed using the right types. The first argument is the callee.
+    CCall,
+
+    // This is a patchpoint. Use the PatchpointValue class. This is viewed as behaving like a call,
+    // but only emits code via a code generation callback. That callback gets to emit code inline.
+    // You can pass a stackmap along with constraints on how each stackmap argument must be passed.
+    // It's legal to request that a stackmap argument is in some register and it's legal to request
+    // that a stackmap argument is at some offset from the top of the argument passing area on the
+    // stack.
+    Patchpoint,
+
+    // Checked math. Use the CheckValue class. Like a Patchpoint, this takes a code generation
+    // callback. That callback gets to emit some code after the epilogue, and gets to link the jump
+    // from the check, and the choice of registers. You also get to supply a stackmap. Note that you
+    // are not allowed to jump back into the mainline code from your slow path, since the compiler
+    // will assume that the execution of these instructions proves that overflow didn't happen. For
+    // example, if you have two CheckAdd's:
+    //
+    // a = CheckAdd(x, y)
+    // b = CheckAdd(x, y)
+    //
+    // Then it's valid to change this to:
+    //
+    // a = CheckAdd(x, y)
+    // b = Identity(a)
+    //
+    // This is valid regardless of the callbacks used by the two CheckAdds. They may have different
+    // callbacks. Yet, this transformation is valid even if they are different because we know that
+    // after the first CheckAdd executes, the second CheckAdd could not have possibly taken slow
+    // path. Therefore, the second CheckAdd's callback is irrelevant.
+    //
+    // Note that the first two children of these operations have ValueRep's as input constraints but do
+    // not have output constraints.
+    CheckAdd,
+    CheckSub,
+    CheckMul,
+
+    // Check that side-exits. Use the CheckValue class. Like CheckAdd and friends, this has a
+    // stackmap with a generation callback. This takes an int argument that this branches on, with
+    // full branch fusion in the instruction selector. A true value jumps to the generator's slow
+    // path. Note that the predicate child has an input ValueRep; the input constraint must be
+    // WarmAny. It will not have an output constraint.
+    Check,
+
+    // Special Wasm opcode that takes an Int32, a special pinned gpr and an offset. This node exists
+    // to allow us to CSE WasmBoundsChecks if both use the same pointer and one dominates the other.
+    // Without some such node B3 would not have enough information about the inner workings of wasm
+    // to be able to perform such optimizations.
+    WasmBoundsCheck,
+
+    // SSA support, in the style of DFG SSA.
+    Upsilon, // This uses the UpsilonValue class.
+    Phi,
+
+    // Jump.
+    Jump,
+    
+    // Polymorphic branch, usable with any integer type. Branches if not equal to zero. The 0-index
+    // successor is the true successor.
+    Branch,
+
+    // Switch. Switches over either Int32 or Int64. Uses the SwitchValue class.
+    Switch,
+    
+    // Multiple entrypoints are supported via the EntrySwitch operation. Place this in the root
+    // block and list the entrypoints as the successors. All blocks backwards-reachable from
+    // EntrySwitch are duplicated for each entrypoint.
+    EntrySwitch,
+
+    // Return. Note that B3 procedures don't know their return type, so this can just return any
+    // type.
+    Return,
+
+    // This is a terminal that indicates that we will never get here.
+    Oops
+};
+
+inline bool isCheckMath(Opcode opcode)
+{
+    switch (opcode) {
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+        return true;
+    default:
+        return false;
+    }
+}
+
+std::optional<Opcode> invertedCompare(Opcode, Type);
+
+inline Opcode constPtrOpcode()
+{
+    if (is64Bit())
+        return Const64;
+    return Const32;
+}
+
+inline bool isConstant(Opcode opcode)
+{
+    switch (opcode) {
+    case Const32:
+    case Const64:
+    case ConstDouble:
+    case ConstFloat:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool isDefinitelyTerminal(Opcode opcode)
+{
+    switch (opcode) {
+    case Jump:
+    case Branch:
+    case Switch:
+    case Oops:
+    case Return:
+        return true;
+    default:
+        return false;
+    }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Opcode);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
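invertedCompare() declines to invert the ordered comparisons for floating-point types because of NaN: !(a < b) is not equivalent to (a >= b) once either operand is unordered. A standalone illustration:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        double a = std::nan(""), b = 1.0;
        std::printf("!(a < b) = %d\n", (int)!(a < b)); // 1: LessThan is false for NaN
        std::printf("(a >= b) = %d\n", (int)(a >= b)); // 0: so GreaterEqual is not its inverse
    }
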
diff --git a/Source/JavaScriptCore/b3/B3Origin.cpp b/Source/JavaScriptCore/b3/B3Origin.cpp
new file mode 100644
index 000000000..8baf012ea
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Origin.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Origin.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+void Origin::dump(PrintStream& out) const
+{
+    out.print("Origin(", RawPointer(m_data), ")");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Origin.h b/Source/JavaScriptCore/b3/B3Origin.h
new file mode 100644
index 000000000..47fd10fd9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Origin.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// Whoever generates B3 IR can choose to put origins on values. When you do this, B3 will be able
+// to track, down to the machine code, which instruction corresponds to which origin. B3
+// transformations must preserve Origins carefully: it's an error to write a transformation that
+// either drops Origins or lies about them.
+class Origin {
+public:
+    explicit Origin(const void* data = nullptr)
+        : m_data(data)
+    {
+    }
+
+    explicit operator bool() const { return !!m_data; }
+
+    const void* data() const { return m_data; }
+
+    bool operator==(const Origin& other) const { return m_data == other.m_data; }
+
+    // You should avoid using this. Use OriginDump instead.
+    void dump(PrintStream&) const;
+    
+private:
+    const void* m_data;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
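
Annotation: the Origin contract is easiest to see with a small sketch. The frontend node type
MyNode and the printer lambda below are hypothetical illustrations, not part of this patch;
Procedure::setOriginPrinter() is added later in this patch (B3Procedure.h).

    // A minimal sketch, assuming a hypothetical frontend node type.
    struct MyNode { int id; };

    void attachOrigins(JSC::B3::Procedure& proc, MyNode* node)
    {
        using namespace JSC::B3;
        Origin origin(node); // Origin is just an opaque pointer wrapper.
        proc.setOriginPrinter([] (WTF::PrintStream& out, Origin origin) {
            // The frontend decides how to interpret Origin::data().
            out.print("MyNode #", static_cast<const MyNode*>(origin.data())->id);
        });
        // New values would then be created with this origin, e.g.
        // proc.add<Const32Value>(origin, 42).
    }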
diff --git a/Source/JavaScriptCore/b3/B3OriginDump.cpp b/Source/JavaScriptCore/b3/B3OriginDump.cpp
new file mode 100644
index 000000000..da7afeeb8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OriginDump.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3OriginDump.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
+void OriginDump::dump(PrintStream& out) const
+{
+    if (m_proc)
+        m_proc->printOrigin(out, m_origin);
+    else
+        out.print(m_origin);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3OriginDump.h b/Source/JavaScriptCore/b3/B3OriginDump.h
new file mode 100644
index 000000000..5392ac911
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OriginDump.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class OriginDump {
+public:
+    OriginDump(const Procedure* proc, Origin origin)
+        : m_proc(proc)
+        , m_origin(origin)
+    {
+    }
+
+    void dump(PrintStream& out) const;
+
+private:
+    const Procedure* m_proc;
+    Origin m_origin;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PCToOriginMap.h b/Source/JavaScriptCore/b3/B3PCToOriginMap.h
new file mode 100644
index 000000000..5e6ce451d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PCToOriginMap.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+#include "MacroAssembler.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class PCToOriginMap {
+    WTF_MAKE_NONCOPYABLE(PCToOriginMap);
+public:
+    PCToOriginMap()
+    { }
+
+    PCToOriginMap(PCToOriginMap&& other)
+        : m_ranges(WTFMove(other.m_ranges))
+    { }
+
+    struct OriginRange {
+        MacroAssembler::Label label;
+        Origin origin;
+    };
+
+    void appendItem(MacroAssembler::Label label, Origin origin)
+    {
+        if (m_ranges.size()) {
+            if (m_ranges.last().label == label)
+                return;
+        }
+
+        m_ranges.append(OriginRange{label, origin});
+    }
+
+    const Vector<OriginRange>& ranges() const { return m_ranges; }
+
+private:
+    Vector<OriginRange> m_ranges;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
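
Annotation: because appendItem() ignores a new entry whose label equals the last recorded one,
registering several origins at the same PC keeps only the first. A minimal sketch, assuming a
live MacroAssembler named jit:

    void recordTwoOrigins(JSC::B3::PCToOriginMap& map, JSC::MacroAssembler& jit,
        JSC::B3::Origin a, JSC::B3::Origin b)
    {
        JSC::MacroAssembler::Label here = jit.label();
        map.appendItem(here, a);
        map.appendItem(here, b); // same label: no-op, so ranges() stays deduplicated
    }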
diff --git a/Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp b/Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp
new file mode 100644
index 000000000..c5fc5885d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PatchpointSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirGenerationContext.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+PatchpointSpecial::PatchpointSpecial()
+{
+}
+
+PatchpointSpecial::~PatchpointSpecial()
+{
+}
+
+void PatchpointSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+    PatchpointValue* patchpoint = inst.origin->as<PatchpointValue>();
+    unsigned argIndex = 1;
+
+    if (patchpoint->type() != Void) {
+        Arg::Role role;
+        if (patchpoint->resultConstraint.kind() == ValueRep::SomeEarlyRegister)
+            role = Arg::EarlyDef;
+        else
+            role = Arg::Def;
+        
+        callback(inst.args[argIndex++], role, inst.origin->airType(), inst.origin->airWidth());
+    }
+
+    forEachArgImpl(0, argIndex, inst, SameAsRep, std::nullopt, callback);
+    argIndex += inst.origin->numChildren();
+
+    for (unsigned i = patchpoint->numGPScratchRegisters; i--;)
+        callback(inst.args[argIndex++], Arg::Scratch, Arg::GP, Arg::conservativeWidth(Arg::GP));
+    for (unsigned i = patchpoint->numFPScratchRegisters; i--;)
+        callback(inst.args[argIndex++], Arg::Scratch, Arg::FP, Arg::conservativeWidth(Arg::FP));
+}
+
+bool PatchpointSpecial::isValid(Inst& inst)
+{
+    PatchpointValue* patchpoint = inst.origin->as<PatchpointValue>();
+    unsigned argIndex = 1;
+
+    if (inst.origin->type() != Void) {
+        if (argIndex >= inst.args.size())
+            return false;
+        
+        if (!isArgValidForValue(inst.args[argIndex], patchpoint))
+            return false;
+        if (!isArgValidForRep(code(), inst.args[argIndex], patchpoint->resultConstraint))
+            return false;
+        argIndex++;
+    }
+
+    if (!isValidImpl(0, argIndex, inst))
+        return false;
+    argIndex += patchpoint->numChildren();
+
+    if (argIndex + patchpoint->numGPScratchRegisters + patchpoint->numFPScratchRegisters
+        != inst.args.size())
+        return false;
+
+    for (unsigned i = patchpoint->numGPScratchRegisters; i--;) {
+        Arg arg = inst.args[argIndex++];
+        if (!arg.isGPTmp())
+            return false;
+    }
+    for (unsigned i = patchpoint->numFPScratchRegisters; i--;) {
+        Arg arg = inst.args[argIndex++];
+        if (!arg.isFPTmp())
+            return false;
+    }
+
+    return true;
+}
+
+bool PatchpointSpecial::admitsStack(Inst& inst, unsigned argIndex)
+{
+    if (inst.origin->type() == Void)
+        return admitsStackImpl(0, 1, inst, argIndex);
+
+    if (argIndex == 1) {
+        switch (inst.origin->as<PatchpointValue>()->resultConstraint.kind()) {
+        case ValueRep::WarmAny:
+        case ValueRep::StackArgument:
+            return true;
+        case ValueRep::SomeRegister:
+        case ValueRep::SomeEarlyRegister:
+        case ValueRep::Register:
+        case ValueRep::LateRegister:
+            return false;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return false;
+        }
+    }
+
+    return admitsStackImpl(0, 2, inst, argIndex);
+}
+
+CCallHelpers::Jump PatchpointSpecial::generate(
+    Inst& inst, CCallHelpers& jit, GenerationContext& context)
+{
+    PatchpointValue* value = inst.origin->as<PatchpointValue>();
+    ASSERT(value);
+
+    Vector<ValueRep> reps;
+    unsigned offset = 1;
+    if (inst.origin->type() != Void)
+        reps.append(repForArg(*context.code, inst.args[offset++]));
+    reps.appendVector(repsImpl(context, 0, offset, inst));
+    offset += value->numChildren();
+
+    StackmapGenerationParams params(value, reps, context);
+
+    for (unsigned i = value->numGPScratchRegisters; i--;)
+        params.m_gpScratch.append(inst.args[offset++].gpr());
+    for (unsigned i = value->numFPScratchRegisters; i--;)
+        params.m_fpScratch.append(inst.args[offset++].fpr());
+    
+    value->m_generator->run(jit, params);
+
+    return CCallHelpers::Jump();
+}
+
+bool PatchpointSpecial::isTerminal(Inst& inst)
+{
+    return inst.origin->as<PatchpointValue>()->effects.terminal;
+}
+
+void PatchpointSpecial::dumpImpl(PrintStream& out) const
+{
+    out.print("Patchpoint");
+}
+
+void PatchpointSpecial::deepDumpImpl(PrintStream& out) const
+{
+    out.print("Lowered B3::PatchpointValue.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PatchpointSpecial.h b/Source/JavaScriptCore/b3/B3PatchpointSpecial.h
new file mode 100644
index 000000000..4e1b2a319
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PatchpointSpecial.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackmapSpecial.h"
+
+namespace JSC { namespace B3 {
+
+// This is a special that recognizes that there are two uses of Patchpoint: Void and non-Void.
+// In the Void case, the syntax of the Air Patch instruction is:
+//
+//     Patch &patchpoint, args...
+//
+// Where "args..." are the lowered arguments to the Patchpoint instruction. In the non-Void case
+// we will have:
+//
+//     Patch &patchpoint, result, args...
+
+class PatchpointSpecial : public StackmapSpecial {
+public:
+    PatchpointSpecial();
+    virtual ~PatchpointSpecial();
+
+protected:
+    void forEachArg(Air::Inst&, const ScopedLambda<Air::Inst::EachArgCallback>&) override;
+    bool isValid(Air::Inst&) override;
+    bool admitsStack(Air::Inst&, unsigned argIndex) override;
+
+    // NOTE: generate() does not emit any branch of its own; it materializes the stackmap
+    // parameters and then runs the patchpoint's generator.
+
+    CCallHelpers::Jump generate(Air::Inst&, CCallHelpers&, Air::GenerationContext&) override;
+    
+    bool isTerminal(Air::Inst&) override;
+
+    void dumpImpl(PrintStream&) const override;
+    void deepDumpImpl(PrintStream&) const override;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PatchpointValue.cpp b/Source/JavaScriptCore/b3/B3PatchpointValue.cpp
new file mode 100644
index 000000000..b33c558bf
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PatchpointValue.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PatchpointValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+PatchpointValue::~PatchpointValue()
+{
+}
+
+void PatchpointValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    Base::dumpMeta(comma, out);
+    out.print(comma, "resultConstraint = ", resultConstraint);
+    if (numGPScratchRegisters)
+        out.print(comma, "numGPScratchRegisters = ", numGPScratchRegisters);
+    if (numFPScratchRegisters)
+        out.print(comma, "numFPScratchRegisters = ", numFPScratchRegisters);
+}
+
+Value* PatchpointValue::cloneImpl() const
+{
+    return new PatchpointValue(*this);
+}
+
+PatchpointValue::PatchpointValue(Type type, Origin origin)
+    : Base(CheckedOpcode, Patchpoint, type, origin)
+    , effects(Effects::forCall())
+    , resultConstraint(type == Void ? ValueRep::WarmAny : ValueRep::SomeRegister)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PatchpointValue.h b/Source/JavaScriptCore/b3/B3PatchpointValue.h
new file mode 100644
index 000000000..3378dc410
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PatchpointValue.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Effects.h"
+#include "B3StackmapValue.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class PatchpointValue : public StackmapValue {
+public:
+    typedef StackmapValue Base;
+    
+    static bool accepts(Kind kind) { return kind == Patchpoint; }
+
+    ~PatchpointValue();
+
+    // The effects of the patchpoint. This defaults to Effects::forCall(), but you can set it to anything.
+    //
+    // If there are no effects, B3 is free to assume any use of this PatchpointValue can be replaced with
+    // a use of a different PatchpointValue, so long as the other one also has no effects and has the
+    // same children. Note that this comparison ignores child constraints, the result constraint, and all
+    // other StackmapValue meta-data. If there are read effects but not write effects, then this same sort
+    // of substitution could be made so long as there are no interfering writes.
+    Effects effects;
+
+    // The representation (i.e. constraint) of the result. This defaults to WarmAny if the type is
+    // Void and to SomeRegister otherwise. It's illegal to change this if the type is Void; otherwise
+    // you can set it to any input constraint.
+    ValueRep resultConstraint;
+
+    // The number of scratch registers that this patchpoint gets. Each scratch register is guaranteed
+    // to be different from every input register and from the destination register, and is guaranteed
+    // not to be clobbered either early or late. Both counts default to 0.
+    uint8_t numGPScratchRegisters { 0 };
+    uint8_t numFPScratchRegisters { 0 };
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    JS_EXPORT_PRIVATE PatchpointValue(Type, Origin);
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
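
Annotation: a hedged sketch of how a client configures these knobs. It assumes a live Procedure
and uses StackmapValue::setGenerator() and StackmapGenerationParams, which are defined outside
this hunk; the generator body is illustrative only.

    using namespace JSC::B3;

    Value* emitConstantPatchpoint(Procedure& proc, Origin origin)
    {
        PatchpointValue* patchpoint = proc.add<PatchpointValue>(Int32, origin);
        patchpoint->effects = Effects::none(); // pure: B3 may CSE or drop it
        patchpoint->numGPScratchRegisters = 1; // request one safe GP register
        patchpoint->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            // params[0] obeys resultConstraint; the scratch register is
            // guaranteed distinct from all inputs and from the result.
            jit.move(CCallHelpers::TrustedImm32(42), params[0].gpr());
        });
        return patchpoint;
    }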
diff --git a/Source/JavaScriptCore/b3/B3PhaseScope.cpp b/Source/JavaScriptCore/b3/B3PhaseScope.cpp
new file mode 100644
index 000000000..27b22de21
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PhaseScope.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PhaseScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include "B3Procedure.h"
+#include "B3Validate.h"
+#include <wtf/DataLog.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC { namespace B3 {
+
+PhaseScope::PhaseScope(Procedure& procedure, const char* name)
+    : m_procedure(procedure)
+    , m_name(name)
+    , m_timingScope(name)
+{
+    if (shouldDumpIRAtEachPhase(B3Mode)) {
+        dataLog("B3 after ", procedure.lastPhaseName(), ", before ", name, ":\n");
+        dataLog(procedure);
+    }
+
+    if (shouldSaveIRBeforePhase())
+        m_dumpBefore = toCString(procedure);
+}
+
+PhaseScope::~PhaseScope()
+{
+    m_procedure.setLastPhaseName(m_name);
+    if (shouldValidateIRAtEachPhase())
+        validate(m_procedure, m_dumpBefore.data());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PhaseScope.h b/Source/JavaScriptCore/b3/B3PhaseScope.h
new file mode 100644
index 000000000..a17698848
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PhaseScope.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3TimingScope.h"
+#include <wtf/Noncopyable.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class PhaseScope {
+    WTF_MAKE_NONCOPYABLE(PhaseScope);
+public:
+    PhaseScope(Procedure&, const char* name);
+    ~PhaseScope(); // this does validation
+
+private:
+    Procedure& m_procedure;
+    const char* m_name;
+    TimingScope m_timingScope;
+    CString m_dumpBefore;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
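
Annotation: a sketch of the intended usage pattern. Each B3 phase wraps its body in a PhaseScope
so the dump-IR and validate-IR options behave uniformly; the phase below is hypothetical.

    bool performMyPhase(JSC::B3::Procedure& proc)
    {
        using namespace JSC::B3;
        // Construction dumps (and optionally saves) the IR when the relevant
        // options are set; destruction records the phase name and validates.
        PhaseScope phaseScope(proc, "myPhase");
        bool changed = false;
        // ... transform proc here ...
        return changed;
    }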
diff --git a/Source/JavaScriptCore/b3/B3PhiChildren.cpp b/Source/JavaScriptCore/b3/B3PhiChildren.cpp
new file mode 100644
index 000000000..3b9b4e244
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PhiChildren.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PhiChildren.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+PhiChildren::PhiChildren(Procedure& proc)
+    : m_upsilons(proc.values().size())
+{
+    for (Value* value : proc.values()) {
+        if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+            Value* phi = upsilon->phi();
+            Vector<UpsilonValue*, 8>& vector = m_upsilons[phi];
+            if (vector.isEmpty())
+                m_phis.append(phi);
+            vector.append(upsilon);
+        }
+    }
+}
+
+PhiChildren::~PhiChildren()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3PhiChildren.h b/Source/JavaScriptCore/b3/B3PhiChildren.h
new file mode 100644
index 000000000..22b827730
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PhiChildren.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include "B3UpsilonValue.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+class PhiChildren {
+public:
+    PhiChildren(Procedure&);
+    ~PhiChildren();
+
+    class ValueCollection {
+    public:
+        ValueCollection(Vector<UpsilonValue*, 8>* values = nullptr)
+            : m_values(values)
+        {
+        }
+
+        unsigned size() const { return m_values->size(); }
+        Value* at(unsigned index) const { return m_values->at(index)->child(0); }
+        Value* operator[](unsigned index) const { return at(index); }
+
+        bool contains(Value* value) const
+        {
+            for (unsigned i = size(); i--;) {
+                if (at(i) == value)
+                    return true;
+            }
+            return false;
+        }
+
+        class iterator {
+        public:
+            iterator(Vector<UpsilonValue*, 8>* values = nullptr, unsigned index = 0)
+                : m_values(values)
+                , m_index(index)
+            {
+            }
+
+            Value* operator*() const
+            {
+                return m_values->at(m_index)->child(0);
+            }
+
+            iterator& operator++()
+            {
+                m_index++;
+                return *this;
+            }
+
+            bool operator==(const iterator& other) const
+            {
+                ASSERT(m_values == other.m_values);
+                return m_index == other.m_index;
+            }
+
+            bool operator!=(const iterator& other) const
+            {
+                return !(*this == other);
+            }
+
+        private:
+            Vector<UpsilonValue*, 8>* m_values;
+            unsigned m_index;
+        };
+
+        iterator begin() const { return iterator(m_values); }
+        iterator end() const { return iterator(m_values, m_values->size()); }
+
+    private:
+        Vector<UpsilonValue*, 8>* m_values;
+    };
+    
+    class UpsilonCollection {
+    public:
+        UpsilonCollection()
+        {
+        }
+        
+        UpsilonCollection(PhiChildren* phiChildren, Value* value, Vector<UpsilonValue*, 8>* values)
+            : m_phiChildren(phiChildren)
+            , m_value(value)
+            , m_values(values)
+        {
+        }
+
+        unsigned size() const { return m_values->size(); }
+        Value* at(unsigned index) const { return m_values->at(index); }
+        Value* operator[](unsigned index) const { return at(index); }
+
+        bool contains(Value* value) const { return m_values->contains(value); }
+
+        typedef Vector<UpsilonValue*, 8>::const_iterator iterator;
+        Vector<UpsilonValue*, 8>::const_iterator begin() const { return m_values->begin(); }
+        Vector<UpsilonValue*, 8>::const_iterator end() const { return m_values->end(); }
+
+        ValueCollection values() { return ValueCollection(m_values); }
+        
+        template<typename Functor>
+        void forAllTransitiveIncomingValues(const Functor& functor)
+        {
+            if (m_value->opcode() != Phi) {
+                functor(m_value);
+                return;
+            }
+            
+            GraphNodeWorklist<Value*> worklist;
+            worklist.push(m_value);
+            while (Value* phi = worklist.pop()) {
+                for (Value* child : m_phiChildren->at(phi).values()) {
+                    if (child->opcode() == Phi)
+                        worklist.push(child);
+                    else
+                        functor(child);
+                }
+            }
+        }
+
+        bool transitivelyUses(Value* candidate)
+        {
+            bool result = false;
+            forAllTransitiveIncomingValues(
+                [&] (Value* child) {
+                    result |= child == candidate;
+                });
+            return result;
+        }
+
+    private:
+        PhiChildren* m_phiChildren { nullptr };
+        Value* m_value { nullptr };
+        Vector<UpsilonValue*, 8>* m_values { nullptr };
+    };
+
+    UpsilonCollection at(Value* value) { return UpsilonCollection(this, value, &m_upsilons[value]); }
+    UpsilonCollection operator[](Value* value) { return at(value); }
+
+    const Vector& phis() const { return m_phis; }
+
+private:
+    IndexMap<Value, Vector<UpsilonValue*, 8>> m_upsilons;
+    Vector<Value*> m_phis;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
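
Annotation: a sketch of consulting PhiChildren from a pass; the dataLog() output is illustrative
only.

    void dumpPhiGraph(JSC::B3::Procedure& proc)
    {
        using namespace JSC::B3;
        PhiChildren phiChildren(proc); // scans all Upsilons once, up front
        for (Value* phi : phiChildren.phis()) {
            // values() strips the Upsilon wrappers and yields the incoming
            // child values that flow into this Phi.
            for (Value* child : phiChildren.at(phi).values())
                dataLog("Phi ", *phi, " <- ", *child, "\n");
        }
    }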
diff --git a/Source/JavaScriptCore/b3/B3Procedure.cpp b/Source/JavaScriptCore/b3/B3Procedure.cpp
new file mode 100644
index 000000000..0cb48c407
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Procedure.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Procedure.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BasicBlockUtils.h"
+#include "B3BlockWorklist.h"
+#include "B3CFG.h"
+#include "B3DataSection.h"
+#include "B3Dominators.h"
+#include "B3OpaqueByproducts.h"
+#include "B3PhiChildren.h"
+#include "B3StackSlot.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+
+namespace JSC { namespace B3 {
+
+Procedure::Procedure()
+    : m_cfg(new CFG(*this))
+    , m_lastPhaseName("initial")
+    , m_byproducts(std::make_unique<OpaqueByproducts>())
+    , m_code(new Air::Code(*this))
+{
+}
+
+Procedure::~Procedure()
+{
+}
+
+void Procedure::printOrigin(PrintStream& out, Origin origin) const
+{
+    if (m_originPrinter)
+        m_originPrinter->run(out, origin);
+    else
+        out.print(origin);
+}
+
+BasicBlock* Procedure::addBlock(double frequency)
+{
+    std::unique_ptr<BasicBlock> block(new BasicBlock(m_blocks.size(), frequency));
+    BasicBlock* result = block.get();
+    m_blocks.append(WTFMove(block));
+    return result;
+}
+
+StackSlot* Procedure::addStackSlot(unsigned byteSize)
+{
+    return m_stackSlots.addNew(byteSize);
+}
+
+Variable* Procedure::addVariable(Type type)
+{
+    return m_variables.addNew(type); 
+}
+
+Value* Procedure::clone(Value* value)
+{
+    std::unique_ptr<Value> clone(value->cloneImpl());
+    clone->m_index = UINT_MAX;
+    clone->owner = nullptr;
+    return m_values.add(WTFMove(clone));
+}
+
+Value* Procedure::addIntConstant(Origin origin, Type type, int64_t value)
+{
+    switch (type) {
+    case Int32:
+        return add<Const32Value>(origin, static_cast<int32_t>(value));
+    case Int64:
+        return add<Const64Value>(origin, value);
+    case Double:
+        return add<ConstDoubleValue>(origin, static_cast<double>(value));
+    case Float:
+        return add<ConstFloatValue>(origin, static_cast<float>(value));
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return nullptr;
+    }
+}
+
+Value* Procedure::addIntConstant(Value* likeValue, int64_t value)
+{
+    return addIntConstant(likeValue->origin(), likeValue->type(), value);
+}
+
+Value* Procedure::addBottom(Origin origin, Type type)
+{
+    return addIntConstant(origin, type, 0);
+}
+
+Value* Procedure::addBottom(Value* value)
+{
+    return addBottom(value->origin(), value->type());
+}
+
+Value* Procedure::addBoolConstant(Origin origin, TriState triState)
+{
+    int32_t value = 0;
+    switch (triState) {
+    case FalseTriState:
+        value = 0;
+        break;
+    case TrueTriState:
+        value = 1;
+        break;
+    case MixedTriState:
+        return nullptr;
+    }
+
+    return addIntConstant(origin, Int32, value);
+}
+
+void Procedure::resetValueOwners()
+{
+    for (BasicBlock* block : *this) {
+        for (Value* value : *block)
+            value->owner = block;
+    }
+}
+
+void Procedure::resetReachability()
+{
+    recomputePredecessors(m_blocks);
+    
+    // The common case is that this does not find any dead blocks.
+    bool foundDead = false;
+    for (auto& block : m_blocks) {
+        if (isBlockDead(block.get())) {
+            foundDead = true;
+            break;
+        }
+    }
+    if (!foundDead)
+        return;
+    
+    resetValueOwners();
+
+    for (Value* value : values()) {
+        if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+            if (isBlockDead(upsilon->phi()->owner))
+                upsilon->replaceWithNop();
+        }
+    }
+    
+    for (auto& block : m_blocks) {
+        if (isBlockDead(block.get())) {
+            for (Value* value : *block)
+                deleteValue(value);
+            block = nullptr;
+        }
+    }
+}
+
+void Procedure::invalidateCFG()
+{
+    m_dominators = nullptr;
+}
+
+void Procedure::dump(PrintStream& out) const
+{
+    IndexSet<Value> valuesInBlocks;
+    for (BasicBlock* block : *this) {
+        out.print(deepDump(*this, block));
+        valuesInBlocks.addAll(*block);
+    }
+    bool didPrint = false;
+    for (Value* value : values()) {
+        if (valuesInBlocks.contains(value))
+            continue;
+
+        if (!didPrint) {
+            dataLog("Orphaned values:\n");
+            didPrint = true;
+        }
+        dataLog("    ", deepDump(*this, value), "\n");
+    }
+    if (variables().size()) {
+        out.print("Variables:\n");
+        for (Variable* variable : variables())
+            out.print("    ", deepDump(variable), "\n");
+    }
+    if (stackSlots().size()) {
+        out.print("Stack slots:\n");
+        for (StackSlot* slot : stackSlots())
+            out.print("    ", pointerDump(slot), ": ", deepDump(slot), "\n");
+    }
+    if (m_byproducts->count())
+        out.print(*m_byproducts);
+}
+
+Vector<BasicBlock*> Procedure::blocksInPreOrder()
+{
+    return B3::blocksInPreOrder(at(0));
+}
+
+Vector<BasicBlock*> Procedure::blocksInPostOrder()
+{
+    return B3::blocksInPostOrder(at(0));
+}
+
+void Procedure::deleteStackSlot(StackSlot* stackSlot)
+{
+    m_stackSlots.remove(stackSlot);
+}
+
+void Procedure::deleteVariable(Variable* variable)
+{
+    m_variables.remove(variable);
+}
+
+void Procedure::deleteValue(Value* value)
+{
+    m_values.remove(value);
+}
+
+void Procedure::deleteOrphans()
+{
+    IndexSet<Value> valuesInBlocks;
+    for (BasicBlock* block : *this)
+        valuesInBlocks.addAll(*block);
+
+    // Since this method is not on any hot path, we do it conservatively: first a pass to
+    // identify the values to be removed, and then a second pass to remove them. This avoids any
+    // risk of the value iteration being broken by removals.
+    Vector<Value*> toRemove;
+    for (Value* value : values()) {
+        if (!valuesInBlocks.contains(value))
+            toRemove.append(value);
+        else if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+            if (!valuesInBlocks.contains(upsilon->phi()))
+                upsilon->replaceWithNop();
+        }
+    }
+
+    for (Value* value : toRemove)
+        deleteValue(value);
+}
+
+Dominators& Procedure::dominators()
+{
+    if (!m_dominators)
+        m_dominators = std::make_unique<Dominators>(*this);
+    return *m_dominators;
+}
+
+void Procedure::addFastConstant(const ValueKey& constant)
+{
+    RELEASE_ASSERT(constant.isConstant());
+    m_fastConstants.add(constant);
+}
+
+bool Procedure::isFastConstant(const ValueKey& constant)
+{
+    if (!constant)
+        return false;
+    return m_fastConstants.contains(constant);
+}
+
+CCallHelpers::Label Procedure::entrypointLabel(unsigned index) const
+{
+    return m_code->entrypointLabel(index);
+}
+
+void* Procedure::addDataSection(size_t size)
+{
+    if (!size)
+        return nullptr;
+    std::unique_ptr<DataSection> dataSection = std::make_unique<DataSection>(size);
+    void* result = dataSection->data();
+    m_byproducts->add(WTFMove(dataSection));
+    return result;
+}
+
+unsigned Procedure::callArgAreaSizeInBytes() const
+{
+    return code().callArgAreaSizeInBytes();
+}
+
+void Procedure::requestCallArgAreaSizeInBytes(unsigned size)
+{
+    code().requestCallArgAreaSizeInBytes(size);
+}
+
+void Procedure::pinRegister(Reg reg)
+{
+    code().pinRegister(reg);
+}
+
+unsigned Procedure::frameSize() const
+{
+    return code().frameSize();
+}
+
+const RegisterAtOffsetList& Procedure::calleeSaveRegisters() const
+{
+    return code().calleeSaveRegisters();
+}
+
+Value* Procedure::addValueImpl(Value* value)
+{
+    return m_values.add(std::unique_ptr<Value>(value));
+}
+
+void Procedure::setBlockOrderImpl(Vector<BasicBlock*>& blocks)
+{
+    IndexSet<BasicBlock> blocksSet;
+    blocksSet.addAll(blocks);
+
+    for (BasicBlock* block : *this) {
+        if (!blocksSet.contains(block))
+            blocks.append(block);
+    }
+
+    // Place blocks into this procedure's block list by first leaking all of the blocks and then
+    // readopting them.
+    for (auto& entry : m_blocks)
+        entry.release();
+
+    m_blocks.resize(blocks.size());
+    for (unsigned i = 0; i < blocks.size(); ++i) {
+        BasicBlock* block = blocks[i];
+        block->m_index = i;
+        m_blocks[i] = std::unique_ptr<BasicBlock>(block);
+    }
+}
+
+void Procedure::setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator> generator)
+{
+    code().setWasmBoundsCheckGenerator(generator);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Procedure.h b/Source/JavaScriptCore/b3/B3Procedure.h
new file mode 100644
index 000000000..2236145ef
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Procedure.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproducts.h"
+#include "B3Origin.h"
+#include "B3PCToOriginMap.h"
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include "B3ValueKey.h"
+#include "CCallHelpers.h"
+#include "PureNaN.h"
+#include "RegisterAtOffsetList.h"
+#include <wtf/Bag.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/HashSet.h>
+#include <wtf/IndexedContainerIterator.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+#include <wtf/SharedTask.h>
+#include <wtf/TriState.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class BlockInsertionSet;
+class CFG;
+class Dominators;
+class StackSlot;
+class Value;
+class Variable;
+
+namespace Air { class Code; }
+
+typedef void WasmBoundsCheckGeneratorFunction(CCallHelpers&, GPRReg, unsigned);
+typedef SharedTask<WasmBoundsCheckGeneratorFunction> WasmBoundsCheckGenerator;
+
+// This represents B3's view of a piece of code. Note that this object must exist in a 1:1
+// relationship with Air::Code. B3::Procedure and Air::Code are just different facades of the B3
+// compiler's knowledge about a piece of code. Some kinds of state aren't perfect fits for either
+// Procedure or Code, and are placed in one or the other based on convenience. Procedure always
+// allocates a Code; a Code cannot be allocated without an owning Procedure, and the two always
+// have references to each other.
+
+class Procedure {
+    WTF_MAKE_NONCOPYABLE(Procedure);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+
+    JS_EXPORT_PRIVATE Procedure();
+    JS_EXPORT_PRIVATE ~Procedure();
+
+    template<typename Callback>
+    void setOriginPrinter(Callback&& callback)
+    {
+        m_originPrinter = createSharedTask<void(PrintStream&, Origin)>(
+            std::forward<Callback>(callback));
+    }
+
+    // Usually you use this via OriginDump, though it's cool to use it directly.
+    void printOrigin(PrintStream& out, Origin origin) const;
+
+    // This is a debugging hack. Sometimes while debugging B3 you need to break the abstraction
+    // and get at the DFG Graph, or whatever data structure the frontend used to describe the
+    // program. The FTL passes the DFG Graph.
+    void setFrontendData(const void* value) { m_frontendData = value; }
+    const void* frontendData() const { return m_frontendData; }
+
+    JS_EXPORT_PRIVATE BasicBlock* addBlock(double frequency = 1);
+
+    // Changes the order of basic blocks to be as in the supplied vector. The vector does not
+    // need to mention every block in the procedure. Blocks not mentioned will be placed after
+    // these blocks in the same order as they were in originally.
+    template<typename BlockIterable>
+    void setBlockOrder(const BlockIterable& iterable)
+    {
+        Vector<BasicBlock*> blocks;
+        for (BasicBlock* block : iterable)
+            blocks.append(block);
+        setBlockOrderImpl(blocks);
+    }
+
+    JS_EXPORT_PRIVATE StackSlot* addStackSlot(unsigned byteSize);
+    JS_EXPORT_PRIVATE Variable* addVariable(Type);
+    
+    template<typename ValueType, typename... Arguments>
+    ValueType* add(Arguments...);
+
+    Value* clone(Value*);
+
+    Value* addIntConstant(Origin, Type, int64_t value);
+    Value* addIntConstant(Value*, int64_t value);
+
+    Value* addBottom(Origin, Type);
+    Value* addBottom(Value*);
+
+    // Returns null for MixedTriState.
+    Value* addBoolConstant(Origin, TriState);
+
+    void resetValueOwners();
+    JS_EXPORT_PRIVATE void resetReachability();
+
+    // This destroys CFG analyses. If we ask for them again, we will recompute them. Usually you
+    // should call this anytime you call resetReachability().
+    void invalidateCFG();
+
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+    unsigned size() const { return m_blocks.size(); }
+    BasicBlock* at(unsigned index) const { return m_blocks[index].get(); }
+    BasicBlock* operator[](unsigned index) const { return at(index); }
+
+    typedef WTF::IndexedContainerIterator<Procedure> iterator;
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+
+    Vector<BasicBlock*> blocksInPreOrder();
+    Vector<BasicBlock*> blocksInPostOrder();
+
+    SparseCollection<StackSlot>& stackSlots() { return m_stackSlots; }
+    const SparseCollection<StackSlot>& stackSlots() const { return m_stackSlots; }
+
+    // Short for stackSlots().remove(). It's better to call this method since it's out of line.
+    void deleteStackSlot(StackSlot*);
+
+    SparseCollection<Variable>& variables() { return m_variables; }
+    const SparseCollection<Variable>& variables() const { return m_variables; }
+
+    // Short for variables().remove(). It's better to call this method since it's out of line.
+    void deleteVariable(Variable*);
+
+    SparseCollection<Value>& values() { return m_values; }
+    const SparseCollection<Value>& values() const { return m_values; }
+
+    // Short for values().remove(). It's better to call this method since it's out of line.
+    void deleteValue(Value*);
+
+    // A valid procedure cannot contain any orphan values. An orphan is a value that is not in
+    // any basic block. It is possible to create an orphan value during code generation or during
+    // transformation. If you know that you may have created some, you can call this method to
+    // delete them, making the procedure valid again.
+    void deleteOrphans();
+
+    CFG& cfg() const { return *m_cfg; }
+
+    Dominators& dominators();
+
+    void addFastConstant(const ValueKey&);
+    bool isFastConstant(const ValueKey&);
+    
+    unsigned numEntrypoints() const { return m_numEntrypoints; }
+    void setNumEntrypoints(unsigned numEntrypoints) { m_numEntrypoints = numEntrypoints; }
+    
+    // Only call this after code generation is complete. Note that the label for the 0th entrypoint
+    // should point to exactly where the code generation cursor was before you started generating
+    // code.
+    JS_EXPORT_PRIVATE CCallHelpers::Label entrypointLabel(unsigned entrypointIndex) const;
+
+    // The name has to be a string literal, since we don't do any memory management for the string.
+    void setLastPhaseName(const char* name)
+    {
+        m_lastPhaseName = name;
+    }
+
+    const char* lastPhaseName() const { return m_lastPhaseName; }
+
+    // Allocates a slab of memory that will be kept alive by anyone who keeps the resulting code
+    // alive. Great for compiler-generated data sections, like switch jump tables and constant pools.
+    // This returns memory that has been zero-initialized.
+    JS_EXPORT_PRIVATE void* addDataSection(size_t);
+
+    OpaqueByproducts& byproducts() { return *m_byproducts; }
+
+    // Below are methods that make sense to call after you have generated code for the procedure.
+
+    // You have to call this method after calling generate(). The code generated by B3::generate()
+    // will require you to keep this object alive for as long as that code is runnable. Usually, this
+    // just keeps alive things like the double constant pool and switch lookup tables. If this sounds
+    // confusing, you should probably be using the B3::Compilation API to compile code. If you use
+    // that API, then you don't have to worry about this.
+    std::unique_ptr<OpaqueByproducts> releaseByproducts() { return WTFMove(m_byproducts); }
+
+    // This gives you direct access to Code. However, the idea is that clients of B3 shouldn't have to
+    // call this. So, Procedure has some methods (below) that expose some Air::Code functionality.
+    const Air::Code& code() const { return *m_code; }
+    Air::Code& code() { return *m_code; }
+
+    unsigned callArgAreaSizeInBytes() const;
+    void requestCallArgAreaSizeInBytes(unsigned size);
+
+    // This tells the register allocators to stay away from this register.
+    JS_EXPORT_PRIVATE void pinRegister(Reg);
+
+    JS_EXPORT_PRIVATE unsigned frameSize() const;
+    JS_EXPORT_PRIVATE const RegisterAtOffsetList& calleeSaveRegisters() const;
+
+    PCToOriginMap& pcToOriginMap() { return m_pcToOriginMap; }
+    PCToOriginMap releasePCToOriginMap() { return WTFMove(m_pcToOriginMap); }
+
+    JS_EXPORT_PRIVATE void setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator>);
+
+    template<typename Functor>
+    void setWasmBoundsCheckGenerator(const Functor& functor)
+    {
+        setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator>(createSharedTask<WasmBoundsCheckGeneratorFunction>(functor)));
+    }
+
+private:
+    friend class BlockInsertionSet;
+
+    JS_EXPORT_PRIVATE Value* addValueImpl(Value*);
+    void setBlockOrderImpl(Vector<BasicBlock*>&);
+
+    SparseCollection<StackSlot> m_stackSlots;
+    SparseCollection<Variable> m_variables;
+    Vector<std::unique_ptr<BasicBlock>> m_blocks;
+    SparseCollection<Value> m_values;
+    std::unique_ptr<CFG> m_cfg;
+    std::unique_ptr<Dominators> m_dominators;
+    HashSet<ValueKey> m_fastConstants;
+    unsigned m_numEntrypoints { 1 };
+    const char* m_lastPhaseName;
+    std::unique_ptr<OpaqueByproducts> m_byproducts;
+    std::unique_ptr<Air::Code> m_code;
+    RefPtr<SharedTask<void(PrintStream&, Origin)>> m_originPrinter;
+    const void* m_frontendData;
+    PCToOriginMap m_pcToOriginMap;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
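
Annotation: the reachability and CFG comments above imply a standard cleanup sequence after a
pass deletes or redirects control flow. A minimal sketch, under the assumption that the pass has
already edited block terminals:

    void tidyAfterCFGEdits(JSC::B3::Procedure& proc)
    {
        using namespace JSC::B3;
        proc.resetReachability(); // recompute predecessors, nop dead Upsilons, drop dead blocks
        proc.invalidateCFG();     // stale analyses get recomputed on demand
        proc.deleteOrphans();     // values outside any block would make the procedure invalid
        Dominators& dominators = proc.dominators(); // fresh analysis
        UNUSED_PARAM(dominators);
    }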
diff --git a/Source/JavaScriptCore/b3/B3ProcedureInlines.h b/Source/JavaScriptCore/b3/B3ProcedureInlines.h
new file mode 100644
index 000000000..990ba31ee
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ProcedureInlines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
+template<typename ValueType, typename... Arguments>
+ValueType* Procedure::add(Arguments... arguments)
+{
+    return static_cast<ValueType*>(addValueImpl(new ValueType(arguments...)));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
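
Annotation: Procedure::add<>() is how every value enters a procedure. A hedged sketch that
builds `return 1 + 2`; it assumes BasicBlock::append(Value*) and the constant value classes,
which live outside this hunk:

    using namespace JSC::B3;

    void buildReturnThree(Procedure& proc)
    {
        BasicBlock* root = proc.addBlock();
        // add<>() constructs the value and hands ownership to the procedure's
        // value collection; a value stays an orphan until appended to a block.
        Value* one = proc.add<Const32Value>(Origin(), 1);
        Value* two = proc.add<Const32Value>(Origin(), 2);
        Value* sum = proc.add<Value>(Add, Origin(), one, two);
        Value* ret = proc.add<Value>(Return, Origin(), sum);
        for (Value* value : { one, two, sum, ret })
            root->append(value);
    }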
diff --git a/Source/JavaScriptCore/b3/B3PureCSE.cpp b/Source/JavaScriptCore/b3/B3PureCSE.cpp
new file mode 100644
index 000000000..0ea344777
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PureCSE.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PureCSE.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Dominators.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+PureCSE::PureCSE()
+{
+}
+
+PureCSE::~PureCSE()
+{
+}
+
+void PureCSE::clear()
+{
+    m_map.clear();
+}
+
+Value* PureCSE::findMatch(const ValueKey& key, BasicBlock* block, Dominators& dominators)
+{
+    if (!key)
+        return nullptr;
+
+    auto iter = m_map.find(key);
+    if (iter == m_map.end())
+        return nullptr;
+
+    for (Value* match : iter->value) {
+        if (!match->owner)
+            continue;
+        if (dominators.dominates(match->owner, block))
+            return match;
+    }
+
+    return nullptr;
+}
+
+bool PureCSE::process(Value* value, Dominators& dominators)
+{
+    if (value->opcode() == Identity)
+        return false;
+
+    ValueKey key = value->key();
+    if (!key)
+        return false;
+
+    Matches& matches = m_map.add(key, Matches()).iterator->value;
+
+    for (Value* match : matches) {
+        if (!match->owner)
+            continue;
+        if (dominators.dominates(match->owner, value->owner)) {
+            value->replaceWithIdentity(match);
+            return true;
+        }
+    }
+
+    matches.append(value);
+    return false;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3PureCSE.h b/Source/JavaScriptCore/b3/B3PureCSE.h
new file mode 100644
index 000000000..942966ceb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PureCSE.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueKey.h"
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Dominators;
+class Value;
+
+typedef Vector<Value*, 1> Matches;
+
+// This is a reusable utility for doing pure CSE. You can use it to do pure CSE on a program by just
+// proceeding in order and calling process().
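+//
+// A minimal driver sketch (illustrative; it assumes the Procedure API used
+// elsewhere in B3 and is not part of this header):
+//
+//     PureCSE cse;
+//     proc.resetValueOwners(); // process() consults Value::owner.
+//     Dominators& dominators = proc.dominators();
+//     for (BasicBlock* block : proc.blocksInPreOrder()) {
+//         for (Value* value : *block)
+//             cse.process(value, dominators);
+//     }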
+class PureCSE {
+public:
+    PureCSE();
+    ~PureCSE();
+
+    void clear();
+
+    Value* findMatch(const ValueKey&, BasicBlock*, Dominators&);
+
+    bool process(Value*, Dominators&);
+    
+private:
+    HashMap<ValueKey, Matches> m_map;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp b/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp
new file mode 100644
index 000000000..ef928112f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ReduceDoubleToFloat.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+bool verbose = false;
+bool printRemainingConversions = false;
+
+class DoubleToFloatReduction {
+public:
+    DoubleToFloatReduction(Procedure& procedure)
+        : m_procedure(procedure)
+    {
+    }
+
+    void run()
+    {
+        if (!findCandidates())
+            return;
+
+        findPhisContainingFloat();
+
+        simplify();
+
+        cleanUp();
+    }
+
+private:
+    // This step finds values that are used as Double and cannot be converted to Float.
+    // It flows the information backward through Phi-Upsilons.
+    bool findCandidates()
+    {
+        bool foundConversionCandidate = false;
+        Vector<Value*, 64> upsilons;
+
+        // First, we find all values that are strictly used as double.
+        // Those are values used by something other than DoubleToFloat.
+        //
+        // We don't know the state of Upsilons until their Phi has been
+        // set. We just keep a list of them and update them next.
+        for (BasicBlock* block : m_procedure) {
+            for (Value* value : *block) {
+                value->performSubstitution();
+
+                if (value->opcode() == DoubleToFloat) {
+                    foundConversionCandidate = true;
+
+                    Value* child = value->child(0);
+                    if (child->opcode() == FloatToDouble) {
+                        // We don't really need to simplify this early but it simplifies debugging.
+                        value->replaceWithIdentity(child->child(0));
+                    }
+                    continue;
+                }
+
+                if (value->opcode() == FloatToDouble)
+                    foundConversionCandidate = true;
+
+                if (value->opcode() == Upsilon) {
+                    Value* child = value->child(0);
+                    if (child->type() == Double)
+                        upsilons.append(value);
+                    continue;
+                }
+
+                for (Value* child : value->children()) {
+                    if (child->type() == Double)
+                        m_valuesUsedAsDouble.add(child);
+                }
+            }
+        }
+
+        if (!foundConversionCandidate)
+            return false;
+
+        // Now we just need to propagate through Phi-Upsilon.
+        // An Upsilon can convert its input to float if its phi is never used as double.
+        // If we modify a phi, we need to continue until all the Upsilon-Phi converge.
+        bool changedPhiState;
+        do {
+            changedPhiState = false;
+            for (Value* value : upsilons) {
+                UpsilonValue* upsilon = value->as<UpsilonValue>();
+                Value* phi = upsilon->phi();
+                if (!m_valuesUsedAsDouble.contains(phi))
+                    continue;
+
+                Value* child = value->child(0);
+                bool childChanged = m_valuesUsedAsDouble.add(child);
+                if (childChanged && child->opcode() == Phi)
+                    changedPhiState = true;
+            }
+        } while (changedPhiState);
+
+        if (verbose) {
+            dataLog("Conversion candidates:\n");
+            for (BasicBlock* block : m_procedure) {
+                for (Value* value : *block) {
+                    if (value->type() == Double && !m_valuesUsedAsDouble.contains(value))
+                        dataLog("    ", deepDump(m_procedure, value), "\n");
+                }
+            }
+            dataLog("\n");
+        }
+
+        return true;
+    }
+
+    // This step finds Phis of type Double that effectively contain Float values.
+    // It flows that information forward through Phi-Upsilons.
+    void findPhisContainingFloat()
+    {
+        Vector<Value*, 64> upsilons;
+
+        // The Double values that can be safely turned into Float are:
+        // - FloatToDouble
+        // - ConstDouble with a value that converts to Float without losing precision.
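+        // (For example, 1.5 converts to float and back without loss, while 0.1
+        // does not: static_cast<double>(static_cast<float>(0.1)) != 0.1.)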
+        for (BasicBlock* block : m_procedure) {
+            for (Value* value : *block) {
+                if (value->opcode() != Upsilon)
+                    continue;
+
+                Value* child = value->child(0);
+                if (child->type() != Double
+                    || child->opcode() == FloatToDouble)
+                    continue;
+
+                if (child->hasDouble()) {
+                    double constValue = child->asDouble();
+                    if (isIdentical(static_cast<double>(static_cast<float>(constValue)), constValue))
+                        continue;
+                }
+
+                if (child->opcode() == Phi) {
+                    upsilons.append(value);
+                    continue;
+                }
+
+                UpsilonValue* upsilon = value->as<UpsilonValue>();
+                Value* phi = upsilon->phi();
+                m_phisContainingDouble.add(phi);
+            }
+        }
+
+        // Propagate the flags forward.
+        bool changedPhiState;
+        do {
+            changedPhiState = false;
+            for (Value* value : upsilons) {
+                Value* child = value->child(0);
+                if (m_phisContainingDouble.contains(child)) {
+                    UpsilonValue* upsilon = value->as<UpsilonValue>();
+                    Value* phi = upsilon->phi();
+                    changedPhiState |= m_phisContainingDouble.add(phi);
+                }
+            }
+        } while (changedPhiState);
+
+        if (verbose) {
+            dataLog("Phis containing float values:\n");
+            for (BasicBlock* block : m_procedure) {
+                for (Value* value : *block) {
+                    if (value->opcode() == Phi
+                        && value->type() == Double
+                        && !m_phisContainingDouble.contains(value))
+                        dataLog("    ", deepDump(m_procedure, value), "\n");
+                }
+            }
+            dataLog("\n");
+        }
+    }
+
+    bool canBeTransformedToFloat(Value* value)
+    {
+        if (value->opcode() == FloatToDouble)
+            return true;
+
+        if (value->hasDouble())
+            return true; // Double constant truncated to float.
+
+        if (value->opcode() == Phi) {
+            return value->type() == Float
+                || (value->type() == Double && !m_phisContainingDouble.contains(value));
+        }
+        return false;
+    }
+
+    Value* transformToFloat(Value* value, unsigned valueIndex, InsertionSet& insertionSet)
+    {
+        ASSERT(canBeTransformedToFloat(value));
+        if (value->opcode() == FloatToDouble)
+            return value->child(0);
+
+        if (value->hasDouble())
+            return insertionSet.insert<ConstFloatValue>(valueIndex, value->origin(), static_cast<float>(value->asDouble()));
+
+        if (value->opcode() == Phi) {
+            ASSERT(value->type() == Double || value->type() == Float);
+            if (value->type() == Double)
+                convertPhi(value);
+            return value;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return nullptr;
+    }
+
+    void convertPhi(Value* phi)
+    {
+        ASSERT(phi->opcode() == Phi);
+        ASSERT(phi->type() == Double);
+        phi->setType(Float);
+        m_convertedPhis.add(phi);
+    }
+
+    bool attemptTwoOperandsSimplify(Value* candidate, unsigned candidateIndex, InsertionSet& insertionSet)
+    {
+        Value* left = candidate->child(0);
+        Value* right = candidate->child(1);
+        if (!canBeTransformedToFloat(left) || !canBeTransformedToFloat(right))
+            return false;
+
+        m_convertedValue.add(candidate);
+        candidate->child(0) = transformToFloat(left, candidateIndex, insertionSet);
+        candidate->child(1) = transformToFloat(right, candidateIndex, insertionSet);
+        return true;
+    }
+
+    // Simplify Double operations into Float operations.
+    void simplify()
+    {
+        Vector<Value*, 64> upsilonReferencingDoublePhi;
+
+        InsertionSet insertionSet(m_procedure);
+        for (BasicBlock* block : m_procedure) {
+            for (unsigned index = 0; index < block->size(); ++index) {
+                Value* value = block->at(index);
+
+                switch (value->opcode()) {
+                case Equal:
+                case NotEqual:
+                case LessThan:
+                case GreaterThan:
+                case LessEqual:
+                case GreaterEqual:
+                case EqualOrUnordered:
+                    attemptTwoOperandsSimplify(value, index, insertionSet);
+                    continue;
+                case Upsilon: {
+                    Value* child = value->child(0);
+                    if (child->opcode() == Phi && child->type() == Double)
+                        upsilonReferencingDoublePhi.append(value);
+                    continue;
+                }
+                default:
+                    break;
+                }
+
+                if (m_valuesUsedAsDouble.contains(value))
+                    continue;
+
+                switch (value->opcode()) {
+                case Add:
+                case Sub:
+                case Mul:
+                case Div:
+                    if (attemptTwoOperandsSimplify(value, index, insertionSet))
+                        value->setType(Float);
+                    break;
+                case Abs:
+                case Ceil:
+                case Floor:
+                case Neg:
+                case Sqrt: {
+                    Value* child = value->child(0);
+                    if (canBeTransformedToFloat(child)) {
+                        value->child(0) = transformToFloat(child, index, insertionSet);
+                        value->setType(Float);
+                        m_convertedValue.add(value);
+                    }
+                    break;
+                }
+                case IToD: {
+                    Value* iToF = insertionSet.insert<Value>(index, IToF, value->origin(), value->child(0));
+                    value->setType(Float);
+                    value->replaceWithIdentity(iToF);
+                    m_convertedValue.add(value);
+                    break;
+                }
+                case FloatToDouble:
+                    // This happens if we round twice.
+                    // Typically, this is indirect through Phi-Upsilons.
+                    // The Upsilon rounds and the Phi rounds.
+                    value->setType(Float);
+                    value->replaceWithIdentity(value->child(0));
+                    m_convertedValue.add(value);
+                    break;
+                case Phi:
+                    // If a Phi is always converted to Float, we always make it into a float Phi-Upsilon.
+                    // This is a simplistic view of things. Ideally we should keep the type that will minimize
+                    // the amount of conversion in the loop.
+                    if (value->type() == Double)
+                        convertPhi(value);
+                    break;
+                default:
+                    break;
+                }
+            }
+            insertionSet.execute(block);
+        }
+
+        if (!upsilonReferencingDoublePhi.isEmpty()) {
+            // If a Phi contains Float values typed as Double, but is not used as Float
+            // by a non-trivial operation, we did not convert it.
+            //
+            // We fix that now by converting the remaining Phis that contain
+            // Float values but were not converted to Float.
+            bool changedPhi;
+            do {
+                changedPhi = false;
+
+                for (Value* value : upsilonReferencingDoublePhi) {
+                    UpsilonValue* upsilon = value->as<UpsilonValue>();
+                    Value* child = value->child(0);
+                    Value* phi = upsilon->phi();
+                    if (phi->type() == Float && child->type() == Double
+                        && !m_phisContainingDouble.contains(child)) {
+                        convertPhi(child);
+                        changedPhi = true;
+                    }
+                }
+
+            } while (changedPhi);
+        }
+    }
+
+    // We are in an inconsistent state where we have DoubleToFloat nodes
+    // over values producing Float, and Phis that are Float for Upsilons
+    // that are Double.
+    //
+    // This step puts us back in a consistent state.
+    void cleanUp()
+    {
+        InsertionSet insertionSet(m_procedure);
+
+        for (BasicBlock* block : m_procedure) {
+            for (unsigned index = 0; index < block->size(); ++index) {
+                Value* value = block->at(index);
+                if (value->opcode() == DoubleToFloat && value->child(0)->type() == Float) {
+                    value->replaceWithIdentity(value->child(0));
+                    continue;
+                }
+
+                if (value->opcode() == Upsilon) {
+                    UpsilonValue* upsilon = value->as<UpsilonValue>();
+                    Value* child = value->child(0);
+                    Value* phi = upsilon->phi();
+
+                    if (phi->type() == Float) {
+                        if (child->type() == Double) {
+                            Value* newChild = nullptr;
+                            if (child->opcode() == FloatToDouble)
+                                newChild = child->child(0);
+                            else if (child->hasDouble())
+                                newChild = insertionSet.insert<ConstFloatValue>(index, child->origin(), static_cast<float>(child->asDouble()));
+                            else
+                                newChild = insertionSet.insert<Value>(index, DoubleToFloat, upsilon->origin(), child);
+                            upsilon->child(0) = newChild;
+                        }
+                        continue;
+                    }
+                }
+
+                if (!m_convertedValue.contains(value)) {
+                    // Phis can be converted from Double to Float if the value they contain
+                    // is not more precise than a Float.
+                    // If the value is needed as Double, it has to be converted back.
+                    for (Value*& child : value->children()) {
+                        if (m_convertedPhis.contains(child))
+                            child = insertionSet.insert(index, FloatToDouble, value->origin(), child);
+                    }
+                }
+            }
+            insertionSet.execute(block);
+        }
+    }
+
+    Procedure& m_procedure;
+
+    // Set of all the Double values that are actually used as Double.
+    // Converting any of them to Float would lose precision.
+    IndexSet<Value> m_valuesUsedAsDouble;
+
+    // Set of all the Phi of type Double that really contains a Double.
+    // Any Double Phi not in the set can be converted to Float without losing precision.
+    IndexSet<Value> m_phisContainingDouble;
+
+    // Any value that was converted from producing a Double to producing a Float.
+    // This set does not include Phi-Upsilons.
+    IndexSet<Value> m_convertedValue;
+
+    // Any value that previously produced Double and now produce Float.
+    IndexSet<Value> m_convertedPhis;
+};
+
+void printGraphIfConverting(Procedure& procedure)
+{
+    if (!printRemainingConversions)
+        return;
+
+    UseCounts useCount(procedure);
+
+    Vector<Value*> doubleToFloat;
+    Vector<Value*> floatToDouble;
+
+    for (BasicBlock* block : procedure) {
+        for (Value* value : *block) {
+            if (!useCount.numUses(value))
+                continue;
+
+            if (value->opcode() == DoubleToFloat)
+                doubleToFloat.append(value);
+            if (value->opcode() == FloatToDouble)
+                floatToDouble.append(value);
+        }
+    }
+
+    if (doubleToFloat.isEmpty() && floatToDouble.isEmpty())
+        return;
+
+    dataLog("Procedure with Float-Double conversion:\n", procedure, "\n");
+    dataLog("Converting nodes:\n");
+    for (Value* value : doubleToFloat)
+        dataLog("    ", deepDump(procedure, value), "\n");
+    for (Value* value : floatToDouble)
+        dataLog("    ", deepDump(procedure, value), "\n");
+
+}
+
+} // anonymous namespace.
+
+void reduceDoubleToFloat(Procedure& procedure)
+{
+    PhaseScope phaseScope(procedure, "reduceDoubleToFloat");
+
+    if (verbose)
+        dataLog("Before DoubleToFloatReduction:\n", procedure, "\n");
+
+    DoubleToFloatReduction doubleToFloatReduction(procedure);
+    doubleToFloatReduction.run();
+
+    if (verbose)
+        dataLog("After DoubleToFloatReduction:\n", procedure, "\n");
+
+    printGraphIfConverting(procedure);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h b/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h
new file mode 100644
index 000000000..899f770d3
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Change Double operations to Float operations when the difference is not observable
+// and doing so is likely beneficial.
+void reduceDoubleToFloat(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ReduceStrength.cpp b/Source/JavaScriptCore/b3/B3ReduceStrength.cpp
new file mode 100644
index 000000000..43c7302a6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ReduceStrength.cpp
@@ -0,0 +1,2518 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ReduceStrength.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3ComputeDivisionMagic.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3PhiChildren.h"
+#include "B3ProcedureInlines.h"
+#include "B3PureCSE.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueKeyInlines.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/HashMap.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+// The goal of this phase is to:
+//
+// - Replace operations with less expensive variants. This includes constant folding and classic
+//   strength reductions like turning Mul(x, 1 << k) into Shl(x, k).
+//
+// - Reassociate constant operations. For example, Load(Add(x, c)) is turned into Load(x, offset = c)
+//   and Add(Add(x, c), d) is turned into Add(x, c + d).
+//
+// - Canonicalize operations. There are some cases where it's not at all obvious which kind of
+//   operation is less expensive, but it's useful for subsequent phases - particularly LowerToAir -
+//   to have only one way of representing things.
+//
+// This phase runs to fixpoint. Therefore, the canonicalizations must be designed to be monotonic.
+// For example, if we had a canonicalization that said that Add(x, -c) should be Sub(x, c) and
+// another canonicalization that said that Sub(x, d) should be Add(x, -d), then this phase would end
+// up running forever. We don't want that.
+//
+// Therefore, we need to prioritize certain canonical forms over others. Naively, we want strength
+// reduction to reduce the number of values, and so a form involving fewer total values is more
+// canonical. But we might break this, for example when reducing strength of Mul(x, 9). This could be
+// better written as Add(Shl(x, 3), x), which also happens to be representable using a single
+// instruction on x86.
+//
+// Here are some of the rules we have:
+//
+// Canonical form of logical not: BitXor(value, 1). We may have to avoid using this form if we don't
+// know for sure that 'value' is 0-or-1 (i.e. returnsBool). In that case we fall back on
+// Equal(value, 0).
+//
+// Canonical form of commutative operations: if the operation involves a constant, the constant must
+// come second. Add(x, constant) is canonical, while Add(constant, x) is not. If there are no
+// constants then the canonical form involves the lower-indexed value first. Given Add(x, y), it's
+// canonical if x->index() <= y->index().
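+//
+// For example, commutativity rewrites Add(Const32(1), x) into Add(x, Const32(1)),
+// and reassociation then folds Add(Add(x, 1), 2) into Add(x, 3). Each rewrite
+// moves strictly toward the canonical form, which is why running to fixpoint
+// terminates.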
+
+bool verbose = false;
+
+// FIXME: This IntRange stuff should be refactored into a general constant propagator. It's weird
+// that it's just sitting here in this file.
+class IntRange {
+public:
+    IntRange()
+    {
+    }
+
+    IntRange(int64_t min, int64_t max)
+        : m_min(min)
+        , m_max(max)
+    {
+    }
+
+    template<typename T>
+    static IntRange top()
+    {
+        return IntRange(std::numeric_limits<T>::min(), std::numeric_limits<T>::max());
+    }
+
+    static IntRange top(Type type)
+    {
+        switch (type) {
+        case Int32:
+            return top<int32_t>();
+        case Int64:
+            return top<int64_t>();
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    static IntRange rangeForMask(T mask)
+    {
+        if (!(mask + 1))
+            return top<T>();
+        return IntRange(0, mask);
+    }
+
+    static IntRange rangeForMask(int64_t mask, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return rangeForMask<int32_t>(static_cast<int32_t>(mask));
+        case Int64:
+            return rangeForMask<int64_t>(mask);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    static IntRange rangeForZShr(int32_t shiftAmount)
+    {
+        typename std::make_unsigned<T>::type mask = 0;
+        mask--;
+        mask >>= shiftAmount;
+        return rangeForMask<T>(static_cast<T>(mask));
+    }
+
+    static IntRange rangeForZShr(int32_t shiftAmount, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return rangeForZShr<int32_t>(shiftAmount);
+        case Int64:
+            return rangeForZShr<int64_t>(shiftAmount);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    int64_t min() const { return m_min; }
+    int64_t max() const { return m_max; }
+
+    void dump(PrintStream& out) const
+    {
+        out.print("[", m_min, ",", m_max, "]");
+    }
+
+    template<typename T>
+    bool couldOverflowAdd(const IntRange& other)
+    {
+        return sumOverflows<T>(m_min, other.m_min)
+            || sumOverflows<T>(m_min, other.m_max)
+            || sumOverflows<T>(m_max, other.m_min)
+            || sumOverflows<T>(m_max, other.m_max);
+    }
+
+    bool couldOverflowAdd(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return couldOverflowAdd<int32_t>(other);
+        case Int64:
+            return couldOverflowAdd<int64_t>(other);
+        default:
+            return true;
+        }
+    }
+
+    template<typename T>
+    bool couldOverflowSub(const IntRange& other)
+    {
+        return differenceOverflows<T>(m_min, other.m_min)
+            || differenceOverflows<T>(m_min, other.m_max)
+            || differenceOverflows<T>(m_max, other.m_min)
+            || differenceOverflows<T>(m_max, other.m_max);
+    }
+
+    bool couldOverflowSub(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return couldOverflowSub<int32_t>(other);
+        case Int64:
+            return couldOverflowSub<int64_t>(other);
+        default:
+            return true;
+        }
+    }
+
+    template<typename T>
+    bool couldOverflowMul(const IntRange& other)
+    {
+        return productOverflows<T>(m_min, other.m_min)
+            || productOverflows<T>(m_min, other.m_max)
+            || productOverflows<T>(m_max, other.m_min)
+            || productOverflows<T>(m_max, other.m_max);
+    }
+
+    bool couldOverflowMul(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return couldOverflowMul<int32_t>(other);
+        case Int64:
+            return couldOverflowMul<int64_t>(other);
+        default:
+            return true;
+        }
+    }
+
+    template<typename T>
+    IntRange shl(int32_t shiftAmount)
+    {
+        T newMin = static_cast<T>(m_min) << static_cast<T>(shiftAmount);
+        T newMax = static_cast<T>(m_max) << static_cast<T>(shiftAmount);
+
+        if ((newMin >> shiftAmount) != static_cast<T>(m_min))
+            newMin = std::numeric_limits<T>::min();
+        if ((newMax >> shiftAmount) != static_cast<T>(m_max))
+            newMax = std::numeric_limits<T>::max();
+
+        return IntRange(newMin, newMax);
+    }
+
+    IntRange shl(int32_t shiftAmount, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return shl<int32_t>(shiftAmount);
+        case Int64:
+            return shl<int64_t>(shiftAmount);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange sShr(int32_t shiftAmount)
+    {
+        T newMin = static_cast<T>(m_min) >> static_cast<T>(shiftAmount);
+        T newMax = static_cast<T>(m_max) >> static_cast<T>(shiftAmount);
+
+        return IntRange(newMin, newMax);
+    }
+
+    IntRange sShr(int32_t shiftAmount, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return sShr<int32_t>(shiftAmount);
+        case Int64:
+            return sShr<int64_t>(shiftAmount);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange zShr(int32_t shiftAmount)
+    {
+        // This is an awkward corner case for all of the other logic.
+        if (!shiftAmount)
+            return *this;
+
+        // If the input range may be negative, then all we can say about the output range is that it
+        // will be masked. That's because -1 right shifted just produces that mask.
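+        // (For example, with T = int32_t and shiftAmount = 28,
+        // static_cast<uint32_t>(-1) >> 28 == 0xf, so the result lies in [0, 0xf].)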
+        if (m_min < 0)
+            return rangeForZShr<T>(shiftAmount);
+
+        // If the input range is non-negative, then this just brings the range closer to zero.
+        typedef typename std::make_unsigned<T>::type UnsignedT;
+        UnsignedT newMin = static_cast<UnsignedT>(m_min) >> static_cast<UnsignedT>(shiftAmount);
+        UnsignedT newMax = static_cast<UnsignedT>(m_max) >> static_cast<UnsignedT>(shiftAmount);
+        
+        return IntRange(newMin, newMax);
+    }
+
+    IntRange zShr(int32_t shiftAmount, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return zShr<int32_t>(shiftAmount);
+        case Int64:
+            return zShr<int64_t>(shiftAmount);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange add(const IntRange& other)
+    {
+        if (couldOverflowAdd<T>(other))
+            return top<T>();
+        return IntRange(m_min + other.m_min, m_max + other.m_max);
+    }
+
+    IntRange add(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return add<int32_t>(other);
+        case Int64:
+            return add<int64_t>(other);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange sub(const IntRange& other)
+    {
+        if (couldOverflowSub<T>(other))
+            return top<T>();
+        return IntRange(m_min - other.m_max, m_max - other.m_min);
+    }
+
+    IntRange sub(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return sub<int32_t>(other);
+        case Int64:
+            return sub<int64_t>(other);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange mul(const IntRange& other)
+    {
+        if (couldOverflowMul<T>(other))
+            return top<T>();
+        return IntRange(
+            std::min(
+                std::min(m_min * other.m_min, m_min * other.m_max),
+                std::min(m_max * other.m_min, m_max * other.m_max)),
+            std::max(
+                std::max(m_min * other.m_min, m_min * other.m_max),
+                std::max(m_max * other.m_min, m_max * other.m_max)));
+    }
+
+    IntRange mul(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return mul<int32_t>(other);
+        case Int64:
+            return mul<int64_t>(other);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+private:
+    int64_t m_min { 0 };
+    int64_t m_max { 0 };
+};
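+
+// Worked example (illustrative, not from the original source): shl<int32_t> on
+// the range [1, 0x40000000] with shiftAmount = 1 gives newMin = 2, but newMax
+// overflows to 0x80000000 and fails the round-trip check, so the maximum
+// saturates to std::numeric_limits<int32_t>::max().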
+
+class ReduceStrength {
+public:
+    ReduceStrength(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+        , m_blockInsertionSet(proc)
+    {
+    }
+
+    bool run()
+    {
+        bool result = false;
+        bool first = true;
+        unsigned index = 0;
+        do {
+            m_changed = false;
+            m_changedCFG = false;
+            ++index;
+
+            if (first)
+                first = false;
+            else if (verbose) {
+                dataLog("B3 after iteration #", index - 1, " of reduceStrength:\n");
+                dataLog(m_proc);
+            }
+            
+            simplifyCFG();
+
+            if (m_changedCFG) {
+                m_proc.resetReachability();
+                m_proc.invalidateCFG();
+                m_changed = true;
+            }
+
+            // We definitely want to do DCE before we do CSE so that we don't hoist things. For
+            // example:
+            //
+            // @dead = Mul(@a, @b)
+            // ... lots of control flow and stuff
+            // @thing = Mul(@a, @b)
+            //
+            // If we do CSE before DCE, we will remove @thing and keep @dead. Effectively, we will
+            // "hoist" @thing. On the other hand, if we run DCE before CSE, we will kill @dead and
+            // keep @thing. That's better, since we usually want things to stay wherever the client
+            // put them. We're not actually smart enough to move things around at random.
+            killDeadCode();
+            
+            simplifySSA();
+            
+            m_proc.resetValueOwners();
+            m_dominators = &m_proc.dominators(); // Recompute if necessary.
+            m_pureCSE.clear();
+
+            for (BasicBlock* block : m_proc.blocksInPreOrder()) {
+                m_block = block;
+                
+                for (m_index = 0; m_index < block->size(); ++m_index) {
+                    if (verbose) {
+                        dataLog(
+                            "Looking at ", *block, " #", m_index, ": ",
+                            deepDump(m_proc, block->at(m_index)), "\n");
+                    }
+                    m_value = m_block->at(m_index);
+                    m_value->performSubstitution();
+                    
+                    reduceValueStrength();
+                    replaceIfRedundant();
+                }
+                m_insertionSet.execute(m_block);
+            }
+
+            m_changedCFG |= m_blockInsertionSet.execute();
+            if (m_changedCFG) {
+                m_proc.resetReachability();
+                m_proc.invalidateCFG();
+                m_dominators = nullptr; // Dominators are not valid anymore, and we don't need them yet.
+                m_changed = true;
+            }
+            
+            result |= m_changed;
+        } while (m_changed);
+        return result;
+    }
+    
+private:
+    void reduceValueStrength()
+    {
+        switch (m_value->opcode()) {
+        case Add:
+            handleCommutativity();
+            
+            if (m_value->child(0)->opcode() == Add && isInt(m_value->type())) {
+                // Turn this: Add(Add(value, constant1), constant2)
+                // Into this: Add(value, constant1 + constant2)
+                Value* newSum = m_value->child(1)->addConstant(m_proc, m_value->child(0)->child(1));
+                if (newSum) {
+                    m_insertionSet.insertValue(m_index, newSum);
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_value->child(1) = newSum;
+                    m_changed = true;
+                    break;
+                }
+                
+                // Turn this: Add(Add(value, constant), otherValue)
+                // Into this: Add(Add(value, otherValue), constant)
+                if (!m_value->child(1)->hasInt() && m_value->child(0)->child(1)->hasInt()) {
+                    Value* value = m_value->child(0)->child(0);
+                    Value* constant = m_value->child(0)->child(1);
+                    Value* otherValue = m_value->child(1);
+                    // This could create duplicate code if Add(value, constant) is used elsewhere.
+                    // However, we already model adding a constant as if it was free in other places
+                    // so let's just roll with it. The alternative would mean having to do good use
+                    // counts, which reduceStrength() currently doesn't have.
+                    m_value->child(0) =
+                        m_insertionSet.insert<Value>(
+                            m_index, Add, m_value->origin(), value, otherValue);
+                    m_value->child(1) = constant;
+                    m_changed = true;
+                    break;
+                }
+            }
+            
+            // Turn this: Add(otherValue, Add(value, constant))
+            // Into this: Add(Add(value, otherValue), constant)
+            if (isInt(m_value->type())
+                && !m_value->child(0)->hasInt()
+                && m_value->child(1)->opcode() == Add
+                && m_value->child(1)->child(1)->hasInt()) {
+                Value* value = m_value->child(1)->child(0);
+                Value* constant = m_value->child(1)->child(1);
+                Value* otherValue = m_value->child(0);
+                // This creates a duplicate add. That's dangerous but probably fine, see above.
+                m_value->child(0) =
+                    m_insertionSet.insert<Value>(
+                        m_index, Add, m_value->origin(), value, otherValue);
+                m_value->child(1) = constant;
+                m_changed = true;
+                break;
+            }
+            
+            // Turn this: Add(constant1, constant2)
+            // Into this: constant1 + constant2
+            if (Value* constantAdd = m_value->child(0)->addConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantAdd);
+                break;
+            }
+
+            // Turn this: Integer Add(value, value)
+            // Into this: Shl(value, 1)
+            // This is a useful canonicalization. It's not meant to be a strength reduction.
+            if (m_value->isInteger() && m_value->child(0) == m_value->child(1)) {
+                replaceWithNewValue(
+                    m_proc.add<Value>(
+                        Shl, m_value->origin(), m_value->child(0),
+                        m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), 1)));
+                break;
+            }
+
+            // Turn this: Add(value, zero)
+            // Into an Identity.
+            //
+            // Addition is subtle with doubles. Zero is not the neutral value, negative zero is:
+            //    0 + 0 = 0
+            //    0 + -0 = 0
+            //    -0 + 0 = 0
+            //    -0 + -0 = -0
+            if (m_value->child(1)->isInt(0) || m_value->child(1)->isNegativeZero()) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: Integer Add(Sub(0, value), -1)
+            // Into this: BitXor(value, -1)
+            if (m_value->isInteger()
+                && m_value->child(0)->opcode() == Sub
+                && m_value->child(1)->isInt(-1)
+                && m_value->child(0)->child(0)->isInt(0)) {
+                replaceWithNewValue(m_proc.add<Value>(BitXor, m_value->origin(), m_value->child(0)->child(1), m_value->child(1)));
+                break;
+            }
+
+            break;
+
+        case Sub:
+            // Turn this: Sub(constant1, constant2)
+            // Into this: constant1 - constant2
+            if (Value* constantSub = m_value->child(0)->subConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantSub);
+                break;
+            }
+
+            if (isInt(m_value->type())) {
+                // Turn this: Sub(value, constant)
+                // Into this: Add(value, -constant)
+                if (Value* negatedConstant = m_value->child(1)->negConstant(m_proc)) {
+                    m_insertionSet.insertValue(m_index, negatedConstant);
+                    replaceWithNew<Value>(
+                        Add, m_value->origin(), m_value->child(0), negatedConstant);
+                    break;
+                }
+                
+                // Turn this: Sub(0, value)
+                // Into this: Neg(value)
+                if (m_value->child(0)->isInt(0)) {
+                    replaceWithNew<Value>(Neg, m_value->origin(), m_value->child(1));
+                    break;
+                }
+            }
+
+            break;
+
+        case Neg:
+            // Turn this: Neg(constant)
+            // Into this: -constant
+            if (Value* constant = m_value->child(0)->negConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            
+            // Turn this: Neg(Neg(value))
+            // Into this: value
+            if (m_value->child(0)->opcode() == Neg) {
+                replaceWithIdentity(m_value->child(0)->child(0));
+                break;
+            }
+            
+            break;
+
+        case Mul:
+            handleCommutativity();
+
+            // Turn this: Mul(constant1, constant2)
+            // Into this: constant1 * constant2
+            if (Value* value = m_value->child(0)->mulConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(value);
+                break;
+            }
+
+            if (m_value->child(1)->hasInt()) {
+                int64_t factor = m_value->child(1)->asInt();
+
+                // Turn this: Mul(value, 0)
+                // Into this: 0
+                // Note that we don't do this for doubles because that's wrong. For example, -1 * 0
+                // and 1 * 0 yield different results.
+                if (!factor) {
+                    replaceWithIdentity(m_value->child(1));
+                    break;
+                }
+
+                // Turn this: Mul(value, 1)
+                // Into this: value
+                if (factor == 1) {
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                }
+
+                // Turn this: Mul(value, -1)
+                // Into this: Sub(0, value)
+                if (factor == -1) {
+                    replaceWithNewValue(
+                        m_proc.add<Value>(
+                            Sub, m_value->origin(),
+                            m_insertionSet.insertIntConstant(m_index, m_value, 0),
+                            m_value->child(0)));
+                    break;
+                }
+                
+                // Turn this: Mul(value, constant)
+                // Into this: Shl(value, log2(constant))
+                if (hasOneBitSet(factor)) {
+                    unsigned shiftAmount = WTF::fastLog2(static_cast<uint64_t>(factor));
+                    replaceWithNewValue(
+                        m_proc.add<Value>(
+                            Shl, m_value->origin(), m_value->child(0),
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), shiftAmount)));
+                    break;
+                }
+            } else if (m_value->child(1)->hasDouble()) {
+                double factor = m_value->child(1)->asDouble();
+
+                // Turn this: Mul(value, 1)
+                // Into this: value
+                if (factor == 1) {
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                }
+            }
+
+            break;
+
+        case Div:
+            // Turn this: Div(constant1, constant2)
+            // Into this: constant1 / constant2
+            // Note that this uses Div semantics. That's fine, because the rules for Div
+            // are strictly weaker: it has corner cases where it's allowed to do anything it
+            // likes.
+            if (replaceWithNewValue(m_value->child(0)->divConstant(m_proc, m_value->child(1))))
+                break;
+
+            if (m_value->child(1)->hasInt()) {
+                switch (m_value->child(1)->asInt()) {
+                case -1:
+                    // Turn this: Div(value, -1)
+                    // Into this: Neg(value)
+                    replaceWithNewValue(
+                        m_proc.add<Value>(Neg, m_value->origin(), m_value->child(0)));
+                    break;
+
+                case 0:
+                    // Turn this: Div(value, 0)
+                    // Into this: 0
+                    // We can do this because it's precisely correct for ChillDiv and for Div we
+                    // are allowed to do whatever we want.
+                    replaceWithIdentity(m_value->child(1));
+                    break;
+
+                case 1:
+                    // Turn this: Div(value, 1)
+                    // Into this: value
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+
+                default:
+                    // Perform super comprehensive strength reduction of division. Currently we
+                    // only do this for 32-bit divisions, since we need a high multiply
+                    // operation. We emulate it using 64-bit multiply. We can't emulate 64-bit
+                    // high multiply with a 128-bit multiply because we don't have a 128-bit
+                    // multiply. We could do it with a patchpoint if we cared badly enough.
+
+                    if (m_value->type() != Int32)
+                        break;
+
+                    int32_t divisor = m_value->child(1)->asInt32();
+                    DivisionMagic<int32_t> magic = computeDivisionMagic(divisor);
+
+                    // Perform the "high" multiplication. We do it just to get the high bits.
+                    // This is sort of like multiplying by the reciprocal, just more gnarly. It's
+                    // from Hacker's Delight and I don't claim to understand it.
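+                    // (Illustrative numbers taken from Hacker's Delight, not from
+                    // this file: for divisor = 7, magicMultiplier = 0x92492493 and
+                    // shift = 2; that multiplier is negative while the divisor is
+                    // positive, so the Add fixup below applies.)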
+                    Value* magicQuotient = m_insertionSet.insert<Value>(
+                        m_index, Trunc, m_value->origin(),
+                        m_insertionSet.insert<Value>(
+                            m_index, ZShr, m_value->origin(),
+                            m_insertionSet.insert<Value>(
+                                m_index, Mul, m_value->origin(),
+                                m_insertionSet.insert<Value>(
+                                    m_index, SExt32, m_value->origin(), m_value->child(0)),
+                                m_insertionSet.insert<Const64Value>(
+                                    m_index, m_value->origin(), magic.magicMultiplier)),
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), 32)));
+
+                    if (divisor > 0 && magic.magicMultiplier < 0) {
+                        magicQuotient = m_insertionSet.insert<Value>(
+                            m_index, Add, m_value->origin(), magicQuotient, m_value->child(0));
+                    }
+                    if (divisor < 0 && magic.magicMultiplier > 0) {
+                        magicQuotient = m_insertionSet.insert<Value>(
+                            m_index, Sub, m_value->origin(), magicQuotient, m_value->child(0));
+                    }
+                    if (magic.shift > 0) {
+                        magicQuotient = m_insertionSet.insert<Value>(
+                            m_index, SShr, m_value->origin(), magicQuotient,
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), magic.shift));
+                    }
+                    replaceWithIdentity(
+                        m_insertionSet.insert<Value>(
+                            m_index, Add, m_value->origin(), magicQuotient,
+                            m_insertionSet.insert<Value>(
+                                m_index, ZShr, m_value->origin(), magicQuotient,
+                                m_insertionSet.insert<Const32Value>(
+                                    m_index, m_value->origin(), 31))));
+                    break;
+                }
+                break;
+            }
+            break;
+
+        case UDiv:
+            // Turn this: UDiv(constant1, constant2)
+            // Into this: constant1 / constant2
+            if (replaceWithNewValue(m_value->child(0)->uDivConstant(m_proc, m_value->child(1))))
+                break;
+
+            if (m_value->child(1)->hasInt()) {
+                switch (m_value->child(1)->asInt()) {
+                case 0:
+                    // Turn this: UDiv(value, 0)
+                    // Into this: 0
+                    // We can do whatever we want here so we might as well do the chill thing,
+                    // in case we add chill versions of UDiv in the future.
+                    replaceWithIdentity(m_value->child(1));
+                    break;
+
+                case 1:
+                    // Turn this: UDiv(value, 1)
+                    // Into this: value
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                default:
+                    // FIXME: We should do comprehensive strength reduction for unsigned numbers. Likely,
+                    // we will just want to copy what LLVM does. https://bugs.webkit.org/show_bug.cgi?id=164809
+                    break;
+                }
+            }
+            break;
+
+        case Mod:
+            // Turn this: Mod(constant1, constant2)
+            // Into this: constant1 % constant2
+            // Note that this uses Mod semantics.
+            if (replaceWithNewValue(m_value->child(0)->modConstant(m_proc, m_value->child(1))))
+                break;
+
+            // Modulo by constant is more efficient if we turn it into Div, and then let Div get
+            // optimized.
+            if (m_value->child(1)->hasInt()) {
+                switch (m_value->child(1)->asInt()) {
+                case 0:
+                    // Turn this: Mod(value, 0)
+                    // Into this: 0
+                    // This is correct according to ChillMod semantics.
+                    replaceWithIdentity(m_value->child(1));
+                    break;
+
+                default:
+                    // Turn this: Mod(N, D)
+                    // Into this: Sub(N, Mul(Div(N, D), D))
+                    //
+                    // This is a speed-up because we use our existing Div optimizations.
+                    //
+                    // Here's an easier way to look at it:
+                    //     N % D = N - N / D * D
+                    //
+                    // Note that this does not work for D = 0 and ChillMod. The expected result is 0.
+                    // That's why we have a special-case above.
+                    //     X % 0 = X - X / 0 * 0 = X     (should be 0)
+                    //
+                    // This does work for the D = -1 special case.
+                    //     -2^31 % -1 = -2^31 - -2^31 / -1 * -1
+                    //                = -2^31 - -2^31 * -1
+                    //                = -2^31 - -2^31
+                    //                = 0
+
+                    Kind divKind = Div;
+                    divKind.setIsChill(m_value->isChill());
+
+                    replaceWithIdentity(
+                        m_insertionSet.insert<Value>(
+                            m_index, Sub, m_value->origin(),
+                            m_value->child(0),
+                            m_insertionSet.insert<Value>(
+                                m_index, Mul, m_value->origin(),
+                                m_insertionSet.insert<Value>(
+                                    m_index, divKind, m_value->origin(),
+                                    m_value->child(0), m_value->child(1)),
+                                m_value->child(1))));
+                    break;
+                }
+                break;
+            }
+            
+            break;
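+            // To see the rewrite on concrete values: Mod(14, 4) becomes
+            // Sub(14, Mul(Div(14, 4), 4)) = 14 - 3 * 4 = 2, and the inner Div then
+            // folds via the Div rules above.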
+
+        case UMod:
+            // Turn this: UMod(constant1, constant2)
+            // Into this: (unsigned)constant1 % constant2
+            replaceWithNewValue(m_value->child(0)->uModConstant(m_proc, m_value->child(1)));
+            // FIXME: We should do what we do for Mod since the same principle applies here.
+            // https://bugs.webkit.org/show_bug.cgi?id=164809
+            break;
+
+        case BitAnd:
+            handleCommutativity();
+
+            // Turn this: BitAnd(constant1, constant2)
+            // Into this: constant1 & constant2
+            if (Value* constantBitAnd = m_value->child(0)->bitAndConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantBitAnd);
+                break;
+            }
+
+            // Turn this: BitAnd(BitAnd(value, constant1), constant2)
+            // Into this: BitAnd(value, constant1 & constant2).
+            if (m_value->child(0)->opcode() == BitAnd) {
+                Value* newConstant = m_value->child(1)->bitAndConstant(m_proc, m_value->child(0)->child(1));
+                if (newConstant) {
+                    m_insertionSet.insertValue(m_index, newConstant);
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_value->child(1) = newConstant;
+                    m_changed = true;
+                }
+            }
+
+            // Turn this: BitAnd(valueX, valueX)
+            // Into this: valueX.
+            if (m_value->child(0) == m_value->child(1)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: BitAnd(value, zero-constant)
+            // Into this: zero-constant.
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(1));
+                break;
+            }
+
+            // Turn this: BitAnd(value, all-ones)
+            // Into this: value.
+            if ((m_value->type() == Int64 && m_value->child(1)->isInt(0xffffffffffffffff))
+                || (m_value->type() == Int32 && m_value->child(1)->isInt(0xffffffff))) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: BitAnd(64-bit value, 32 ones)
+            // Into this: ZExt32(Trunc(64-bit value))
+            if (m_value->child(1)->isInt64(0xffffffffllu)) {
+                Value* newValue = m_insertionSet.insert<Value>(
+                    m_index, ZExt32, m_value->origin(),
+                    m_insertionSet.insert<Value>(m_index, Trunc, m_value->origin(), m_value->child(0)));
+                replaceWithIdentity(newValue);
+                break;
+            }
+
+            // Turn this: BitAnd(SExt8(value), mask) where (mask & 0xffffff00) == 0
+            // Into this: BitAnd(value, mask)
+            if (m_value->child(0)->opcode() == SExt8 && m_value->child(1)->hasInt32()
+                && !(m_value->child(1)->asInt32() & 0xffffff00)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // Turn this: BitAnd(SExt16(value), mask) where (mask & 0xffff0000) == 0
+            // Into this: BitAnd(value, mask)
+            if (m_value->child(0)->opcode() == SExt16 && m_value->child(1)->hasInt32()
+                && !(m_value->child(1)->asInt32() & 0xffff0000)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // Turn this: BitAnd(SExt32(value), mask) where (mask & 0xffffffff00000000) == 0
+            // Into this: BitAnd(ZExt32(value), mask)
+            if (m_value->child(0)->opcode() == SExt32 && m_value->child(1)->hasInt32()
+                && !(m_value->child(1)->asInt32() & 0xffffffff00000000llu)) {
+                m_value->child(0) = m_insertionSet.insert<Value>(
+                    m_index, ZExt32, m_value->origin(),
+                    m_value->child(0)->child(0), m_value->child(0)->child(1));
+                m_changed = true;
+            }
+
+            // Turn this: BitAnd(Op(value, constant1), constant2)
+            //     where !(constant1 & constant2)
+            //       and Op is BitOr or BitXor
+            // into this: BitAnd(value, constant2)
+            if (m_value->child(1)->hasInt()) {
+                int64_t constant2 = m_value->child(1)->asInt();
+                switch (m_value->child(0)->opcode()) {
+                case BitOr:
+                case BitXor:
+                    if (m_value->child(0)->child(1)->hasInt()
+                        && !(m_value->child(0)->child(1)->asInt() & constant2)) {
+                        m_value->child(0) = m_value->child(0)->child(0);
+                        m_changed = true;
+                        break;
+                    }
+                    break;
+                default:
+                    break;
+                }
+            }
+            break;
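+            // A concrete instance of the last rule: BitAnd(BitOr(@x, 0xF0), 0x0F)
+            // becomes BitAnd(@x, 0x0F), since (0xF0 & 0x0F) == 0 means the BitOr can
+            // only set bits that the outer mask discards anyway.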
+
+        case BitOr:
+            handleCommutativity();
+
+            // Turn this: BitOr(constant1, constant2)
+            // Into this: constant1 | constant2
+            if (Value* constantBitOr = m_value->child(0)->bitOrConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantBitOr);
+                break;
+            }
+
+            // Turn this: BitOr(BitOr(value, constant1), constant2)
+            // Into this: BitOr(value, constant1 | constant2).
+            if (m_value->child(0)->opcode() == BitOr) {
+                Value* newConstant = m_value->child(1)->bitOrConstant(m_proc, m_value->child(0)->child(1));
+                if (newConstant) {
+                    m_insertionSet.insertValue(m_index, newConstant);
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_value->child(1) = newConstant;
+                    m_changed = true;
+                }
+            }
+
+            // Turn this: BitOr(valueX, valueX)
+            // Into this: valueX.
+            if (m_value->child(0) == m_value->child(1)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: BitOr(value, zero-constant)
+            // Into this: value.
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: BitOr(value, all-ones)
+            // Into this: all-ones.
+            if ((m_value->type() == Int64 && m_value->child(1)->isInt(0xffffffffffffffff))
+                || (m_value->type() == Int32 && m_value->child(1)->isInt(0xffffffff))) {
+                replaceWithIdentity(m_value->child(1));
+                break;
+            }
+
+            break;
+
+        case BitXor:
+            handleCommutativity();
+
+            // Turn this: BitXor(constant1, constant2)
+            // Into this: constant1 ^ constant2
+            if (Value* constantBitXor = m_value->child(0)->bitXorConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantBitXor);
+                break;
+            }
+
+            // Turn this: BitXor(BitXor(value, constant1), constant2)
+            // Into this: BitXor(value, constant1 ^ constant2).
+            if (m_value->child(0)->opcode() == BitXor) {
+                Value* newConstant = m_value->child(1)->bitXorConstant(m_proc, m_value->child(0)->child(1));
+                if (newConstant) {
+                    m_insertionSet.insertValue(m_index, newConstant);
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_value->child(1) = newConstant;
+                    m_changed = true;
+                }
+            }
+
+            // Turn this: BitXor(compare, 1)
+            // Into this: invertedCompare
+            if (m_value->child(1)->isInt32(1)) {
+                if (Value* invertedCompare = m_value->child(0)->invertedCompare(m_proc)) {
+                    replaceWithNewValue(invertedCompare);
+                    break;
+                }
+            }
+
+            // Turn this: BitXor(valueX, valueX)
+            // Into this: zero-constant.
+            if (m_value->child(0) == m_value->child(1)) {
+                replaceWithNewValue(m_proc.addIntConstant(m_value, 0));
+                break;
+            }
+
+            // Turn this: BitXor(value, zero-constant)
+            // Into this: value.
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            break;
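+            // As an instance of the invertedCompare rule: BitXor(LessThan(@a, @b), 1)
+            // should become GreaterEqual(@a, @b), since !(a < b) is (a >= b).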
+
+        case Shl:
+            // Turn this: Shl(constant1, constant2)
+            // Into this: constant1 << constant2
+            if (Value* constant = m_value->child(0)->shlConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case SShr:
+            // Turn this: SShr(constant1, constant2)
+            // Into this: constant1 >> constant2
+            if (Value* constant = m_value->child(0)->sShrConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            if (m_value->child(1)->hasInt32()
+                && m_value->child(0)->opcode() == Shl
+                && m_value->child(0)->child(1)->hasInt32()
+                && m_value->child(1)->asInt32() == m_value->child(0)->child(1)->asInt32()) {
+                switch (m_value->child(1)->asInt32()) {
+                case 16:
+                    if (m_value->type() == Int32) {
+                        // Turn this: SShr(Shl(value, 16), 16)
+                        // Into this: SExt16(value)
+                        replaceWithNewValue(
+                            m_proc.add<Value>(
+                                SExt16, m_value->origin(), m_value->child(0)->child(0)));
+                    }
+                    break;
+
+                case 24:
+                    if (m_value->type() == Int32) {
+                        // Turn this: SShr(Shl(value, 24), 24)
+                        // Into this: SExt8(value)
+                        replaceWithNewValue(
+                            m_proc.add<Value>(
+                                SExt8, m_value->origin(), m_value->child(0)->child(0)));
+                    }
+                    break;
+
+                case 32:
+                    if (m_value->type() == Int64) {
+                        // Turn this: SShr(Shl(value, 32), 32)
+                        // Into this: SExt32(Trunc(value))
+                        replaceWithNewValue(
+                            m_proc.add<Value>(
+                                SExt32, m_value->origin(),
+                                m_insertionSet.insert<Value>(
+                                    m_index, Trunc, m_value->origin(),
+                                    m_value->child(0)->child(0))));
+                    }
+                    break;
+
+                // FIXME: Add cases for 48 and 56, but that would translate to SExt32(SExt8) or
+                // SExt32(SExt16), which we don't currently lower efficiently.
+
+                default:
+                    break;
+                }
+
+                if (m_value->opcode() != SShr)
+                    break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case ZShr:
+            // Turn this: ZShr(constant1, constant2)
+            // Into this: (unsigned)constant1 >> constant2
+            if (Value* constant = m_value->child(0)->zShrConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case RotR:
+            // Turn this: RotR(constant1, constant2)
+            // Into this: (constant1 >> constant2) | (constant1 << sizeof(constant1) * 8 - constant2)
+            if (Value* constant = m_value->child(0)->rotRConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case RotL:
+            // Turn this: RotL(constant1, constant2)
+            // Into this: (constant1 << constant2) | (constant1 >> sizeof(constant1) * 8 - constant2)
+            if (Value* constant = m_value->child(0)->rotLConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case Abs:
+            // Turn this: Abs(constant)
+            // Into this: fabs<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->absConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            // Turn this: Abs(Abs(value))
+            // Into this: Abs(value)
+            if (m_value->child(0)->opcode() == Abs) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: Abs(BitwiseCast(value))
+            // Into this: BitwiseCast(And(value, mask-top-bit))
+            if (m_value->child(0)->opcode() == BitwiseCast) {
+                Value* mask;
+                if (m_value->type() == Double)
+                    mask = m_insertionSet.insert<Const64Value>(m_index, m_value->origin(), ~(1ll << 63));
+                else
+                    mask = m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), ~(1l << 31));
+
+                Value* bitAnd = m_insertionSet.insert<Value>(m_index, BitAnd, m_value->origin(),
+                    m_value->child(0)->child(0),
+                    mask);
+                Value* cast = m_insertionSet.insert<Value>(m_index, BitwiseCast, m_value->origin(), bitAnd);
+                replaceWithIdentity(cast);
+                break;
+            }
+            break;
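+            // The BitwiseCast rule is just IEEE 754 sign-bit clearing: for Double,
+            // Abs(BitwiseCast(@x)) becomes BitwiseCast(BitAnd(@x, 0x7fffffffffffffff)),
+            // since ~(1ll << 63) masks off exactly the sign bit.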
+
+        case Ceil:
+            // Turn this: Ceil(constant)
+            // Into this: ceil<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->ceilConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            // Turn this: Ceil(roundedValue)
+            // Into this: roundedValue
+            if (m_value->child(0)->isRounded()) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+            break;
+
+        case Floor:
+            // Turn this: Floor(constant)
+            // Into this: floor<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->floorConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            // Turn this: Floor(roundedValue)
+            // Into this: roundedValue
+            if (m_value->child(0)->isRounded()) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+            break;
+
+        case Sqrt:
+            // Turn this: Sqrt(constant)
+            // Into this: sqrt<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->sqrtConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case BitwiseCast:
+            // Turn this: BitwiseCast(constant)
+            // Into this: bitwise_cast<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->bitwiseCastConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            // Turn this: BitwiseCast(BitwiseCast(value))
+            // Into this: value
+            if (m_value->child(0)->opcode() == BitwiseCast) {
+                replaceWithIdentity(m_value->child(0)->child(0));
+                break;
+            }
+            break;
+
+        case SExt8:
+            // Turn this: SExt8(constant)
+            // Into this: static_cast<int8_t>(constant)
+            if (m_value->child(0)->hasInt32()) {
+                int32_t result = static_cast<int8_t>(m_value->child(0)->asInt32());
+                replaceWithNewValue(m_proc.addIntConstant(m_value, result));
+                break;
+            }
+
+            // Turn this: SExt8(SExt8(value))
+            //   or this: SExt8(SExt16(value))
+            // Into this: SExt8(value)
+            if (m_value->child(0)->opcode() == SExt8 || m_value->child(0)->opcode() == SExt16) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()) {
+                Value* input = m_value->child(0)->child(0);
+                int32_t mask = m_value->child(0)->child(1)->asInt32();
+                
+                // Turn this: SExt8(BitAnd(input, mask)) where (mask & 0xff) == 0xff
+                // Into this: SExt8(input)
+                if ((mask & 0xff) == 0xff) {
+                    m_value->child(0) = input;
+                    m_changed = true;
+                    break;
+                }
+                
+                // Turn this: SExt8(BitAnd(input, mask)) where (mask & 0x80) == 0
+                // Into this: BitAnd(input, mask & 0x7f)
+                if (!(mask & 0x80)) {
+                    replaceWithNewValue(
+                        m_proc.add<Value>(
+                            BitAnd, m_value->origin(), input,
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), mask & 0x7f)));
+                    break;
+                }
+            }
+            break;
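+            // Worked instances of the two mask rules: SExt8(BitAnd(@x, 0xff)) becomes
+            // SExt8(@x), since the mask keeps the whole low byte; and
+            // SExt8(BitAnd(@x, 0x3f)) becomes BitAnd(@x, 0x3f), since bit 7 of the
+            // result is always zero and sign-extension is then a no-op.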
+
+        case SExt16:
+            // Turn this: SExt16(constant)
+            // Into this: static_cast<int16_t>(constant)
+            if (m_value->child(0)->hasInt32()) {
+                int32_t result = static_cast<int16_t>(m_value->child(0)->asInt32());
+                replaceWithNewValue(m_proc.addIntConstant(m_value, result));
+                break;
+            }
+
+            // Turn this: SExt16(SExt16(value))
+            // Into this: SExt16(value)
+            if (m_value->child(0)->opcode() == SExt16) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // Turn this: SExt16(SExt8(value))
+            // Into this: SExt8(value)
+            if (m_value->child(0)->opcode() == SExt8) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()) {
+                Value* input = m_value->child(0)->child(0);
+                int32_t mask = m_value->child(0)->child(1)->asInt32();
+                
+                // Turn this: SExt16(BitAnd(input, mask)) where (mask & 0xffff) == 0xffff
+                // Into this: SExt16(input)
+                if ((mask & 0xffff) == 0xffff) {
+                    m_value->child(0) = input;
+                    m_changed = true;
+                    break;
+                }
+                
+                // Turn this: SExt16(BitAnd(input, mask)) where (mask & 0x8000) == 0
+                // Into this: BitAnd(input, mask & 0x7fff)
+                if (!(mask & 0x8000)) {
+                    replaceWithNewValue(
+                        m_proc.add<Value>(
+                            BitAnd, m_value->origin(), input,
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), mask & 0x7fff)));
+                    break;
+                }
+            }
+            break;
+
+        case SExt32:
+            // Turn this: SExt32(constant)
+            // Into this: static_cast<int64_t>(constant)
+            if (m_value->child(0)->hasInt32()) {
+                replaceWithNewValue(m_proc.addIntConstant(m_value, m_value->child(0)->asInt32()));
+                break;
+            }
+
+            // Turn this: SExt32(BitAnd(input, mask)) where (mask & 0x80000000) == 0
+            // Into this: ZExt32(BitAnd(input, mask))
+            if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()
+                && !(m_value->child(0)->child(1)->asInt32() & 0x80000000)) {
+                replaceWithNewValue(
+                    m_proc.add<Value>(
+                        ZExt32, m_value->origin(), m_value->child(0)));
+                break;
+            }
+            break;
+
+        case ZExt32:
+            // Turn this: ZExt32(constant)
+            // Into this: static_cast<uint64_t>(static_cast<uint32_t>(constant))
+            if (m_value->child(0)->hasInt32()) {
+                replaceWithNewValue(
+                    m_proc.addIntConstant(
+                        m_value,
+                        static_cast<uint64_t>(static_cast<uint32_t>(m_value->child(0)->asInt32()))));
+                break;
+            }
+            break;
+
+        case Trunc:
+            // Turn this: Trunc(constant)
+            // Into this: static_cast<int32_t>(constant)
+            if (m_value->child(0)->hasInt64() || m_value->child(0)->hasDouble()) {
+                replaceWithNewValue(
+                    m_proc.addIntConstant(m_value, static_cast<int32_t>(m_value->child(0)->asInt64())));
+                break;
+            }
+
+            // Turn this: Trunc(SExt32(value)) or Trunc(ZExt32(value))
+            // Into this: value
+            if (m_value->child(0)->opcode() == SExt32 || m_value->child(0)->opcode() == ZExt32) {
+                replaceWithIdentity(m_value->child(0)->child(0));
+                break;
+            }
+
+            // Turn this: Trunc(Op(value, constant))
+            //     where !(constant & 0xffffffff)
+            //       and Op is Add, Sub, BitOr, or BitXor
+            // into this: Trunc(value)
+            switch (m_value->child(0)->opcode()) {
+            case Add:
+            case Sub:
+            case BitOr:
+            case BitXor:
+                if (m_value->child(0)->child(1)->hasInt64()
+                    && !(m_value->child(0)->child(1)->asInt64() & 0xffffffffll)) {
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_changed = true;
+                    break;
+                }
+                break;
+            default:
+                break;
+            }
+            break;
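+            // For example, Trunc(Add(@x, 0x700000000)) becomes Trunc(@x): the constant
+            // has no bits in the low 32, and the low 32 bits of a sum depend only on
+            // the low 32 bits of its operands.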
+
+        case IToD:
+            // Turn this: IToD(constant)
+            // Into this: ConstDouble(constant)
+            if (Value* constant = m_value->child(0)->iToDConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case IToF:
+            // Turn this: IToF(constant)
+            // Into this: ConstFloat(constant)
+            if (Value* constant = m_value->child(0)->iToFConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case FloatToDouble:
+            // Turn this: FloatToDouble(constant)
+            // Into this: ConstDouble(constant)
+            if (Value* constant = m_value->child(0)->floatToDoubleConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case DoubleToFloat:
+            // Turn this: DoubleToFloat(FloatToDouble(value))
+            // Into this: value
+            if (m_value->child(0)->opcode() == FloatToDouble) {
+                replaceWithIdentity(m_value->child(0)->child(0));
+                break;
+            }
+
+            // Turn this: DoubleToFloat(constant)
+            // Into this: ConstFloat(constant)
+            if (Value* constant = m_value->child(0)->doubleToFloatConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case Select:
+            // Turn this: Select(constant, a, b)
+            // Into this: constant ? a : b
+            if (m_value->child(0)->hasInt32()) {
+                replaceWithIdentity(
+                    m_value->child(0)->asInt32() ? m_value->child(1) : m_value->child(2));
+                break;
+            }
+
+            // Turn this: Select(Equal(x, 0), a, b)
+            // Into this: Select(x, b, a)
+            if (m_value->child(0)->opcode() == Equal && m_value->child(0)->child(1)->isInt(0)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                std::swap(m_value->child(1), m_value->child(2));
+                m_changed = true;
+                break;
+            }
+
+            // Turn this: Select(BitXor(bool, 1), a, b)
+            // Into this: Select(bool, b, a)
+            if (m_value->child(0)->opcode() == BitXor
+                && m_value->child(0)->child(1)->isInt32(1)
+                && m_value->child(0)->child(0)->returnsBool()) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                std::swap(m_value->child(1), m_value->child(2));
+                m_changed = true;
+                break;
+            }
+
+            // Turn this: Select(BitAnd(bool, xyz1), a, b)
+            // Into this: Select(bool, a, b)
+            if (m_value->child(0)->opcode() == BitAnd
+                && m_value->child(0)->child(1)->hasInt()
+                && m_value->child(0)->child(1)->asInt() & 1
+                && m_value->child(0)->child(0)->returnsBool()) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+                break;
+            }
+
+            // Turn this: Select(stuff, x, x)
+            // Into this: x
+            if (m_value->child(1) == m_value->child(2)) {
+                replaceWithIdentity(m_value->child(1));
+                break;
+            }
+            break;
+
+        case Load8Z:
+        case Load8S:
+        case Load16Z:
+        case Load16S:
+        case Load:
+        case Store8:
+        case Store16:
+        case Store: {
+            Value* address = m_value->lastChild();
+            MemoryValue* memory = m_value->as<MemoryValue>();
+
+            // Turn this: Load(Add(address, offset1), offset = offset2)
+            // Into this: Load(address, offset = offset1 + offset2)
+            //
+            // Also turns this: Store(value, Add(address, offset1), offset = offset2)
+            // Into this: Store(value, address, offset = offset1 + offset2)
+            if (address->opcode() == Add && address->child(1)->hasIntPtr()) {
+                intptr_t offset = address->child(1)->asIntPtr();
+                if (!sumOverflows<intptr_t>(offset, memory->offset())) {
+                    offset += memory->offset();
+                    int32_t smallOffset = static_cast<int32_t>(offset);
+                    if (smallOffset == offset) {
+                        address = address->child(0);
+                        memory->lastChild() = address;
+                        memory->setOffset(smallOffset);
+                        m_changed = true;
+                    }
+                }
+            }
+
+            // Turn this: Load(constant1, offset = constant2)
+            // Into this: Load(constant1 + constant2)
+            //
+            // This is a fun canonicalization. It purely regresses naively generated code. We rely
+            // on constant materialization to be smart enough to materialize this constant the smart
+            // way. We want this canonicalization because we want to know if two memory accesses see
+            // the same address.
+            if (memory->offset()) {
+                if (Value* newAddress = address->addConstant(m_proc, memory->offset())) {
+                    m_insertionSet.insertValue(m_index, newAddress);
+                    address = newAddress;
+                    memory->lastChild() = newAddress;
+                    memory->setOffset(0);
+                    m_changed = true;
+                }
+            }
+            
+            break;
+        }
+
+        case CCall: {
+            // Turn this: Call(fmod, constant1, constant2)
+            // Into this: fcall-constant(constant1, constant2)
+            double(*fmodDouble)(double, double) = fmod;
+            if (m_value->type() == Double
+                && m_value->numChildren() == 3
+                && m_value->child(0)->isIntPtr(reinterpret_cast<intptr_t>(fmodDouble))
+                && m_value->child(1)->type() == Double
+                && m_value->child(2)->type() == Double) {
+                replaceWithNewValue(m_value->child(1)->modConstant(m_proc, m_value->child(2)));
+            }
+            break;
+        }
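+        // For example, with constant arguments, Call(fmod, 7.5, 2.0) folds to
+        // ConstDouble(1.5) via modConstant on the two Double children.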
+        case Equal:
+            handleCommutativity();
+
+            // Turn this: Equal(bool, 0)
+            // Into this: BitXor(bool, 1)
+            if (m_value->child(0)->returnsBool() && m_value->child(1)->isInt32(0)) {
+                replaceWithNew<Value>(
+                    BitXor, m_value->origin(), m_value->child(0),
+                    m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), 1));
+                break;
+            }
+            
+            // Turn this: Equal(bool, 1)
+            // Into this: bool
+            if (m_value->child(0)->returnsBool() && m_value->child(1)->isInt32(1)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: Equal(const1, const2)
+            // Into this: const1 == const2
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->equalConstant(m_value->child(1))));
+            break;
+            
+        case NotEqual:
+            handleCommutativity();
+
+            if (m_value->child(0)->returnsBool()) {
+                // Turn this: NotEqual(bool, 0)
+                // Into this: bool
+                if (m_value->child(1)->isInt32(0)) {
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                }
+                
+                // Turn this: NotEqual(bool, 1)
+                // Into this: Equal(bool, 0)
+                if (m_value->child(1)->isInt32(1)) {
+                    replaceWithNew<Value>(
+                        Equal, m_value->origin(), m_value->child(0),
+                        m_insertionSet.insertIntConstant(m_index, m_value->origin(), Int32, 0));
+                    break;
+                }
+            }
+
+            // Turn this: NotEqual(const1, const2)
+            // Into this: const1 != const2
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->notEqualConstant(m_value->child(1))));
+            break;
+
+        case LessThan:
+            // FIXME: We could do a better job of canonicalizing integer comparisons.
+            // https://bugs.webkit.org/show_bug.cgi?id=150958
+
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->lessThanConstant(m_value->child(1))));
+            break;
+
+        case GreaterThan:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->greaterThanConstant(m_value->child(1))));
+            break;
+
+        case LessEqual:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->lessEqualConstant(m_value->child(1))));
+            break;
+
+        case GreaterEqual:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->greaterEqualConstant(m_value->child(1))));
+            break;
+
+        case Above:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->aboveConstant(m_value->child(1))));
+            break;
+
+        case Below:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->belowConstant(m_value->child(1))));
+            break;
+
+        case AboveEqual:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->aboveEqualConstant(m_value->child(1))));
+            break;
+
+        case BelowEqual:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->belowEqualConstant(m_value->child(1))));
+            break;
+
+        case EqualOrUnordered:
+            handleCommutativity();
+
+            // Turn this: EqualOrUnordered(const1, const2)
+            // Into this: isunordered(const1, const2) || const1 == const2.
+            // Turn this: EqualOrUnordered(value, const_NaN)
+            // Into this: 1.
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(1)->equalOrUnorderedConstant(m_value->child(0))));
+            break;
+
+        case CheckAdd: {
+            if (replaceWithNewValue(m_value->child(0)->checkAddConstant(m_proc, m_value->child(1))))
+                break;
+
+            handleCommutativity();
+            
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            IntRange leftRange = rangeFor(m_value->child(0));
+            IntRange rightRange = rangeFor(m_value->child(1));
+            if (!leftRange.couldOverflowAdd(rightRange, m_value->type())) {
+                replaceWithNewValue(
+                    m_proc.add<Value>(Add, m_value->origin(), m_value->child(0), m_value->child(1)));
+                break;
+            }
+            break;
+        }
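+        // Example of the range-based proof: in CheckAdd(BitAnd(@x, 0xffff), BitAnd(@y, 0xffff)),
+        // rangeFor() gives both children the range [0, 0xffff], so the sum is at most
+        // 0x1fffe, which fits in Int32; the CheckAdd becomes a plain Add.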
+
+        case CheckSub: {
+            if (replaceWithNewValue(m_value->child(0)->checkSubConstant(m_proc, m_value->child(1))))
+                break;
+
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            if (Value* negatedConstant = m_value->child(1)->checkNegConstant(m_proc)) {
+                m_insertionSet.insertValue(m_index, negatedConstant);
+                m_value->as<CheckValue>()->convertToAdd();
+                m_value->child(1) = negatedConstant;
+                m_changed = true;
+                break;
+            }
+
+            IntRange leftRange = rangeFor(m_value->child(0));
+            IntRange rightRange = rangeFor(m_value->child(1));
+            if (!leftRange.couldOverflowSub(rightRange, m_value->type())) {
+                replaceWithNewValue(
+                    m_proc.add<Value>(Sub, m_value->origin(), m_value->child(0), m_value->child(1)));
+                break;
+            }
+            break;
+        }
+
+        case CheckMul: {
+            if (replaceWithNewValue(m_value->child(0)->checkMulConstant(m_proc, m_value->child(1))))
+                break;
+
+            handleCommutativity();
+
+            if (m_value->child(1)->hasInt()) {
+                bool modified = true;
+                switch (m_value->child(1)->asInt()) {
+                case 0:
+                    replaceWithNewValue(m_proc.addIntConstant(m_value, 0));
+                    break;
+                case 1:
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                case 2:
+                    m_value->as<CheckValue>()->convertToAdd();
+                    m_value->child(1) = m_value->child(0);
+                    m_changed = true;
+                    break;
+                default:
+                    modified = false;
+                    break;
+                }
+                if (modified)
+                    break;
+            }
+
+            IntRange leftRange = rangeFor(m_value->child(0));
+            IntRange rightRange = rangeFor(m_value->child(1));
+            if (!leftRange.couldOverflowMul(rightRange, m_value->type())) {
+                replaceWithNewValue(
+                    m_proc.add<Value>(Mul, m_value->origin(), m_value->child(0), m_value->child(1)));
+                break;
+            }
+            break;
+        }
+
+        case Check: {
+            CheckValue* checkValue = m_value->as<CheckValue>();
+            
+            if (checkValue->child(0)->isLikeZero()) {
+                checkValue->replaceWithNop();
+                m_changed = true;
+                break;
+            }
+
+            if (checkValue->child(0)->isLikeNonZero()) {
+                PatchpointValue* patchpoint =
+                    m_insertionSet.insert<PatchpointValue>(m_index, Void, checkValue->origin());
+
+                patchpoint->effects = Effects();
+                patchpoint->effects.reads = HeapRange::top();
+                patchpoint->effects.exitsSideways = true;
+
+                for (unsigned i = 1; i < checkValue->numChildren(); ++i)
+                    patchpoint->append(checkValue->constrainedChild(i));
+
+                patchpoint->setGenerator(checkValue->generator());
+
+                // Replace the rest of the block with an Oops.
+                for (unsigned i = m_index + 1; i < m_block->size() - 1; ++i)
+                    m_block->at(i)->replaceWithBottom(m_insertionSet, m_index);
+                m_block->last()->replaceWithOops(m_block);
+                m_block->last()->setOrigin(checkValue->origin());
+
+                // Replace ourselves last.
+                checkValue->replaceWithNop();
+                m_changedCFG = true;
+                break;
+            }
+
+            if (checkValue->child(0)->opcode() == NotEqual
+                && checkValue->child(0)->child(1)->isInt(0)) {
+                checkValue->child(0) = checkValue->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // If we are checking some bounded-size SSA expression that leads to a Select that
+            // has a constant as one of its results, then turn the Select into a Branch and split
+            // the code between the Check and the Branch. For example, this:
+            //
+            //     @a = Select(@p, @x, 42)
+            //     @b = Add(@a, 35)
+            //     Check(@b)
+            //
+            // becomes this:
+            //
+            //     Branch(@p, #truecase, #falsecase)
+            //
+            //   BB#truecase:
+            //     @b_truecase = Add(@x, 35)
+            //     Check(@b_truecase)
+            //     Upsilon(@x, ^a)
+            //     Upsilon(@b_truecase, ^b)
+            //     Jump(#continuation)
+            //
+            //   BB#falsecase:
+            //     @b_falsecase = Add(42, 35)
+            //     Check(@b_falsecase)
+            //     Upsilon(42, ^a)
+            //     Upsilon(@b_falsecase, ^b)
+            //     Jump(#continuation)
+            //
+            //   BB#continuation:
+            //     @a = Phi()
+            //     @b = Phi()
+            //
+            // The goal of this optimization is to kill a lot of code in one of those basic
+            // blocks. This is pretty much guaranteed since one of those blocks will replace all
+            // uses of the Select with a constant, and that constant will be transitively used
+            // from the check.
+            static const unsigned selectSpecializationBound = 3;
+            Value* select = findRecentNodeMatching(
+                m_value->child(0), selectSpecializationBound,
+                [&] (Value* value) -> bool {
+                    return value->opcode() == Select
+                        && (value->child(1)->isConstant() && value->child(2)->isConstant());
+                });
+            
+            if (select) {
+                specializeSelect(select);
+                break;
+            }
+            break;
+        }
+
+        case Branch: {
+            // Turn this: Branch(NotEqual(x, 0))
+            // Into this: Branch(x)
+            if (m_value->child(0)->opcode() == NotEqual && m_value->child(0)->child(1)->isInt(0)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // Turn this: Branch(Equal(x, 0), then, else)
+            // Into this: Branch(x, else, then)
+            if (m_value->child(0)->opcode() == Equal && m_value->child(0)->child(1)->isInt(0)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                std::swap(m_block->taken(), m_block->notTaken());
+                m_changed = true;
+            }
+            
+            // Turn this: Branch(BitXor(bool, 1), then, else)
+            // Into this: Branch(bool, else, then)
+            if (m_value->child(0)->opcode() == BitXor
+                && m_value->child(0)->child(1)->isInt32(1)
+                && m_value->child(0)->child(0)->returnsBool()) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                std::swap(m_block->taken(), m_block->notTaken());
+                m_changed = true;
+            }
+
+            // Turn this: Branch(BitAnd(bool, xyz1), then, else)
+            // Into this: Branch(bool, then, else)
+            if (m_value->child(0)->opcode() == BitAnd
+                && m_value->child(0)->child(1)->hasInt()
+                && m_value->child(0)->child(1)->asInt() & 1
+                && m_value->child(0)->child(0)->returnsBool()) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            TriState triState = m_value->child(0)->asTriState();
+
+            // Turn this: Branch(0, then, else)
+            // Into this: Jump(else)
+            if (triState == FalseTriState) {
+                m_block->taken().block()->removePredecessor(m_block);
+                m_value->replaceWithJump(m_block, m_block->notTaken());
+                m_changedCFG = true;
+                break;
+            }
+
+            // Turn this: Branch(not 0, then, else)
+            // Into this: Jump(then)
+            if (triState == TrueTriState) {
+                m_block->notTaken().block()->removePredecessor(m_block);
+                m_value->replaceWithJump(m_block, m_block->taken());
+                m_changedCFG = true;
+                break;
+            }
+
+            // If a check for the same property dominates us, we can kill the branch. This sort
+            // of makes sense here because it's cheap, but hacks like this show that we're going
+            // to need SCCP.
+            Value* check = m_pureCSE.findMatch(
+                ValueKey(Check, Void, m_value->child(0)), m_block, *m_dominators);
+            if (check) {
+                // The Check would have side-exited if child(0) was non-zero. So, it must be
+                // zero here.
+                m_block->taken().block()->removePredecessor(m_block);
+                m_value->replaceWithJump(m_block, m_block->notTaken());
+                m_changedCFG = true;
+            }
+            break;
+        }
+            
+        default:
+            break;
+        }
+    }
+
+    // Find a node that:
+    //     - functor(node) returns true.
+    //     - it's reachable from the given node via children.
+    //     - it's in the last "bound" slots in the current basic block.
+    // This algorithm is optimized under the assumption that the bound is small.
+    template<typename Functor>
+    Value* findRecentNodeMatching(Value* start, unsigned bound, const Functor& functor)
+    {
+        unsigned startIndex = bound < m_index ? m_index - bound : 0;
+        Value* result = nullptr;
+        start->walk(
+            [&] (Value* value) -> Value::WalkStatus {
+                bool found = false;
+                for (unsigned i = startIndex; i <= m_index; ++i) {
+                    if (m_block->at(i) == value)
+                        found = true;
+                }
+                if (!found)
+                    return Value::IgnoreChildren;
+
+                if (functor(value)) {
+                    result = value;
+                    return Value::Stop;
+                }
+
+                return Value::Continue;
+            });
+        return result;
+    }
+
+    // This specializes a sequence of code up to a Select. This doesn't work when we're at a
+    // terminal. It would be cool to fix that eventually. The main problem is that instead of
+    // splitting the block, we should just insert the then/else blocks. We'll have to create
+    // double the Phis and double the Upsilons. It'll probably be the sort of optimization that
+    // we want to do only after we've done loop optimizations, since this will *definitely*
+    // obscure things. In fact, even this simpler form of select specialization will possibly
+    // obscure other optimizations. It would be great to have two modes of strength reduction,
+    // one that does obscuring optimizations and runs late, and another that does not do
+    // obscuring optimizations and runs early.
+    // FIXME: Make select specialization handle branches.
+    // FIXME: Have a form of strength reduction that does no obscuring optimizations and runs
+    // early.
+    void specializeSelect(Value* source)
+    {
+        if (verbose)
+            dataLog("Specializing select: ", deepDump(m_proc, source), "\n");
+
+        // This splits m_block and mutates m_index to account for the fact that m_block
+        // got the front of it chopped off.
+        BasicBlock* predecessor =
+            m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+
+        // Splitting will commit the insertion set, which changes the exact position of the
+        // source. That's why we do the search after splitting.
+        unsigned startIndex = UINT_MAX;
+        for (unsigned i = predecessor->size(); i--;) {
+            if (predecessor->at(i) == source) {
+                startIndex = i;
+                break;
+            }
+        }
+        
+        RELEASE_ASSERT(startIndex != UINT_MAX);
+
+        // By BasicBlock convention, caseIndex == 0 => then, caseIndex == 1 => else.
+        static const unsigned numCases = 2;
+        BasicBlock* cases[numCases];
+        for (unsigned i = 0; i < numCases; ++i)
+            cases[i] = m_blockInsertionSet.insertBefore(m_block);
+
+        HashMap<Value*, Value*> mappings[2];
+
+        // Save things we want to know about the source.
+        Value* predicate = source->child(0);
+
+        for (unsigned i = 0; i < numCases; ++i)
+            mappings[i].add(source, source->child(1 + i));
+
+        auto cloneValue = [&] (Value* value) {
+            ASSERT(value != source);
+
+            for (unsigned i = 0; i < numCases; ++i) {
+                Value* clone = m_proc.clone(value);
+                for (Value*& child : clone->children()) {
+                    if (Value* newChild = mappings[i].get(child))
+                        child = newChild;
+                }
+                if (value->type() != Void)
+                    mappings[i].add(value, clone);
+
+                cases[i]->append(clone);
+                if (value->type() != Void)
+                    cases[i]->appendNew<UpsilonValue>(m_proc, value->origin(), clone, value);
+            }
+
+            value->replaceWithPhi();
+        };
+
+        // The jump that the splitter inserted is of no use to us.
+        predecessor->removeLast(m_proc);
+
+        // Handle the source; it's special.
+        for (unsigned i = 0; i < numCases; ++i) {
+            cases[i]->appendNew<UpsilonValue>(
+                m_proc, source->origin(), source->child(1 + i), source);
+        }
+        source->replaceWithPhi();
+        m_insertionSet.insertValue(m_index, source);
+
+        // Now handle all values between the source and the check.
+        for (unsigned i = startIndex + 1; i < predecessor->size(); ++i) {
+            Value* value = predecessor->at(i);
+            value->owner = nullptr;
+
+            cloneValue(value);
+
+            if (value->type() != Void)
+                m_insertionSet.insertValue(m_index, value);
+            else
+                m_proc.deleteValue(value);
+        }
+
+        // Finally, deal with the check.
+        cloneValue(m_value);
+
+        // Remove the values from the predecessor.
+        predecessor->values().resize(startIndex);
+        
+        predecessor->appendNew<Value>(m_proc, Branch, source->origin(), predicate);
+        predecessor->setSuccessors(FrequentedBlock(cases[0]), FrequentedBlock(cases[1]));
+
+        for (unsigned i = 0; i < numCases; ++i) {
+            cases[i]->appendNew<Value>(m_proc, Jump, m_value->origin());
+            cases[i]->setSuccessors(FrequentedBlock(m_block));
+        }
+
+        m_changed = true;
+
+        predecessor->updatePredecessorsAfter();
+    }
+
+    // Turn this: Add(constant, value)
+    // Into this: Add(value, constant)
+    //
+    // Also:
+    // Turn this: Add(value1, value2)
+    // Into this: Add(value2, value1)
+    // If we decide that value2 coming first is the canonical ordering.
+    void handleCommutativity()
+    {
+        // Note that we have commutative operations that take more than two children. Those operations may
+        // commute their first two children while leaving the rest unaffected.
+        ASSERT(m_value->numChildren() >= 2);
+        
+        // Leave it alone if the right child is a constant.
+        if (m_value->child(1)->isConstant())
+            return;
+        
+        if (m_value->child(0)->isConstant()) {
+            std::swap(m_value->child(0), m_value->child(1));
+            m_changed = true;
+            return;
+        }
+
+        // Sort the operands. This is an important canonicalization. We use the index instead of
+        // the address to make this at least slightly deterministic.
+        if (m_value->child(0)->index() > m_value->child(1)->index()) {
+            std::swap(m_value->child(0), m_value->child(1));
+            m_changed = true;
+            return;
+        }
+    }
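+    // For example, Add(Const32(5), @x) becomes Add(@x, 5); and when neither child is a
+    // constant, Add(@2, @1) becomes Add(@1, @2), since operands are ordered by index.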
+
+    // FIXME: This should really be a forward analysis. Instead, we use a bounded-search
+    // backwards analysis.
+    IntRange rangeFor(Value* value, unsigned timeToLive = 5)
+    {
+        if (!timeToLive)
+            return IntRange::top(value->type());
+        
+        switch (value->opcode()) {
+        case Const32:
+        case Const64: {
+            int64_t intValue = value->asInt();
+            return IntRange(intValue, intValue);
+        }
+
+        case BitAnd:
+            if (value->child(1)->hasInt())
+                return IntRange::rangeForMask(value->child(1)->asInt(), value->type());
+            break;
+
+        case SShr:
+            if (value->child(1)->hasInt32()) {
+                return rangeFor(value->child(0), timeToLive - 1).sShr(
+                    value->child(1)->asInt32(), value->type());
+            }
+            break;
+
+        case ZShr:
+            if (value->child(1)->hasInt32()) {
+                return rangeFor(value->child(0), timeToLive - 1).zShr(
+                    value->child(1)->asInt32(), value->type());
+            }
+            break;
+
+        case Shl:
+            if (value->child(1)->hasInt32()) {
+                return rangeFor(value->child(0), timeToLive - 1).shl(
+                    value->child(1)->asInt32(), value->type());
+            }
+            break;
+
+        case Add:
+            return rangeFor(value->child(0), timeToLive - 1).add(
+                rangeFor(value->child(1), timeToLive - 1), value->type());
+
+        case Sub:
+            return rangeFor(value->child(0), timeToLive - 1).sub(
+                rangeFor(value->child(1), timeToLive - 1), value->type());
+
+        case Mul:
+            return rangeFor(value->child(0), timeToLive - 1).mul(
+                rangeFor(value->child(1), timeToLive - 1), value->type());
+
+        default:
+            break;
+        }
+
+        return IntRange::top(value->type());
+    }
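+    // For instance, rangeFor(BitAnd(@x, 0xff)) is [0, 0xff] and
+    // rangeFor(Shl(BitAnd(@x, 0xff), 4)) is [0, 0xff0]; the CheckAdd/CheckSub/CheckMul
+    // cases use these bounds to prove that overflow is impossible.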
+
+    template<typename ValueType, typename... Arguments>
+    void replaceWithNew(Arguments... arguments)
+    {
+        replaceWithNewValue(m_proc.add<ValueType>(arguments...));
+    }
+
+    bool replaceWithNewValue(Value* newValue)
+    {
+        if (!newValue)
+            return false;
+        m_insertionSet.insertValue(m_index, newValue);
+        m_value->replaceWithIdentity(newValue);
+        m_changed = true;
+        return true;
+    }
+
+    void replaceWithIdentity(Value* newValue)
+    {
+        m_value->replaceWithIdentity(newValue);
+        m_changed = true;
+    }
+
+    void handleShiftAmount()
+    {
+        // Shifting anything by zero is the identity.
+        if (m_value->child(1)->isInt32(0)) {
+            replaceWithIdentity(m_value->child(0));
+            return;
+        }
+
+        // The shift already masks its shift amount. If the shift amount is being masked by a
+        // redundant amount, then remove the mask. For example,
+        // Turn this: Shl(@x, BitAnd(@y, 63))
+        // Into this: Shl(@x, @y)
+        unsigned mask = sizeofType(m_value->type()) * 8 - 1;
+        if (m_value->child(1)->opcode() == BitAnd
+            && m_value->child(1)->child(1)->hasInt32()
+            && (m_value->child(1)->child(1)->asInt32() & mask) == mask) {
+            m_value->child(1) = m_value->child(1)->child(0);
+            m_changed = true;
+        }
+    }
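+    // For example, on Int64 the mask is 63, so Shl(@x, BitAnd(@y, 63)) becomes
+    // Shl(@x, @y); and Shl(@x, 0) becomes @x outright.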
+
+    void replaceIfRedundant()
+    {
+        m_changed |= m_pureCSE.process(m_value, *m_dominators);
+    }
+
+    void simplifyCFG()
+    {
+        if (verbose) {
+            dataLog("Before simplifyCFG:\n");
+            dataLog(m_proc);
+        }
+        
+        // We have three easy simplification rules:
+        //
+        // 1) If a successor is a block that just jumps to another block, then jump directly to
+        //    that block.
+        //
+        // 2) If all successors are the same and the operation has no effects, then use a jump
+        //    instead.
+        //
+        // 3) If you jump to a block that is not you and has one predecessor, then merge.
+        //
+        // Note that because of the first rule, this phase may introduce critical edges. That's fine.
+        // If you need broken critical edges, then you have to break them yourself.
+
+        // Note that this relies on predecessors being at least conservatively correct. It's fine for
+        // predecessors to mention a block that isn't actually a predecessor. It's *not* fine for a
+        // predecessor to be omitted. We assert as much in the loop. In practice, we precisely preserve
+        // predecessors during strength reduction since that minimizes the total number of fixpoint
+        // iterations needed to kill a lot of code.
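+        // A tiny example of rules 1 and 3: if #a ends in Branch(@p, #b, #c) and #b
+        // contains only Jump(#d), rule 1 rewrites the terminal to Branch(@p, #d, #c).
+        // And if #a ends in Jump(#c) where #a is #c's only predecessor, rule 3 merges
+        // #c into #a.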
+
+        for (BasicBlock* block : m_proc) {
+            if (verbose)
+                dataLog("Considering block ", *block, ":\n");
+
+            checkPredecessorValidity();
+
+            // We don't care about blocks that don't have successors.
+            if (!block->numSuccessors())
+                continue;
+
+            // First check if any of the successors of this block can be forwarded over.
+            for (BasicBlock*& successor : block->successorBlocks()) {
+                if (successor != block
+                    && successor->size() == 1
+                    && successor->last()->opcode() == Jump) {
+                    BasicBlock* newSuccessor = successor->successorBlock(0);
+                    if (newSuccessor != successor) {
+                        if (verbose) {
+                            dataLog(
+                                "Replacing ", pointerDump(block), "->", pointerDump(successor),
+                                " with ", pointerDump(block), "->", pointerDump(newSuccessor),
+                                "\n");
+                        }
+                        // Note that we do not do replacePredecessor() because the block we're
+                        // skipping will still have newSuccessor as its successor.
+                        newSuccessor->addPredecessor(block);
+                        successor = newSuccessor;
+                        m_changedCFG = true;
+                    }
+                }
+            }
+
+            // Now check if the block's terminal can be replaced with a jump.
+            if (block->numSuccessors() > 1) {
+                // The terminal must not have weird effects.
+                Effects effects = block->last()->effects();
+                effects.terminal = false;
+                if (!effects.mustExecute()) {
+                    // All of the successors must be the same.
+                    bool allSame = true;
+                    BasicBlock* firstSuccessor = block->successorBlock(0);
+                    for (unsigned i = 1; i < block->numSuccessors(); ++i) {
+                        if (block->successorBlock(i) != firstSuccessor) {
+                            allSame = false;
+                            break;
+                        }
+                    }
+                    if (allSame) {
+                        if (verbose) {
+                            dataLog(
+                                "Changing ", pointerDump(block), "'s terminal to a Jump.\n");
+                        }
+                        block->last()->replaceWithJump(block, FrequentedBlock(firstSuccessor));
+                        m_changedCFG = true;
+                    }
+                }
+            }
+
+            // Finally handle jumps to a block with one predecessor.
+            if (block->numSuccessors() == 1) {
+                BasicBlock* successor = block->successorBlock(0);
+                if (successor != block && successor->numPredecessors() == 1) {
+                    RELEASE_ASSERT(successor->predecessor(0) == block);
+                    
+                    // We can merge the two blocks, because the predecessor only jumps to the successor
+                    // and the successor is only reachable from the predecessor.
+                    
+                    // Remove the terminal.
+                    Value* value = block->values().takeLast();
+                    Origin jumpOrigin = value->origin();
+                    RELEASE_ASSERT(value->effects().terminal);
+                    m_proc.deleteValue(value);
+                    
+                    // Append the full contents of the successor to the predecessor.
+                    block->values().appendVector(successor->values());
+                    block->successors() = successor->successors();
+                    
+                    // Make sure that the successor has nothing left in it. Make sure that the block
+                    // has a terminal so that nobody chokes when they look at it.
+                    successor->values().resize(0);
+                    successor->appendNew<Value>(m_proc, Oops, jumpOrigin);
+                    successor->clearSuccessors();
+                    
+                    // Ensure that predecessors of block's new successors know what's up.
+                    for (BasicBlock* newSuccessor : block->successorBlocks())
+                        newSuccessor->replacePredecessor(successor, block);
+
+                    if (verbose) {
+                        dataLog(
+                            "Merged ", pointerDump(block), "->", pointerDump(successor), "\n");
+                    }
+                    
+                    m_changedCFG = true;
+                }
+            }
+        }
+
+        if (m_changedCFG && verbose) {
+            dataLog("B3 after simplifyCFG:\n");
+            dataLog(m_proc);
+        }
+    }
+
+    void checkPredecessorValidity()
+    {
+        if (!shouldValidateIRAtEachPhase())
+            return;
+
+        for (BasicBlock* block : m_proc) {
+            for (BasicBlock* successor : block->successorBlocks())
+                RELEASE_ASSERT(successor->containsPredecessor(block));
+        }
+    }
+
+    void killDeadCode()
+    {
+        GraphNodeWorklist<Value*, IndexSet<Value>> worklist;
+        Vector<UpsilonValue*, 64> upsilons;
+        for (BasicBlock* block : m_proc) {
+            for (Value* value : *block) {
+                Effects effects;
+                // We don't care about effects of SSA operations, since we model them more
+                // accurately than the effects() method does.
+                if (value->opcode() != Phi && value->opcode() != Upsilon)
+                    effects = value->effects();
+                
+                if (effects.mustExecute())
+                    worklist.push(value);
+                
+                if (UpsilonValue* upsilon = value->as<UpsilonValue>())
+                    upsilons.append(upsilon);
+            }
+        }
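+
+        // Fixpoint: transitively mark everything reachable from the root set, then revive any
+        // Upsilon whose Phi turned out to be live, since an Upsilon is only needed if its Phi
+        // is used.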
+        for (;;) {
+            while (Value* value = worklist.pop()) {
+                for (Value* child : value->children())
+                    worklist.push(child);
+            }
+            
+            bool didPush = false;
+            for (size_t upsilonIndex = 0; upsilonIndex < upsilons.size(); ++upsilonIndex) {
+                UpsilonValue* upsilon = upsilons[upsilonIndex];
+                if (worklist.saw(upsilon->phi())) {
+                    worklist.push(upsilon);
+                    upsilons[upsilonIndex--] = upsilons.last();
+                    upsilons.takeLast();
+                    didPush = true;
+                }
+            }
+            if (!didPush)
+                break;
+        }
+
+        IndexSet<Variable> liveVariables;
+        
+        for (BasicBlock* block : m_proc) {
+            size_t sourceIndex = 0;
+            size_t targetIndex = 0;
+            while (sourceIndex < block->size()) {
+                Value* value = block->at(sourceIndex++);
+                if (worklist.saw(value)) {
+                    if (VariableValue* variableValue = value->as<VariableValue>())
+                        liveVariables.add(variableValue->variable());
+                    block->at(targetIndex++) = value;
+                } else {
+                    m_proc.deleteValue(value);
+                    m_changed = true;
+                }
+            }
+            block->values().resize(targetIndex);
+        }
+
+        for (Variable* variable : m_proc.variables()) {
+            if (!liveVariables.contains(variable))
+                m_proc.deleteVariable(variable);
+        }
+    }
+
+    void simplifySSA()
+    {
+        // This runs Aycock and Horspool's algorithm on our Phi functions [1]. For most CFG patterns,
+        // this can take a suboptimal arrangement of Phi functions and make it optimal, as if you had
+        // run Cytron, Ferrante, Rosen, Wegman, and Zadeck. It's only suboptimal for irreducible
+        // CFGs. In practice, that doesn't matter, since we expect clients of B3 to run their own SSA
+        // conversion before lowering to B3, and in the case of the DFG, that conversion uses Cytron
+        // et al. In that context, this algorithm is intended to simplify Phi functions that were
+        // made redundant by prior CFG simplification. But according to Aycock and Horspool's paper,
+        // this algorithm is good enough that a B3 client could just give us maximal Phi's (i.e. Phi
+        // for each variable at each basic block) and we will make them optimal.
+        // [1] http://pages.cpsc.ucalgary.ca/~aycock/papers/ssa.ps
+
+        // Aycock and Horspool prescribe two rules that are to be run to fixpoint:
+        //
+        // 1) If all of the Phi's children are the same (i.e. it's one child referenced from one or
+        //    more Upsilons), then replace all uses of the Phi with the one child.
+        //
+        // 2) If all of the Phi's children are either the Phi itself or exactly one other child, then
+        //    replace all uses of the Phi with the one other child.
+        //
+        // Rule (2) subsumes rule (1), so we can just run (2). We only run one fixpoint iteration
+        // here. The premise is that in common cases, this will only find optimization opportunities
+        // as a result of CFG simplification, and usually CFG simplification will only do one round
+        // of block merging per ReduceStrength fixpoint iteration, so it's OK for this to only do
+        // one round of Phi merging - since Phis are the value analogue of blocks.
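+        //
+        // A sketch on hypothetical values: if Phi @1 is fed by Upsilon(@a, ^1), Upsilon(@1, ^1),
+        // and Upsilon(@a, ^1), then every non-self child is @a, so @1 becomes Identity(@a) and
+        // the Upsilons become Nops.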
+
+        PhiChildren phiChildren(m_proc);
+
+        for (Value* phi : phiChildren.phis()) {
+            Value* otherChild = nullptr;
+            bool ok = true;
+            for (Value* child : phiChildren[phi].values()) {
+                if (child == phi)
+                    continue;
+                if (child == otherChild)
+                    continue;
+                if (!otherChild) {
+                    otherChild = child;
+                    continue;
+                }
+                ok = false;
+                break;
+            }
+            if (!ok)
+                continue;
+            if (!otherChild) {
+                // Wow, this would be super weird. It probably won't happen, except that things could
+                // get weird as a consequence of stepwise simplifications in the strength reduction
+                // fixpoint.
+                continue;
+            }
+            
+            // Turn the Phi into an Identity and turn the Upsilons into Nops.
+            m_changed = true;
+            for (Value* upsilon : phiChildren[phi])
+                upsilon->replaceWithNop();
+            phi->replaceWithIdentity(otherChild);
+        }
+    }
+
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+    BlockInsertionSet m_blockInsertionSet;
+    BasicBlock* m_block { nullptr };
+    unsigned m_index { 0 };
+    Value* m_value { nullptr };
+    Dominators* m_dominators { nullptr };
+    PureCSE m_pureCSE;
+    bool m_changed { false };
+    bool m_changedCFG { false };
+};
+
+} // anonymous namespace
+
+bool reduceStrength(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "reduceStrength");
+    ReduceStrength reduceStrength(proc);
+    return reduceStrength.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3ReduceStrength.h b/Source/JavaScriptCore/b3/B3ReduceStrength.h
new file mode 100644
index 000000000..1abb80f64
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ReduceStrength.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Does strength reduction, constant folding, canonicalization, CFG simplification, DCE, and very
+// simple CSE. This phase runs those optimizations to fixpoint. The goal of the phase is to
+// dramatically reduce the complexity of the code. In the future, it's preferable to add new
+// optimizations to this phase rather than creating new phases, because then the optimizations can
+// participate in the fixpoint. However, because of the many interlocking optimizations, it can be
+// difficult to add sophisticated optimizations to it. For that reason we have full CSE in a
+// different phase, for example.
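+//
+// A usage sketch (hypothetical caller): "bool changed = reduceStrength(proc);" - the return value
+// reports whether the procedure was modified at all.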
+
+JS_EXPORT_PRIVATE bool reduceStrength(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SSACalculator.cpp b/Source/JavaScriptCore/b3/B3SSACalculator.cpp
new file mode 100644
index 000000000..30692a997
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SSACalculator.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3SSACalculator.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+void SSACalculator::Variable::dump(PrintStream& out) const
+{
+    out.print("var", m_index);
+}
+
+void SSACalculator::Variable::dumpVerbose(PrintStream& out) const
+{
+    dump(out);
+    if (!m_blocksWithDefs.isEmpty()) {
+        out.print("(defs: ");
+        CommaPrinter comma;
+        for (BasicBlock* block : m_blocksWithDefs)
+            out.print(comma, *block);
+        out.print(")");
+    }
+}
+
+void SSACalculator::Def::dump(PrintStream& out) const
+{
+    out.print("def(", *m_variable, ", ", *m_block, ", ", pointerDump(m_value), ")");
+}
+
+SSACalculator::SSACalculator(Procedure& proc)
+    : m_data(proc.size())
+    , m_proc(proc)
+{
+}
+
+SSACalculator::~SSACalculator()
+{
+}
+
+void SSACalculator::reset()
+{
+    m_variables.clear();
+    m_defs.clear();
+    m_phis.clear();
+    for (unsigned blockIndex = m_data.size(); blockIndex--;) {
+        m_data[blockIndex].m_defs.clear();
+        m_data[blockIndex].m_phis.clear();
+    }
+}
+
+SSACalculator::Variable* SSACalculator::newVariable()
+{
+    return &m_variables.alloc(Variable(m_variables.size()));
+}
+
+SSACalculator::Def* SSACalculator::newDef(Variable* variable, BasicBlock* block, Value* value)
+{
+    Def* def = m_defs.add(Def(variable, block, value));
+    auto result = m_data[block].m_defs.add(variable, def);
+    if (result.isNewEntry)
+        variable->m_blocksWithDefs.append(block);
+    else
+        result.iterator->value = def;
+    return def;
+}
+
+SSACalculator::Def* SSACalculator::nonLocalReachingDef(BasicBlock* block, Variable* variable)
+{
+    return reachingDefAtTail(m_dominators->idom(block), variable);
+}
+
+SSACalculator::Def* SSACalculator::reachingDefAtTail(BasicBlock* block, Variable* variable)
+{
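+    // Walk up the dominator tree until some block has a local def of this variable; if we reach
+    // the root without finding one, there is no reaching def.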
+    for (; block; block = m_dominators->idom(block)) {
+        if (Def* def = m_data[block].m_defs.get(variable))
+            return def;
+    }
+    return nullptr;
+}
+
+void SSACalculator::dump(PrintStream& out) const
+{
+    out.print("<Variables: [");
+    CommaPrinter comma;
+    for (unsigned i = 0; i < m_variables.size(); ++i) {
+        out.print(comma);
+        m_variables[i].dumpVerbose(out);
+    }
+    out.print("], Defs: [");
+    comma = CommaPrinter();
+    for (Def* def : const_cast<SSACalculator*>(this)->m_defs)
+        out.print(comma, *def);
+    out.print("], Phis: [");
+    comma = CommaPrinter();
+    for (Def* def : const_cast<SSACalculator*>(this)->m_phis)
+        out.print(comma, *def);
+    out.print("], Block data: [");
+    comma = CommaPrinter();
+    for (unsigned blockIndex = 0; blockIndex < m_proc.size(); ++blockIndex) {
+        BasicBlock* block = m_proc[blockIndex];
+        if (!block)
+            continue;
+        
+        out.print(comma, *block, "=>(");
+        out.print("Defs: {");
+        CommaPrinter innerComma;
+        for (auto entry : m_data[block].m_defs)
+            out.print(innerComma, *entry.key, "->", *entry.value);
+        out.print("}, Phis: {");
+        innerComma = CommaPrinter();
+        for (Def* def : m_data[block].m_phis)
+            out.print(innerComma, *def);
+        out.print("})");
+    }
+    out.print("]>");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3SSACalculator.h b/Source/JavaScriptCore/b3/B3SSACalculator.h
new file mode 100644
index 000000000..be9a0648f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SSACalculator.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Dominators.h"
+#include "B3ProcedureInlines.h"
+#include <wtf/Bag.h>
+#include <wtf/IndexMap.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC { namespace B3 {
+
+// SSACalculator provides a reusable tool for building SSA form. It's modeled after
+// DFG::SSACalculator.
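+//
+// A usage sketch (hypothetical client code): allocate one Variable per value you want in SSA
+// form, record a Def at every point that writes it, let computePhis() place Phis on the pruned
+// iterated dominance frontier, and then rewrite reads using reachingDefAtHead() or
+// reachingDefAtTail() combined with your own tracking of defs within each block.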
+
+class SSACalculator {
+public:
+    SSACalculator(Procedure&);
+    ~SSACalculator();
+
+    void reset();
+
+    class Variable {
+    public:
+        unsigned index() const { return m_index; }
+        
+        void dump(PrintStream&) const;
+        void dumpVerbose(PrintStream&) const;
+        
+    private:
+        friend class SSACalculator;
+        
+        Variable()
+            : m_index(UINT_MAX)
+        {
+        }
+        
+        Variable(unsigned index)
+            : m_index(index)
+        {
+        }
+
+        Vector<BasicBlock*, 4> m_blocksWithDefs;
+        unsigned m_index;
+    };
+
+    class Def {
+    public:
+        Variable* variable() const { return m_variable; }
+        BasicBlock* block() const { return m_block; }
+        
+        Value* value() const { return m_value; }
+        
+        void dump(PrintStream&) const;
+        
+    private:
+        friend class SSACalculator;
+        
+        Def()
+            : m_variable(nullptr)
+            , m_block(nullptr)
+            , m_value(nullptr)
+        {
+        }
+        
+        Def(Variable* variable, BasicBlock* block, Value* value)
+            : m_variable(variable)
+            , m_block(block)
+            , m_value(value)
+        {
+        }
+        
+        Variable* m_variable;
+        BasicBlock* m_block;
+        Value* m_value;
+    };
+
+    Variable* newVariable();
+    Def* newDef(Variable*, BasicBlock*, Value*);
+
+    Variable* variable(unsigned index) { return &m_variables[index]; }
+
+    template<typename Functor>
+    void computePhis(const Functor& functor)
+    {
+        m_dominators = &m_proc.dominators();
+        for (Variable& variable : m_variables) {
+            m_dominators->forAllBlocksInPrunedIteratedDominanceFrontierOf(
+                variable.m_blocksWithDefs,
+                [&] (BasicBlock* block) -> bool {
+                    Value* phi = functor(&variable, block);
+                    if (!phi)
+                        return false;
+
+                    BlockData& data = m_data[block];
+                    Def* phiDef = m_phis.add(Def(&variable, block, phi));
+                    data.m_phis.append(phiDef);
+
+                    data.m_defs.add(&variable, phiDef);
+                    return true;
+                });
+        }
+    }
+
+    const Vector<Def*>& phisForBlock(BasicBlock* block)
+    {
+        return m_data[block].m_phis;
+    }
+    
+    // Ignores defs within the given block; it assumes that you've taken care of those
+    // yourself.
+    Def* nonLocalReachingDef(BasicBlock*, Variable*);
+    Def* reachingDefAtHead(BasicBlock* block, Variable* variable)
+    {
+        return nonLocalReachingDef(block, variable);
+    }
+    
+    // Considers the def within the given block, but only works at the tail of the block.
+    Def* reachingDefAtTail(BasicBlock*, Variable*);
+    
+    void dump(PrintStream&) const;
+    
+private:
+    SegmentedVector<Variable> m_variables;
+    Bag<Def> m_defs;
+    
+    Bag<Def> m_phis;
+    
+    struct BlockData {
+        HashMap<Variable*, Def*> m_defs;
+        Vector<Def*> m_phis;
+    };
+    
+    IndexMap<BasicBlock, BlockData> m_data;
+
+    Dominators* m_dominators { nullptr };
+    Procedure& m_proc;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SlotBaseValue.cpp b/Source/JavaScriptCore/b3/B3SlotBaseValue.cpp
new file mode 100644
index 000000000..b5fd69bc8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SlotBaseValue.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3SlotBaseValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackSlot.h"
+
+namespace JSC { namespace B3 {
+
+SlotBaseValue::~SlotBaseValue()
+{
+}
+
+void SlotBaseValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, pointerDump(m_slot));
+}
+
+Value* SlotBaseValue::cloneImpl() const
+{
+    return new SlotBaseValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SlotBaseValue.h b/Source/JavaScriptCore/b3/B3SlotBaseValue.h
new file mode 100644
index 000000000..19392ea02
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SlotBaseValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class StackSlot;
+
+class JS_EXPORT_PRIVATE SlotBaseValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == SlotBase; }
+
+    ~SlotBaseValue();
+
+    StackSlot* slot() const { return m_slot; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    SlotBaseValue(Origin origin, StackSlot* slot)
+        : Value(CheckedOpcode, SlotBase, pointerType(), origin)
+        , m_slot(slot)
+    {
+    }
+
+    StackSlot* m_slot;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SparseCollection.h b/Source/JavaScriptCore/b3/B3SparseCollection.h
new file mode 100644
index 000000000..46c33a930
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SparseCollection.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+// B3::Procedure and Air::Code have a lot of collections of indexed things. This class provides
+// the common logic for managing such an index-keyed collection.
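+//
+// A usage sketch, with a hypothetical Thing type that exposes an m_index field:
+//
+//     SparseCollection<Thing> things;
+//     Thing* thing = things.addNew(...); // assigns thing->m_index, reusing freed indices
+//     things.remove(thing);              // the slot becomes null and its index is recycled
+//     for (Thing* t : things) { ... }    // iteration skips null slots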
+
+template<typename T>
+class SparseCollection {
+    typedef Vector<std::unique_ptr<T>> VectorType;
+    
+public:
+    SparseCollection()
+    {
+    }
+
+    T* add(std::unique_ptr<T> value)
+    {
+        T* result = value.get();
+        
+        size_t index;
+        if (m_indexFreeList.isEmpty()) {
+            index = m_vector.size();
+            m_vector.append(nullptr);
+        } else
+            index = m_indexFreeList.takeLast();
+
+        value->m_index = index;
+        ASSERT(!m_vector[index]);
+        new (NotNull, &m_vector[index]) std::unique_ptr<T>(WTFMove(value));
+
+        return result;
+    }
+
+    template<typename... Arguments>
+    T* addNew(Arguments&&... arguments)
+    {
+        return add(std::unique_ptr<T>(new T(std::forward<Arguments>(arguments)...)));
+    }
+
+    void remove(T* value)
+    {
+        RELEASE_ASSERT(m_vector[value->m_index].get() == value);
+        m_indexFreeList.append(value->m_index);
+        m_vector[value->m_index] = nullptr;
+    }
+
+    unsigned size() const { return m_vector.size(); }
+    bool isEmpty() const { return m_vector.isEmpty(); }
+    
+    T* at(unsigned index) const { return m_vector[index].get(); }
+    T* operator[](unsigned index) const { return at(index); }
+
+    class iterator {
+    public:
+        iterator()
+            : m_collection(nullptr)
+            , m_index(0)
+        {
+        }
+
+        iterator(const SparseCollection& collection, unsigned index)
+            : m_collection(&collection)
+            , m_index(findNext(index))
+        {
+        }
+
+        T* operator*()
+        {
+            return m_collection->at(m_index);
+        }
+
+        iterator& operator++()
+        {
+            m_index = findNext(m_index + 1);
+            return *this;
+        }
+
+        bool operator==(const iterator& other) const
+        {
+            ASSERT(m_collection == other.m_collection);
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        unsigned findNext(unsigned index)
+        {
+            while (index < m_collection->size() && !m_collection->at(index))
+                index++;
+            return index;
+        }
+
+        const SparseCollection* m_collection;
+        unsigned m_index;
+    };
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+
+private:
+    Vector<std::unique_ptr<T>, 0, UnsafeVectorOverflow> m_vector;
+    Vector<size_t> m_indexFreeList;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackSlot.cpp b/Source/JavaScriptCore/b3/B3StackSlot.cpp
new file mode 100644
index 000000000..4e22014a4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackSlot.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3StackSlot.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+StackSlot::~StackSlot()
+{
+}
+
+void StackSlot::dump(PrintStream& out) const
+{
+    out.print("stack", m_index);
+}
+
+void StackSlot::deepDump(PrintStream& out) const
+{
+    out.print("byteSize = ", m_byteSize, ", offsetFromFP = ", m_offsetFromFP);
+}
+
+StackSlot::StackSlot(unsigned byteSize)
+    : m_byteSize(byteSize)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3StackSlot.h b/Source/JavaScriptCore/b3/B3StackSlot.h
new file mode 100644
index 000000000..4a475099b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackSlot.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SparseCollection.h"
+#include <limits.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+namespace Air {
+class StackSlot;
+} // namespace Air
+
+class StackSlot {
+    WTF_MAKE_NONCOPYABLE(StackSlot);
+    WTF_MAKE_FAST_ALLOCATED;
+
+public:
+    ~StackSlot();
+
+    unsigned byteSize() const { return m_byteSize; }
+    unsigned index() const { return m_index; }
+
+    // This gets assigned at the end of compilation, but you can pin stack slots to a fixed
+    // offset yourself. Use the setOffsetFromFP() method below to do that.
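+    // For example (a sketch), slot->setOffsetFromFP(-16) would pin this slot at FP-16 rather
+    // than letting the compiler pick an offset.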
+    intptr_t offsetFromFP() const { return m_offsetFromFP; }
+
+    // Note that this is meaningless unless the stack slot is Locked.
+    void setOffsetFromFP(intptr_t value)
+    {
+        m_offsetFromFP = value;
+    }
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+private:
+    friend class Air::StackSlot;
+    friend class Procedure;
+    friend class SparseCollection<StackSlot>;
+
+    StackSlot(unsigned byteSize);
+
+    unsigned m_index { UINT_MAX };
+    unsigned m_byteSize { 0 };
+    intptr_t m_offsetFromFP { 0 };
+};
+
+class DeepStackSlotDump {
+public:
+    DeepStackSlotDump(const StackSlot* slot)
+        : m_slot(slot)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_slot)
+            m_slot->deepDump(out);
+        else
+            out.print("");
+    }
+
+private:
+    const StackSlot* m_slot;
+};
+
+inline DeepStackSlotDump deepDump(const StackSlot* slot)
+{
+    return DeepStackSlotDump(slot);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackmapGenerationParams.cpp b/Source/JavaScriptCore/b3/B3StackmapGenerationParams.cpp
new file mode 100644
index 000000000..0a07e4e08
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapGenerationParams.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3StackmapGenerationParams.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "B3StackmapValue.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+const RegisterSet& StackmapGenerationParams::usedRegisters() const
+{
+    return m_value->m_usedRegisters;
+}
+
+RegisterSet StackmapGenerationParams::unavailableRegisters() const
+{
+    RegisterSet result = usedRegisters();
+    
+    RegisterSet unsavedCalleeSaves = RegisterSet::vmCalleeSaveRegisters();
+    for (const RegisterAtOffset& regAtOffset : m_context.code->calleeSaveRegisters())
+        unsavedCalleeSaves.clear(regAtOffset.reg());
+
+    result.merge(unsavedCalleeSaves);
+
+    for (GPRReg gpr : m_gpScratch)
+        result.clear(gpr);
+    for (FPRReg fpr : m_fpScratch)
+        result.clear(fpr);
+    
+    return result;
+}
+
+Vector<Box<CCallHelpers::Label>> StackmapGenerationParams::successorLabels() const
+{
+    RELEASE_ASSERT(m_context.indexInBlock == m_context.currentBlock->size() - 1);
+    RELEASE_ASSERT(m_value->effects().terminal);
+    
+    Vector<Box<CCallHelpers::Label>> result(m_context.currentBlock->numSuccessors());
+    for (unsigned i = m_context.currentBlock->numSuccessors(); i--;)
+        result[i] = m_context.blockLabels[m_context.currentBlock->successorBlock(i)];
+    return result;
+}
+
+bool StackmapGenerationParams::fallsThroughToSuccessor(unsigned successorIndex) const
+{
+    RELEASE_ASSERT(m_context.indexInBlock == m_context.currentBlock->size() - 1);
+    RELEASE_ASSERT(m_value->effects().terminal);
+    
+    Air::BasicBlock* successor = m_context.currentBlock->successorBlock(successorIndex);
+    Air::BasicBlock* nextBlock = m_context.code->findNextBlock(m_context.currentBlock);
+    return successor == nextBlock;
+}
+
+Procedure& StackmapGenerationParams::proc() const
+{
+    return m_context.code->proc();
+}
+
+StackmapGenerationParams::StackmapGenerationParams(
+    StackmapValue* value, const Vector<ValueRep>& reps, Air::GenerationContext& context)
+    : m_value(value)
+    , m_reps(reps)
+    , m_context(context)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3StackmapGenerationParams.h b/Source/JavaScriptCore/b3/B3StackmapGenerationParams.h
new file mode 100644
index 000000000..31d19edb9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapGenerationParams.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirGenerationContext.h"
+#include "B3ValueRep.h"
+#include "CCallHelpers.h"
+#include "RegisterSet.h"
+#include <wtf/SharedTask.h>
+
+namespace JSC { namespace B3 {
+
+class CheckSpecial;
+class PatchpointSpecial;
+class Procedure;
+class StackmapValue;
+
+// NOTE: It's possible to capture StackmapGenerationParams by value, but not all of the methods will
+// work if you do that.
+class StackmapGenerationParams {
+public:
+    // This is the stackmap value that we're generating.
+    StackmapValue* value() const { return m_value; }
+    
+    // This tells you the actual value representations that were chosen. This is usually different
+    // from the constraints we supplied.
+    const Vector<ValueRep>& reps() const { return m_reps; }
+
+    // Usually we wish to access the reps. We make this easy by making ourselves appear to be a
+    // collection of reps.
+    unsigned size() const { return m_reps.size(); }
+    const ValueRep& at(unsigned index) const { return m_reps[index]; }
+    const ValueRep& operator[](unsigned index) const { return at(index); }
+    Vector<ValueRep>::const_iterator begin() const { return m_reps.begin(); }
+    Vector<ValueRep>::const_iterator end() const { return m_reps.end(); }
+    
+    // This tells you the registers that were used.
+    const RegisterSet& usedRegisters() const;
+
+    // This is a useful helper if you want to do register allocation inside of a patchpoint. The
+    // usedRegisters() set is not directly useful for this purpose because:
+    //
+    // - You can only use callee-save registers for scratch if they were saved in the prologue. So,
+    //   if a register is callee-save, it's not enough that it's not in usedRegisters().
+    //
+    // - Scratch registers are going to be in usedRegisters() at the patchpoint. So, if you want to
+    //   find one of your requested scratch registers using usedRegisters(), you'll have a bad time.
+    //
+    // This gives you the used register set that's useful for allocating scratch registers. This set
+    // is defined as:
+    //
+    //     (usedRegisters() | (RegisterSet::calleeSaveRegisters() - proc.calleeSaveRegisters()))
+    //     - gpScratchRegisters - fpScratchRegisters
+    //
+    // I.e. it is like usedRegisters() but also includes unsaved callee-saves and excludes scratch
+    // registers.
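+    //
+    // A sketch: to hand-allocate a scratch GPR inside a patchpoint generator, pick any register
+    // that is not in unavailableRegisters(); when possible, requesting scratch registers up front
+    // and reading them back via gpScratch()/fpScratch() is simpler.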
+    JS_EXPORT_PRIVATE RegisterSet unavailableRegisters() const;
+
+    GPRReg gpScratch(unsigned index) const { return m_gpScratch[index]; }
+    FPRReg fpScratch(unsigned index) const { return m_fpScratch[index]; }
+    
+    // This is computed lazily, so it won't work if you capture StackmapGenerationParams by value.
+    // These labels will get populated before any late paths or link tasks execute.
+    JS_EXPORT_PRIVATE Vector<Box<CCallHelpers::Label>> successorLabels() const;
+    
+    // This is computed lazily, so it won't work if you capture StackmapGenerationParams by value.
+    // Returns true if the successor at the given index is going to be emitted right after the
+    // patchpoint.
+    JS_EXPORT_PRIVATE bool fallsThroughToSuccessor(unsigned successorIndex) const;
+
+    // This is provided for convenience; it means that you don't have to capture it if you don't want to.
+    JS_EXPORT_PRIVATE Procedure& proc() const;
+    
+    // The Air::GenerationContext gives you even more power.
+    Air::GenerationContext& context() const { return m_context; };
+
+    template<typename Functor>
+    void addLatePath(const Functor& functor) const
+    {
+        context().latePaths.append(
+            createSharedTask<void(CCallHelpers&, Air::GenerationContext&)>(
+                [=] (CCallHelpers& jit, Air::GenerationContext&) {
+                    functor(jit);
+                }));
+    }
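+
+    // A usage sketch (hypothetical generator code):
+    //
+    //     params.addLatePath([=] (CCallHelpers& jit) {
+    //         // code emitted here runs after the main body, e.g. a slow path
+    //     });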
+
+private:
+    friend class CheckSpecial;
+    friend class PatchpointSpecial;
+    
+    StackmapGenerationParams(StackmapValue*, const Vector<ValueRep>& reps, Air::GenerationContext&);
+
+    StackmapValue* m_value;
+    Vector<ValueRep> m_reps;
+    Vector<GPRReg> m_gpScratch;
+    Vector<FPRReg> m_fpScratch;
+    Air::GenerationContext& m_context;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp b/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp
new file mode 100644
index 000000000..b5aa6c3ff
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3StackmapSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+StackmapSpecial::StackmapSpecial()
+{
+}
+
+StackmapSpecial::~StackmapSpecial()
+{
+}
+
+void StackmapSpecial::reportUsedRegisters(Inst& inst, const RegisterSet& usedRegisters)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    // FIXME: If the Inst that uses the StackmapSpecial gets duplicated, then we end up merging used
+    // register sets from multiple places. This currently won't happen since Air doesn't have taildup
+    // or things like that. But maybe eventually it could be a problem.
+    value->m_usedRegisters.merge(usedRegisters);
+}
+
+RegisterSet StackmapSpecial::extraClobberedRegs(Inst& inst)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    return value->lateClobbered();
+}
+
+RegisterSet StackmapSpecial::extraEarlyClobberedRegs(Inst& inst)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    return value->earlyClobbered();
+}
+
+void StackmapSpecial::forEachArgImpl(
+    unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+    Inst& inst, RoleMode roleMode, std::optional<unsigned> firstRecoverableIndex,
+    const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    // Check that insane things have not happened.
+    ASSERT(inst.args.size() >= numIgnoredAirArgs);
+    ASSERT(value->children().size() >= numIgnoredB3Args);
+    ASSERT(inst.args.size() - numIgnoredAirArgs >= value->children().size() - numIgnoredB3Args);
+    
+    for (unsigned i = 0; i < value->children().size() - numIgnoredB3Args; ++i) {
+        Arg& arg = inst.args[i + numIgnoredAirArgs];
+        ConstrainedValue child = value->constrainedChild(i + numIgnoredB3Args);
+
+        Arg::Role role;
+        switch (roleMode) {
+        case ForceLateUseUnlessRecoverable:
+            ASSERT(firstRecoverableIndex);
+            if (arg != inst.args[*firstRecoverableIndex] && arg != inst.args[*firstRecoverableIndex + 1]) {
+                role = Arg::LateColdUse;
+                break;
+            }
+            FALLTHROUGH;
+        case SameAsRep:
+            switch (child.rep().kind()) {
+            case ValueRep::WarmAny:
+            case ValueRep::SomeRegister:
+            case ValueRep::Register:
+            case ValueRep::Stack:
+            case ValueRep::StackArgument:
+            case ValueRep::Constant:
+                role = Arg::Use;
+                break;
+            case ValueRep::LateRegister:
+                role = Arg::LateUse;
+                break;
+            case ValueRep::ColdAny:
+                role = Arg::ColdUse;
+                break;
+            case ValueRep::LateColdAny:
+                role = Arg::LateColdUse;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+            break;
+        case ForceLateUse:
+            role = Arg::LateColdUse;
+            break;
+        }
+
+        Type type = child.value()->type();
+        callback(arg, role, Arg::typeForB3Type(type), Arg::widthForB3Type(type));
+    }
+}
+
+bool StackmapSpecial::isValidImpl(
+    unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+    Inst& inst)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    // Check that insane things have not happened.
+    ASSERT(inst.args.size() >= numIgnoredAirArgs);
+    ASSERT(value->children().size() >= numIgnoredB3Args);
+
+    // For the Inst to be valid, it needs to have the right number of arguments.
+    if (inst.args.size() - numIgnoredAirArgs < value->children().size() - numIgnoredB3Args)
+        return false;
+
+    // Regardless of constraints, stackmaps have some basic requirements for their arguments. For
+    // example, you can't have a non-FP-offset address. This verifies those conditions as well as the
+    // argument types.
+    for (unsigned i = 0; i < value->children().size() - numIgnoredB3Args; ++i) {
+        Value* child = value->child(i + numIgnoredB3Args);
+        Arg& arg = inst.args[i + numIgnoredAirArgs];
+
+        if (!isArgValidForValue(arg, child))
+            return false;
+    }
+
+    // The number of constraints has to be no greater than the number of B3 children.
+    ASSERT(value->m_reps.size() <= value->children().size());
+
+    // Verify any explicitly supplied constraints.
+    for (unsigned i = numIgnoredB3Args; i < value->m_reps.size(); ++i) {
+        ValueRep& rep = value->m_reps[i];
+        Arg& arg = inst.args[i - numIgnoredB3Args + numIgnoredAirArgs];
+
+        if (!isArgValidForRep(code(), arg, rep))
+            return false;
+    }
+
+    return true;
+}
+
+bool StackmapSpecial::admitsStackImpl(
+    unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+    Inst& inst, unsigned argIndex)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    unsigned stackmapArgIndex = argIndex - numIgnoredAirArgs + numIgnoredB3Args;
+
+    if (stackmapArgIndex >= value->numChildren()) {
+        // It's not a stackmap argument, so as far as we are concerned, it doesn't admit stack.
+        return false;
+    }
+
+    if (stackmapArgIndex >= value->m_reps.size()) {
+        // This means that there was no constraint.
+        return true;
+    }
+    
+    // We only admit stack for Any's, since Stack is not a valid input constraint, and StackArgument
+    // translates to a CallArg in Air.
+    if (value->m_reps[stackmapArgIndex].isAny())
+        return true;
+
+    return false;
+}
+
+Vector<ValueRep> StackmapSpecial::repsImpl(
+    GenerationContext& context, unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs, Inst& inst)
+{
+    Vector<ValueRep> result;
+    for (unsigned i = 0; i < inst.origin->numChildren() - numIgnoredB3Args; ++i)
+        result.append(repForArg(*context.code, inst.args[i + numIgnoredAirArgs]));
+    return result;
+}
+
+bool StackmapSpecial::isArgValidForValue(const Air::Arg& arg, Value* value)
+{
+    switch (arg.kind()) {
+    case Arg::Tmp:
+    case Arg::Imm:
+    case Arg::BigImm:
+        break;
+    default:
+        if (!arg.isStackMemory())
+            return false;
+        break;
+    }
+
+    return arg.canRepresent(value);
+}
+
+bool StackmapSpecial::isArgValidForRep(Air::Code& code, const Air::Arg& arg, const ValueRep& rep)
+{
+    switch (rep.kind()) {
+    case ValueRep::WarmAny:
+    case ValueRep::ColdAny:
+    case ValueRep::LateColdAny:
+        // We already verified by isArgValidForValue().
+        return true;
+    case ValueRep::SomeRegister:
+    case ValueRep::SomeEarlyRegister:
+        return arg.isTmp();
+    case ValueRep::LateRegister:
+    case ValueRep::Register:
+        return arg == Tmp(rep.reg());
+    case ValueRep::StackArgument:
+        if (arg == Arg::callArg(rep.offsetFromSP()))
+            return true;
+        if (arg.isAddr() && code.frameSize()) {
+            if (arg.base() == Tmp(GPRInfo::callFrameRegister)
+                && arg.offset() == rep.offsetFromSP() - code.frameSize())
+                return true;
+            if (arg.base() == Tmp(MacroAssembler::stackPointerRegister)
+                && arg.offset() == rep.offsetFromSP())
+                return true;
+        }
+        return false;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return false;
+    }
+}
+
+ValueRep StackmapSpecial::repForArg(Code& code, const Arg& arg)
+{
+    switch (arg.kind()) {
+    case Arg::Tmp:
+        return ValueRep::reg(arg.reg());
+        break;
+    case Arg::Imm:
+    case Arg::BigImm:
+        return ValueRep::constant(arg.value());
+        break;
+    case Arg::Addr:
+        if (arg.base() == Tmp(GPRInfo::callFrameRegister))
+            return ValueRep::stack(arg.offset());
+        ASSERT(arg.base() == Tmp(MacroAssembler::stackPointerRegister));
+        return ValueRep::stack(arg.offset() - static_cast(code.frameSize()));
+    default:
+        ASSERT_NOT_REACHED();
+        return ValueRep();
+    }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, StackmapSpecial::RoleMode mode)
+{
+    switch (mode) {
+    case StackmapSpecial::SameAsRep:
+        out.print("SameAsRep");
+        return;
+    case StackmapSpecial::ForceLateUseUnlessRecoverable:
+        out.print("ForceLateUseUnlessRecoverable");
+        return;
+    case StackmapSpecial::ForceLateUse:
+        out.print("ForceLateUse");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackmapSpecial.h b/Source/JavaScriptCore/b3/B3StackmapSpecial.h
new file mode 100644
index 000000000..97a0813d1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapSpecial.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirSpecial.h"
+#include "B3ValueRep.h"
+
+namespace JSC { namespace B3 {
+
+namespace Air { class Code; }
+
+// This is a base class for specials that have stackmaps. Note that it can find the Stackmap by
+// asking for the Inst's origin. Hence, these objects don't need to even hold a reference to the
+// Stackmap.
+
+class StackmapSpecial : public Air::Special {
+public:
+    StackmapSpecial();
+    virtual ~StackmapSpecial();
+
+    enum RoleMode : int8_t {
+        SameAsRep,
+        ForceLateUseUnlessRecoverable,
+        ForceLateUse
+    };
+
+protected:
+    void reportUsedRegisters(Air::Inst&, const RegisterSet&) override;
+    RegisterSet extraEarlyClobberedRegs(Air::Inst&) override;
+    RegisterSet extraClobberedRegs(Air::Inst&) override;
+
+    // Note that this does not override generate() or dumpImpl()/deepDumpImpl(). We have many
+    // subclasses that implement those.
+    void forEachArgImpl(
+        unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+        Air::Inst&, RoleMode, std::optional<unsigned> firstRecoverableIndex,
+        const ScopedLambda<Air::Inst::EachArgCallback>&);
+    
+    bool isValidImpl(
+        unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+        Air::Inst&);
+    bool admitsStackImpl(
+        unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+        Air::Inst&, unsigned argIndex);
+
+    // Appends the reps for the Inst's args, skipping the ignored leading args, to the given vector.
+    Vector<ValueRep> repsImpl(
+        Air::GenerationContext&, unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs, Air::Inst&);
+
+    static bool isArgValidForValue(const Air::Arg&, Value*);
+    static bool isArgValidForRep(Air::Code&, const Air::Arg&, const ValueRep&);
+    static ValueRep repForArg(Air::Code&, const Air::Arg&);
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::B3::StackmapSpecial::RoleMode);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackmapValue.cpp b/Source/JavaScriptCore/b3/B3StackmapValue.cpp
new file mode 100644
index 000000000..9b0db2f46
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapValue.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3StackmapValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+StackmapValue::~StackmapValue()
+{
+}
+
+void StackmapValue::append(Value* value, const ValueRep& rep)
+{
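+    // m_reps stays empty (meaning "all ColdAny") until the first non-default
+    // constraint arrives; then it is backfilled with ColdAny entries below.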
+    if (rep == ValueRep::ColdAny) {
+        children().append(value);
+        return;
+    }
+
+    while (m_reps.size() < numChildren())
+        m_reps.append(ValueRep::ColdAny);
+
+    children().append(value);
+    m_reps.append(rep);
+}
+
+void StackmapValue::appendSomeRegister(Value* value)
+{
+    append(ConstrainedValue(value, ValueRep::SomeRegister));
+}
+
+void StackmapValue::setConstrainedChild(unsigned index, const ConstrainedValue& constrainedValue)
+{
+    child(index) = constrainedValue.value();
+    setConstraint(index, constrainedValue.rep());
+}
+
+void StackmapValue::setConstraint(unsigned index, const ValueRep& rep)
+{
+    if (rep == ValueRep(ValueRep::ColdAny))
+        return;
+
+    while (m_reps.size() <= index)
+        m_reps.append(ValueRep::ColdAny);
+
+    m_reps[index] = rep;
+}
+
+void StackmapValue::dumpChildren(CommaPrinter& comma, PrintStream& out) const
+{
+    for (ConstrainedValue value : constrainedChildren())
+        out.print(comma, value);
+}
+
+void StackmapValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(
+        comma, "generator = ", RawPointer(m_generator.get()), ", earlyClobbered = ", m_earlyClobbered,
+        ", lateClobbered = ", m_lateClobbered, ", usedRegisters = ", m_usedRegisters);
+}
+
+StackmapValue::StackmapValue(CheckedOpcodeTag, Kind kind, Type type, Origin origin)
+    : Value(CheckedOpcode, kind, type, origin)
+{
+    ASSERT(accepts(kind));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3StackmapValue.h b/Source/JavaScriptCore/b3/B3StackmapValue.h
new file mode 100644
index 000000000..66fc644b1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapValue.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstrainedValue.h"
+#include "B3Value.h"
+#include "B3ValueRep.h"
+#include "CCallHelpers.h"
+#include "RegisterSet.h"
+#include <wtf/SharedTask.h>
+
+namespace JSC { namespace B3 {
+
+class StackmapGenerationParams;
+
+typedef void StackmapGeneratorFunction(CCallHelpers&, const StackmapGenerationParams&);
+typedef SharedTask<StackmapGeneratorFunction> StackmapGenerator;
+
+class JS_EXPORT_PRIVATE StackmapValue : public Value {
+public:
+    static bool accepts(Kind kind)
+    {
+        // This needs to include opcodes of all subclasses.
+        switch (kind.opcode()) {
+        case CheckAdd:
+        case CheckSub:
+        case CheckMul:
+        case Check:
+        case Patchpoint:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    ~StackmapValue();
+
+    // Use this to add children. Note that you could also add children by doing
+    // children().append(). That will work fine, but it's not recommended.
+    void append(const ConstrainedValue& value)
+    {
+        append(value.value(), value.rep());
+    }
+
+    void append(Value*, const ValueRep&);
+
+    template<typename VectorType>
+    void appendVector(const VectorType& vector)
+    {
+        for (const auto& value : vector)
+            append(value);
+    }
+
+    // Helper for appending a bunch of values with some ValueRep.
+    template<typename VectorType>
+    void appendVectorWithRep(const VectorType& vector, const ValueRep& rep)
+    {
+        for (Value* value : vector)
+            append(value, rep);
+    }
+
+    // Helper for appending cold any's. This is often used by clients to implement OSR.
+    template<typename VectorType>
+    void appendColdAnys(const VectorType& vector)
+    {
+        appendVectorWithRep(vector, ValueRep::ColdAny);
+    }
+    template<typename VectorType>
+    void appendLateColdAnys(const VectorType& vector)
+    {
+        appendVectorWithRep(vector, ValueRep::LateColdAny);
+    }
+
+    // This is a helper for something you might do a lot of: append a value that should be constrained
+    // to SomeRegister.
+    void appendSomeRegister(Value*);
+
+    const Vector<ValueRep>& reps() const { return m_reps; }
+
+    // Stackmaps allow you to specify that the operation may clobber some registers. Clobbering a register
+    // means that the operation appears to store a value into the register, but the compiler doesn't
+    // assume to know anything about what kind of value might have been stored. In B3's model of
+    // execution, registers are read or written at instruction boundaries rather than inside the
+    // instructions themselves. A register could be read or written immediately before the instruction
+    // executes, or immediately after. Note that at a boundary between instruction A and instruction B we
+    // simultaneously look at what A does after it executes and what B does before it executes. This is
+    // because when the compiler considers what happens to registers, it views the boundary between two
+    // instructions as a kind of atomic point where the late effects of A happen at the same time as the
+    // early effects of B.
+    //
+    // The compiler views a stackmap as a single instruction, even though of course the stackmap may be
+    // composed of any number of instructions (if it's a Patchpoint). You can claim that a stackmap value
+    // clobbers a set of registers before the stackmap's instruction or after. Clobbering before is called
+    // early clobber, while clobbering after is called late clobber.
+    //
+    // This is quite flexible but it has its limitations. Any register listed as an early clobber will
+    // interfere with all uses of the stackmap. Any register listed as a late clobber will interfere with
+    // all defs of the stackmap (i.e. the result). This means that it's currently not possible to claim
+    // to clobber a register while still allowing that register to be used for both an input and an output
+    // of the instruction. It just so happens that B3's sole client (the FTL) currently never wants to
+    // convey such a constraint, but it will want it eventually (FIXME:
+    // https://bugs.webkit.org/show_bug.cgi?id=151823).
+    //
+    // Note that a common use case of early clobber sets is to indicate that this is the set of registers
+    // that shall not be used for inputs to the value. But B3 supports two different ways of specifying
+    // this, the other being LateUse in combination with late clobber (not yet available to stackmaps
+    // directly, FIXME: https://bugs.webkit.org/show_bug.cgi?id=151335). A late use makes the use of that
+    // value appear to happen after the instruction. This means that a late use cannot use the same
+    // register as the result and it cannot use the same register as either early or late clobbered
+    // registers. Late uses are usually a better way of saying that a clobbered register cannot be used
+    // for an input. Early clobber means that some register(s) interfere with *all* inputs, while LateUse
+    // means that some value interferes with whatever is live after the instruction. Below is a list of
+    // examples of how the FTL can handle its various kinds of scenarios using a combination of early
+    // clobber, late clobber, and late use. These examples are for X86_64, w.l.o.g.
+    //
+    // Basic ById patchpoint: Early and late clobber of r11. Early clobber prevents any inputs from using
+    // r11 since that would mess with the MacroAssembler's assumptions when we
+    // AllowMacroScratchRegisterUsage. Late clobber tells B3 that the patchpoint may overwrite r11.
+    //
+    // ById patchpoint in a try block with some live state: This might throw an exception after already
+    // assigning to the result. So, this should LateUse all stackmap values to ensure that the stackmap
+    // values don't interfere with the result. Note that we do not LateUse the non-OSR inputs of the ById
+    // since LateUse implies that the use is cold: the register allocator will assume that the use is not
+    // important for the critical path. Also, early and late clobber of r11.
+    //
+    // Basic ByIdFlush patchpoint: We could do Flush the same way we did it with LLVM: ignore it and let
+    // PolymorphicAccess figure it out. Or, we could add internal clobber support (FIXME:
+    // https://bugs.webkit.org/show_bug.cgi?id=151823). Or, we could do it by early clobbering r11, late
+    // clobbering all volatile registers, and constraining the result to some register. Or, we could do
+    // that but leave the result constrained to SomeRegister, which will cause it to use a callee-save
+    // register. Internal clobber support would allow us to use SomeRegister while getting the result into
+    // a volatile register.
+    //
+    // ByIdFlush patchpoint in a try block with some live state: LateUse all for-OSR stackmap values,
+    // early clobber of r11 to prevent the other inputs from using r11, and late clobber of all volatile
+    // registers to make way for the call. To handle the result, we could do any of what is listed in the
+    // previous paragraph.
+    //
+    // Basic JS call: Force all non-OSR inputs into specific locations (register, stack, whatever).
+    // All volatile registers are late-clobbered. The output is constrained to a register as well.
+    //
+    // JS call in a try block with some live state: LateUse all for-OSR stackmap values, fully constrain
+    // all non-OSR inputs and the result, and late clobber all volatile registers.
+    //
+    // JS tail call: Pass all inputs as a warm variant of Any (FIXME:
+    // https://bugs.webkit.org/show_bug.cgi?id=151811).
+    //
+    // Note that we cannot yet do all of these things because although Air already supports all of these
+    // various forms of uses (LateUse and warm unconstrained use), B3 doesn't yet expose all of it. The
+    // bugs are:
+    // https://bugs.webkit.org/show_bug.cgi?id=151335 (LateUse)
+    // https://bugs.webkit.org/show_bug.cgi?id=151811 (warm Any)
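+    //
+    // As a concrete illustration, here is a minimal patchpoint sketch (PatchpointValue is the
+    // subclass that exposes this interface directly; block, proc, input, and origin are
+    // illustrative):
+    //
+    //     PatchpointValue* patchpoint = block->appendNew<PatchpointValue>(proc, Int64, origin);
+    //     patchpoint->appendSomeRegister(input);
+    //     patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    //     patchpoint->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+    //         // params[0] is the result's rep; params[1] is the input's rep.
+    //         jit.move(params[1].gpr(), params[0].gpr());
+    //     });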
+    void clobberEarly(const RegisterSet& set)
+    {
+        m_earlyClobbered.merge(set);
+    }
+
+    void clobberLate(const RegisterSet& set)
+    {
+        m_lateClobbered.merge(set);
+    }
+
+    void clobber(const RegisterSet& set)
+    {
+        clobberEarly(set);
+        clobberLate(set);
+    }
+
+    RegisterSet& earlyClobbered() { return m_earlyClobbered; }
+    RegisterSet& lateClobbered() { return m_lateClobbered; }
+    const RegisterSet& earlyClobbered() const { return m_earlyClobbered; }
+    const RegisterSet& lateClobbered() const { return m_lateClobbered; }
+
+    void setGenerator(RefPtr<StackmapGenerator> generator)
+    {
+        m_generator = generator;
+    }
+
+    template<typename Functor>
+    void setGenerator(const Functor& functor)
+    {
+        m_generator = createSharedTask<StackmapGeneratorFunction>(functor);
+    }
+
+    RefPtr<StackmapGenerator> generator() const { return m_generator; }
+
+    ConstrainedValue constrainedChild(unsigned index) const
+    {
+        return ConstrainedValue(child(index), index < m_reps.size() ? m_reps[index] : ValueRep::ColdAny);
+    }
+
+    void setConstrainedChild(unsigned index, const ConstrainedValue&);
+    
+    void setConstraint(unsigned index, const ValueRep&);
+
+    class ConstrainedValueCollection {
+    public:
+        ConstrainedValueCollection(const StackmapValue& value)
+            : m_value(value)
+        {
+        }
+
+        unsigned size() const { return m_value.numChildren(); }
+        
+        ConstrainedValue at(unsigned index) const { return m_value.constrainedChild(index); }
+
+        ConstrainedValue operator[](unsigned index) const { return at(index); }
+
+        class iterator {
+        public:
+            iterator()
+                : m_collection(nullptr)
+                , m_index(0)
+            {
+            }
+
+            iterator(const ConstrainedValueCollection& collection, unsigned index)
+                : m_collection(&collection)
+                , m_index(index)
+            {
+            }
+
+            ConstrainedValue operator*() const
+            {
+                return m_collection->at(m_index);
+            }
+
+            iterator& operator++()
+            {
+                m_index++;
+                return *this;
+            }
+
+            bool operator==(const iterator& other) const
+            {
+                ASSERT(m_collection == other.m_collection);
+                return m_index == other.m_index;
+            }
+
+            bool operator!=(const iterator& other) const
+            {
+                return !(*this == other);
+            }
+            
+        private:
+            const ConstrainedValueCollection* m_collection;
+            unsigned m_index;
+        };
+
+        iterator begin() const { return iterator(*this, 0); }
+        iterator end() const { return iterator(*this, size()); }
+
+    private:
+        const StackmapValue& m_value;
+    };
+
+    ConstrainedValueCollection constrainedChildren() const
+    {
+        return ConstrainedValueCollection(*this);
+    }
+
+protected:
+    void dumpChildren(CommaPrinter&, PrintStream&) const override;
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    StackmapValue(CheckedOpcodeTag, Kind, Type, Origin);
+
+private:
+    friend class CheckSpecial;
+    friend class PatchpointSpecial;
+    friend class StackmapGenerationParams;
+    friend class StackmapSpecial;
+    
+    Vector<ValueRep> m_reps;
+    RefPtr<StackmapGenerator> m_generator;
+    RegisterSet m_earlyClobbered;
+    RegisterSet m_lateClobbered;
+    RegisterSet m_usedRegisters; // Stackmaps could be further duplicated by Air, but that's unlikely, so we just merge the used registers sets if that were to happen.
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SuccessorCollection.h b/Source/JavaScriptCore/b3/B3SuccessorCollection.h
new file mode 100644
index 000000000..0a7df247b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SuccessorCollection.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+// This is a generic wrapper around lists of frequented blocks, which gives you just the blocks.
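+//
+// A minimal sketch of the intended use (the successor list type is illustrative):
+//
+//     Vector<FrequentedBlock> successors;
+//     SuccessorCollection<BasicBlock, Vector<FrequentedBlock>> blocks(successors);
+//     for (BasicBlock* block : blocks)
+//         dataLog(*block);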
+
+template<typename BasicBlock, typename SuccessorList>
+class SuccessorCollection {
+public:
+    SuccessorCollection(SuccessorList& list)
+        : m_list(list)
+    {
+    }
+
+    size_t size() const { return m_list.size(); }
+    BasicBlock* at(size_t index) const { return m_list[index].block(); }
+    BasicBlock*& at(size_t index) { return m_list[index].block(); }
+    BasicBlock* operator[](size_t index) const { return at(index); }
+    BasicBlock*& operator[](size_t index) { return at(index); }
+
+    class iterator {
+    public:
+        iterator()
+            : m_collection(nullptr)
+            , m_index(0)
+        {
+        }
+
+        iterator(SuccessorCollection& collection, size_t index)
+            : m_collection(&collection)
+            , m_index(index)
+        {
+        }
+
+        BasicBlock*& operator*() const
+        {
+            return m_collection->at(m_index);
+        }
+
+        iterator& operator++()
+        {
+            m_index++;
+            return *this;
+        }
+
+        bool operator==(const iterator& other) const
+        {
+            ASSERT(m_collection == other.m_collection);
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        SuccessorCollection* m_collection;
+        size_t m_index;
+    };
+
+    iterator begin() { return iterator(*this, 0); }
+    iterator end() { return iterator(*this, size()); }
+
+    class const_iterator {
+    public:
+        const_iterator()
+            : m_collection(nullptr)
+            , m_index(0)
+        {
+        }
+
+        const_iterator(const SuccessorCollection& collection, size_t index)
+            : m_collection(&collection)
+            , m_index(index)
+        {
+        }
+
+        BasicBlock* operator*() const
+        {
+            return m_collection->at(m_index);
+        }
+
+        const_iterator& operator++()
+        {
+            m_index++;
+            return *this;
+        }
+
+        bool operator==(const const_iterator& other) const
+        {
+            ASSERT(m_collection == other.m_collection);
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const const_iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        const SuccessorCollection* m_collection;
+        size_t m_index;
+    };
+
+    const_iterator begin() const { return const_iterator(*this, 0); }
+    const_iterator end() const { return const_iterator(*this, size()); }
+
+private:
+    SuccessorList& m_list;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SwitchCase.cpp b/Source/JavaScriptCore/b3/B3SwitchCase.cpp
new file mode 100644
index 000000000..d05332bc6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SwitchCase.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3SwitchCase.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+
+namespace JSC { namespace B3 {
+
+void SwitchCase::dump(PrintStream& out) const
+{
+    out.print(m_caseValue, "->", m_target);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SwitchCase.h b/Source/JavaScriptCore/b3/B3SwitchCase.h
new file mode 100644
index 000000000..5ba6a484c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SwitchCase.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequentedBlock.h"
+#include <limits.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class SwitchCase {
+public:
+    SwitchCase()
+    {
+    }
+
+    SwitchCase(int64_t caseValue, const FrequentedBlock& target)
+        : m_caseValue(caseValue)
+        , m_target(target)
+    {
+    }
+
+    explicit operator bool() const { return !!m_target; }
+
+    int64_t caseValue() const { return m_caseValue; }
+    FrequentedBlock target() const { return m_target; }
+    BasicBlock* targetBlock() const { return m_target.block(); }
+
+    void dump(PrintStream& out) const;
+
+private:
+    int64_t m_caseValue;
+    FrequentedBlock m_target;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SwitchValue.cpp b/Source/JavaScriptCore/b3/B3SwitchValue.cpp
new file mode 100644
index 000000000..8b880347a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SwitchValue.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3SwitchValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+SwitchValue::~SwitchValue()
+{
+}
+
+SwitchCase SwitchValue::removeCase(BasicBlock* block, unsigned index)
+{
+    FrequentedBlock resultBlock = block->successor(index);
+    int64_t resultValue = m_values[index];
+    block->successor(index) = block->successors().last();
+    block->successors().removeLast();
+    m_values[index] = m_values.last();
+    m_values.removeLast();
+    return SwitchCase(resultValue, resultBlock);
+}
+
+bool SwitchValue::hasFallThrough(const BasicBlock* block) const
+{
+    unsigned numSuccessors = block->numSuccessors();
+    unsigned numValues = m_values.size();
+    RELEASE_ASSERT(numValues == numSuccessors || numValues + 1 == numSuccessors);
+    
+    return numValues + 1 == numSuccessors;
+}
+
+bool SwitchValue::hasFallThrough() const
+{
+    return hasFallThrough(owner);
+}
+
+void SwitchValue::setFallThrough(BasicBlock* block, const FrequentedBlock& target)
+{
+    if (!hasFallThrough())
+        block->successors().append(target);
+    else
+        block->successors().last() = target;
+    ASSERT(hasFallThrough(block));
+}
+
+void SwitchValue::appendCase(BasicBlock* block, const SwitchCase& switchCase)
+{
+    if (!hasFallThrough())
+        block->successors().append(switchCase.target());
+    else {
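+        // The fall-through target must stay last: duplicate it at the end, then
+        // overwrite the slot it occupied with the new case's target.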
+        block->successors().append(block->successors().last());
+        block->successor(block->numSuccessors() - 2) = switchCase.target();
+    }
+    m_values.append(switchCase.caseValue());
+}
+
+void SwitchValue::setFallThrough(const FrequentedBlock& target)
+{
+    setFallThrough(owner, target);
+}
+
+void SwitchValue::appendCase(const SwitchCase& switchCase)
+{
+    appendCase(owner, switchCase);
+}
+
+void SwitchValue::dumpSuccessors(const BasicBlock* block, PrintStream& out) const
+{
+    // We must not crash due to a number-of-successors mismatch! Someone debugging a
+    // number-of-successors bug will want to dump IR!
+    if (numCaseValues() + 1 != block->numSuccessors()) {
+        Value::dumpSuccessors(block, out);
+        return;
+    }
+    
+    out.print(cases(block));
+}
+
+void SwitchValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, "cases = [", listDump(m_values), "]");
+}
+
+Value* SwitchValue::cloneImpl() const
+{
+    return new SwitchValue(*this);
+}
+
+SwitchValue::SwitchValue(Origin origin, Value* child)
+    : Value(CheckedOpcode, Switch, Void, origin, child)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SwitchValue.h b/Source/JavaScriptCore/b3/B3SwitchValue.h
new file mode 100644
index 000000000..a1c27cd9d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SwitchValue.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CaseCollection.h"
+#include "B3SwitchCase.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class SwitchValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Switch; }
+
+    ~SwitchValue();
+
+    // numCaseValues() + 1 == numSuccessors().
+    unsigned numCaseValues() const { return m_values.size(); }
+
+    // The successor for this case value is at the same index.
+    int64_t caseValue(unsigned index) const { return m_values[index]; }
+    
+    const Vector<int64_t>& caseValues() const { return m_values; }
+
+    CaseCollection cases(const BasicBlock* owner) const { return CaseCollection(this, owner); }
+    CaseCollection cases() const { return cases(owner); }
+
+    // This removes the case and reorders things a bit. If you're iterating the cases from 0 to N,
+    // then you can keep iterating after this so long as you revisit this same index (which will now
+    // contain some other case value). This returns the case that was removed.
+    SwitchCase removeCase(BasicBlock*, unsigned index);
+
+    bool hasFallThrough(const BasicBlock*) const;
+    bool hasFallThrough() const;
+
+    // These two functions can be called in any order.
+    void setFallThrough(BasicBlock*, const FrequentedBlock&);
+    void appendCase(BasicBlock*, const SwitchCase&);
+    
+    JS_EXPORT_PRIVATE void setFallThrough(const FrequentedBlock&);
+    JS_EXPORT_PRIVATE void appendCase(const SwitchCase&);
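+
+    // A minimal sketch of building a switch (proc, block, selector, and the targets are
+    // illustrative):
+    //
+    //     SwitchValue* switchValue = block->appendNew<SwitchValue>(proc, origin, selector);
+    //     switchValue->appendCase(SwitchCase(0, FrequentedBlock(zeroTarget)));
+    //     switchValue->setFallThrough(FrequentedBlock(defaultTarget));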
+
+    void dumpSuccessors(const BasicBlock*, PrintStream&) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    JS_EXPORT_PRIVATE SwitchValue(Origin, Value* child);
+
+    Vector<int64_t> m_values;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3TimingScope.cpp b/Source/JavaScriptCore/b3/B3TimingScope.cpp
new file mode 100644
index 000000000..d8ad42133
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3TimingScope.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3TimingScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include <wtf/CurrentTime.h>
+#include <wtf/DataLog.h>
+
+namespace JSC { namespace B3 {
+
+TimingScope::TimingScope(const char* name)
+    : m_name(name)
+{
+    if (shouldMeasurePhaseTiming())
+        m_before = monotonicallyIncreasingTimeMS();
+}
+
+TimingScope::~TimingScope()
+{
+    if (shouldMeasurePhaseTiming()) {
+        double after = monotonicallyIncreasingTimeMS();
+        dataLog("[B3] ", m_name, " took: ", after - m_before, " ms.\n");
+    }
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3TimingScope.h b/Source/JavaScriptCore/b3/B3TimingScope.h
new file mode 100644
index 000000000..a957a0eb0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3TimingScope.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 {
+
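+// Logs how long a scope took when phase timing measurement is enabled. A minimal
+// usage sketch (the phase name is illustrative):
+//
+//     TimingScope timingScope("reduceStrength");
+//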
+class TimingScope {
+    WTF_MAKE_NONCOPYABLE(TimingScope);
+public:
+    TimingScope(const char* name);
+    ~TimingScope();
+
+private:
+    const char* m_name;
+    double m_before;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Type.cpp b/Source/JavaScriptCore/b3/B3Type.cpp
new file mode 100644
index 000000000..0057eaf61
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Type.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Type.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Type type)
+{
+    switch (type) {
+    case Void:
+        out.print("Void");
+        return;
+    case Int32:
+        out.print("Int32");
+        return;
+    case Int64:
+        out.print("Int64");
+        return;
+    case Float:
+        out.print("Float");
+        return;
+    case Double:
+        out.print("Double");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Type.h b/Source/JavaScriptCore/b3/B3Type.h
new file mode 100644
index 000000000..4ceaa8a1d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Type.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include <wtf/StdLibExtras.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+enum Type : int8_t {
+    Void,
+    Int32,
+    Int64,
+    Float,
+    Double,
+};
+
+inline bool isInt(Type type)
+{
+    return type == Int32 || type == Int64;
+}
+
+inline bool isFloat(Type type)
+{
+    return type == Float || type == Double;
+}
+
+inline Type pointerType()
+{
+    if (is32Bit())
+        return Int32;
+    return Int64;
+}
+
+inline size_t sizeofType(Type type)
+{
+    switch (type) {
+    case Void:
+        return 0;
+    case Int32:
+    case Float:
+        return 4;
+    case Int64:
+    case Double:
+        return 8;
+    }
+    ASSERT_NOT_REACHED();
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::Type);
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3TypeMap.h b/Source/JavaScriptCore/b3/B3TypeMap.h
new file mode 100644
index 000000000..c0ea41304
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3TypeMap.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Type.h"
+#include <wtf/PrintStream.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
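+// Holds one T per B3 type. A minimal usage sketch (the variable name is illustrative):
+//
+//     TypeMap<unsigned> valueCountsByType;
+//     valueCountsByType[Int32]++;
+//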
+template<typename T>
+class TypeMap {
+public:
+    TypeMap()
+        : m_void()
+        , m_int32()
+        , m_int64()
+        , m_float()
+        , m_double()
+    {
+    }
+    
+    T& at(Type type)
+    {
+        switch (type) {
+        case Void:
+            return m_void;
+        case Int32:
+            return m_int32;
+        case Int64:
+            return m_int64;
+        case Float:
+            return m_float;
+        case Double:
+            return m_double;
+        }
+        ASSERT_NOT_REACHED();
+    }
+    
+    const T& at(Type type) const
+    {
+        return bitwise_cast<TypeMap*>(this)->at(type);
+    }
+    
+    T& operator[](Type type)
+    {
+        return at(type);
+    }
+    
+    const T& operator[](Type type) const
+    {
+        return at(type);
+    }
+    
+    void dump(PrintStream& out) const
+    {
+        out.print(
+            "{void = ", m_void,
+            ", int32 = ", m_int32,
+            ", int64 = ", m_int64,
+            ", float = ", m_float,
+            ", double = ", m_double, "}");
+    }
+    
+private:
+    T m_void;
+    T m_int32;
+    T m_int64;
+    T m_float;
+    T m_double;
+};
+
+} } // namespace JSC::B3
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3UpsilonValue.cpp b/Source/JavaScriptCore/b3/B3UpsilonValue.cpp
new file mode 100644
index 000000000..c87432fb7
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3UpsilonValue.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3UpsilonValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+UpsilonValue::~UpsilonValue()
+{
+}
+
+void UpsilonValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    if (m_phi)
+        out.print(comma, "^", m_phi->index());
+    else {
+        // We want a useful dump even when the Phi isn't set yet: such IR won't pass
+        // validation, but it can exist as an intermediate step.
+        out.print(comma, "^(null)");
+    }
+}
+
+Value* UpsilonValue::cloneImpl() const
+{
+    return new UpsilonValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3UpsilonValue.h b/Source/JavaScriptCore/b3/B3UpsilonValue.h
new file mode 100644
index 000000000..4c479e419
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3UpsilonValue.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE UpsilonValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Upsilon; }
+
+    ~UpsilonValue();
+
+    Value* phi() const { return m_phi; }
+    void setPhi(Value* phi)
+    {
+        ASSERT(child(0)->type() == phi->type());
+        ASSERT(phi->opcode() == Phi);
+        m_phi = phi;
+    }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    // Note that passing the Phi during construction is optional. A valid pattern is to first create
+    // the Upsilons without the Phi, then create the Phi, then go back and tell the Upsilons about
+    // the Phi. This allows you to emit code in its natural order.
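+    //
+    // A minimal sketch of that pattern (proc, origin, and the blocks are illustrative):
+    //
+    //     UpsilonValue* thenResult = thenBlock->appendNew<UpsilonValue>(proc, origin, thenValue);
+    //     UpsilonValue* elseResult = elseBlock->appendNew<UpsilonValue>(proc, origin, elseValue);
+    //     Value* phi = joinBlock->appendNew<Value>(proc, Phi, Int32, origin);
+    //     thenResult->setPhi(phi);
+    //     elseResult->setPhi(phi);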
+    UpsilonValue(Origin origin, Value* value, Value* phi = nullptr)
+        : Value(CheckedOpcode, Upsilon, Void, origin, value)
+        , m_phi(phi)
+    {
+        if (phi)
+            ASSERT(value->type() == phi->type());
+    }
+
+    Value* m_phi;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3UseCounts.cpp b/Source/JavaScriptCore/b3/B3UseCounts.cpp
new file mode 100644
index 000000000..5fe18d4ff
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3UseCounts.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3UseCounts.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
+UseCounts::UseCounts(Procedure& procedure)
+    : m_counts(procedure.values().size())
+{
+    Vector<Value*, 64> children;
+    for (Value* value : procedure.values()) {
+        children.resize(0);
+        for (Value* child : value->children()) {
+            m_counts[child].numUses++;
+            children.append(child);
+        }
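+        // numUses counts every use edge, while numUsingInstructions counts each
+        // distinct user once; sorting makes duplicate children adjacent so they
+        // can be skipped below.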
+        std::sort(children.begin(), children.end());
+        Value* last = nullptr;
+        for (Value* child : children) {
+            if (child == last)
+                continue;
+
+            m_counts[child].numUsingInstructions++;
+            last = child;
+        }
+    }
+}
+
+UseCounts::~UseCounts()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3UseCounts.h b/Source/JavaScriptCore/b3/B3UseCounts.h
new file mode 100644
index 000000000..f5a0492a9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3UseCounts.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class UseCounts {
+public:
+    JS_EXPORT_PRIVATE UseCounts(Procedure&);
+    JS_EXPORT_PRIVATE ~UseCounts();
+
+    unsigned numUses(Value* value) const { return m_counts[value].numUses; }
+    unsigned numUsingInstructions(Value* value) const { return m_counts[value].numUsingInstructions; }
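+
+    // Example: for @2 = Add(@1, @1), numUses(@1) is 2 (one per use edge) while
+    // numUsingInstructions(@1) is 1, since both uses come from the single value @2.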
+    
+private:
+    struct Counts {
+        unsigned numUses { 0 };
+        unsigned numUsingInstructions { 0 };
+    };
+    
+    IndexMap<Value*, Counts> m_counts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Validate.cpp b/Source/JavaScriptCore/b3/B3Validate.cpp
new file mode 100644
index 000000000..8df8ace8f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Validate.cpp
@@ -0,0 +1,595 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Validate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3Dominators.h"
+#include "B3MemoryValue.h"
+#include "B3Procedure.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "B3WasmBoundsCheckValue.h"
+#include <wtf/HashSet.h>
+#include <wtf/StringPrintStream.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class Validater {
+public:
+    Validater(Procedure& procedure, const char* dumpBefore)
+        : m_procedure(procedure)
+        , m_dumpBefore(dumpBefore)
+    {
+    }
+
+#define VALIDATE(condition, message) do {                               \
+        if (condition)                                                  \
+            break;                                                      \
+        fail(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #condition, toCString message); \
+    } while (false)
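+
+    // Usage sketch: the message argument is a parenthesized argument list that
+    // is forwarded to toCString(), for example:
+    //
+    //     VALIDATE(value->type() == Int32, ("At ", *value));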
+
+    void run()
+    {
+        HashSet<BasicBlock*> blocks;
+        HashSet<Value*> valueInProc;
+        HashMap<Value*, unsigned> valueInBlock;
+        HashMap<Value*, BasicBlock*> valueOwner;
+        HashMap<Value*, unsigned> valueIndex;
+
+        for (BasicBlock* block : m_procedure) {
+            blocks.add(block);
+            for (unsigned i = 0; i < block->size(); ++i) {
+                Value* value = block->at(i);
+                valueInBlock.add(value, 0).iterator->value++;
+                valueOwner.add(value, block);
+                valueIndex.add(value, i);
+            }
+        }
+
+        for (Value* value : m_procedure.values())
+            valueInProc.add(value);
+
+        for (Value* value : valueInProc)
+            VALIDATE(valueInBlock.contains(value), ("At ", *value));
+        for (auto& entry : valueInBlock) {
+            VALIDATE(valueInProc.contains(entry.key), ("At ", *entry.key));
+            VALIDATE(entry.value == 1, ("At ", *entry.key));
+        }
+
+        // Compute dominators ourselves to avoid perturbing Procedure.
+        Dominators dominators(m_procedure);
+
+        for (Value* value : valueInProc) {
+            for (Value* child : value->children()) {
+                VALIDATE(child, ("At ", *value));
+                VALIDATE(valueInProc.contains(child), ("At ", *value, "->", pointerDump(child)));
+                if (valueOwner.get(child) == valueOwner.get(value))
+                    VALIDATE(valueIndex.get(value) > valueIndex.get(child), ("At ", *value, "->", pointerDump(child)));
+                else
+                    VALIDATE(dominators.dominates(valueOwner.get(child), valueOwner.get(value)), ("at ", *value, "->", pointerDump(child)));
+            }
+        }
+
+        HashMap<BasicBlock*, HashSet<BasicBlock*>> allPredecessors;
+        for (BasicBlock* block : blocks) {
+            VALIDATE(block->size() >= 1, ("At ", *block));
+            for (unsigned i = 0; i < block->size() - 1; ++i)
+                VALIDATE(!block->at(i)->effects().terminal, ("At ", *block->at(i)));
+            VALIDATE(block->last()->effects().terminal, ("At ", *block->last()));
+            
+            for (BasicBlock* successor : block->successorBlocks()) {
+                allPredecessors.add(successor, HashSet<BasicBlock*>()).iterator->value.add(block);
+                VALIDATE(
+                    blocks.contains(successor), ("At ", *block, "->", pointerDump(successor)));
+            }
+        }
+
+        // Note that this totally allows dead code.
+        for (auto& entry : allPredecessors) {
+            BasicBlock* successor = entry.key;
+            HashSet<BasicBlock*>& predecessors = entry.value;
+            VALIDATE(predecessors == successor->predecessors(), ("At ", *successor));
+        }
+
+        for (Value* value : m_procedure.values()) {
+            for (Value* child : value->children())
+                VALIDATE(child->type() != Void, ("At ", *value, "->", *child));
+            switch (value->opcode()) {
+            case Nop:
+            case Fence:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                break;
+            case Identity:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                break;
+            case Const32:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case Const64:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Int64, ("At ", *value));
+                break;
+            case ConstDouble:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Double, ("At ", *value));
+                break;
+            case ConstFloat:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Float, ("At ", *value));
+                break;
+            case Set:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->as<VariableValue>()->variable()->type(), ("At ", *value));
+                break;
+            case Get:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == value->as<VariableValue>()->variable()->type(), ("At ", *value));
+                break;
+            case SlotBase:
+            case FramePointer:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == pointerType(), ("At ", *value));
+                break;
+            case ArgumentReg:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(
+                    (value->as<ArgumentRegValue>()->argumentReg().isGPR() ? pointerType() : Double)
+                    == value->type(), ("At ", *value));
+                break;
+            case Add:
+            case Sub:
+            case Mul:
+            case Div:
+            case UDiv:
+            case Mod:
+            case UMod:
+            case BitAnd:
+            case BitOr:
+            case BitXor:
+                VALIDATE(!value->kind().traps(), ("At ", *value));
+                switch (value->opcode()) {
+                case Div:
+                case Mod:
+                    if (value->isChill()) {
+                        VALIDATE(value->opcode() == Div || value->opcode() == Mod, ("At ", *value));
+                        VALIDATE(isInt(value->type()), ("At ", *value));
+                    }
+                    break;
+                default:
+                    VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                    break;
+                }
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                break;
+            case Neg:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                break;
+            case Shl:
+            case SShr:
+            case ZShr:
+            case RotR:
+            case RotL:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->child(1)->type() == Int32, ("At ", *value));
+                VALIDATE(isInt(value->type()), ("At ", *value));
+                break;
+            case BitwiseCast:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->type() != value->child(0)->type(), ("At ", *value));
+                VALIDATE(
+                    (value->type() == Int64 && value->child(0)->type() == Double)
+                    || (value->type() == Double && value->child(0)->type() == Int64)
+                    || (value->type() == Float && value->child(0)->type() == Int32)
+                    || (value->type() == Int32 && value->child(0)->type() == Float),
+                    ("At ", *value));
+                break;
+            case SExt8:
+            case SExt16:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case SExt32:
+            case ZExt32:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+                VALIDATE(value->type() == Int64, ("At ", *value));
+                break;
+            case Clz:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(isInt(value->type()), ("At ", *value));
+                break;
+            case Trunc:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(
+                    (value->type() == Int32 && value->child(0)->type() == Int64)
+                    || (value->type() == Float && value->child(0)->type() == Double),
+                    ("At ", *value));
+                break;
+            case Abs:
+            case Ceil:
+            case Floor:
+            case Sqrt:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isFloat(value->child(0)->type()), ("At ", *value));
+                VALIDATE(isFloat(value->type()), ("At ", *value));
+                break;
+            case IToD:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Double, ("At ", *value));
+                break;
+            case IToF:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Float, ("At ", *value));
+                break;
+            case FloatToDouble:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Float, ("At ", *value));
+                VALIDATE(value->type() == Double, ("At ", *value));
+                break;
+            case DoubleToFloat:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Double, ("At ", *value));
+                VALIDATE(value->type() == Float, ("At ", *value));
+                break;
+            case Equal:
+            case NotEqual:
+            case LessThan:
+            case GreaterThan:
+            case LessEqual:
+            case GreaterEqual:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case Above:
+            case Below:
+            case AboveEqual:
+            case BelowEqual:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case EqualOrUnordered:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(isFloat(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case Select:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 3, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(value->type() == value->child(2)->type(), ("At ", *value));
+                break;
+            case Load8Z:
+            case Load8S:
+            case Load16Z:
+            case Load16S:
+                VALIDATE(!value->kind().isChill(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                validateStackAccess(value);
+                break;
+            case Load:
+                VALIDATE(!value->kind().isChill(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                validateStackAccess(value);
+                break;
+            case Store8:
+            case Store16:
+                VALIDATE(!value->kind().isChill(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+                VALIDATE(value->child(1)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                validateStackAccess(value);
+                break;
+            case Store:
+                VALIDATE(!value->kind().isChill(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(1)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                validateStackAccess(value);
+                break;
+            case WasmAddress:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() == pointerType(), ("At ", *value));
+                break;
+            case CCall:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() >= 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+                break;
+            case Patchpoint:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                if (value->type() == Void)
+                    VALIDATE(value->as<PatchpointValue>()->resultConstraint == ValueRep::WarmAny, ("At ", *value));
+                else {
+                    switch (value->as<PatchpointValue>()->resultConstraint.kind()) {
+                    case ValueRep::WarmAny:
+                    case ValueRep::SomeRegister:
+                    case ValueRep::SomeEarlyRegister:
+                    case ValueRep::Register:
+                    case ValueRep::StackArgument:
+                        break;
+                    default:
+                        VALIDATE(false, ("At ", *value));
+                        break;
+                    }
+                    
+                    validateStackmapConstraint(value, ConstrainedValue(value, value->as<PatchpointValue>()->resultConstraint), ConstraintRole::Def);
+                }
+                validateStackmap(value);
+                break;
+            case CheckAdd:
+            case CheckSub:
+            case CheckMul:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() >= 2, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(isInt(value->child(1)->type()), ("At ", *value));
+                VALIDATE(value->as<StackmapValue>()->constrainedChild(0).rep() == ValueRep::WarmAny, ("At ", *value));
+                VALIDATE(value->as<StackmapValue>()->constrainedChild(1).rep() == ValueRep::WarmAny, ("At ", *value));
+                validateStackmap(value);
+                break;
+            case Check:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() >= 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->as<StackmapValue>()->constrainedChild(0).rep() == ValueRep::WarmAny, ("At ", *value));
+                validateStackmap(value);
+                break;
+            case WasmBoundsCheck:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+                VALIDATE(m_procedure.code().isPinned(value->as<WasmBoundsCheckValue>()->pinnedGPR()), ("At ", *value));
+                VALIDATE(m_procedure.code().wasmBoundsCheckGenerator(), ("At ", *value));
+                break;
+            case Upsilon:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->as<UpsilonValue>()->phi(), ("At ", *value));
+                VALIDATE(value->as<UpsilonValue>()->phi()->opcode() == Phi, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->as<UpsilonValue>()->phi()->type(), ("At ", *value));
+                VALIDATE(valueInProc.contains(value->as<UpsilonValue>()->phi()), ("At ", *value));
+                break;
+            case Phi:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                break;
+            case Jump:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(valueOwner.get(value)->numSuccessors() == 1, ("At ", *value));
+                break;
+            case Oops:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(!valueOwner.get(value)->numSuccessors(), ("At ", *value));
+                break;
+            case Return:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() <= 1, ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(!valueOwner.get(value)->numSuccessors(), ("At ", *value));
+                break;
+            case Branch:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(valueOwner.get(value)->numSuccessors() == 2, ("At ", *value));
+                break;
+            case Switch: {
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(value->as<SwitchValue>()->hasFallThrough(valueOwner.get(value)), ("At ", *value));
+                // This validates the same thing as hasFallThrough, but more explicitly. We want to
+                // make sure that if anyone tries to change the definition of hasFallThrough, they
+                // will feel some pain here, since this is fundamental.
+                VALIDATE(valueOwner.get(value)->numSuccessors() == value->as<SwitchValue>()->numCaseValues() + 1, ("At ", *value));
+                
+                // Check that there are no duplicate cases.
+                Vector<int64_t> caseValues = value->as<SwitchValue>()->caseValues();
+                std::sort(caseValues.begin(), caseValues.end());
+                for (unsigned i = 1; i < caseValues.size(); ++i)
+                    VALIDATE(caseValues[i - 1] != caseValues[i], ("At ", *value, ", caseValue = ", caseValues[i]));
+                break;
+            }
+            case EntrySwitch:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(valueOwner.get(value)->numSuccessors() == m_procedure.numEntrypoints(), ("At ", *value));
+                break;
+            }
+
+            VALIDATE(!(value->effects().writes && value->key()), ("At ", *value));
+        }
+
+        for (Variable* variable : m_procedure.variables())
+            VALIDATE(variable->type() != Void, ("At ", *variable));
+    }
+
+private:
+    void validateStackmap(Value* value)
+    {
+        StackmapValue* stackmap = value->as<StackmapValue>();
+        VALIDATE(stackmap, ("At ", *value));
+        VALIDATE(stackmap->numChildren() >= stackmap->reps().size(), ("At ", *stackmap));
+        for (ConstrainedValue child : stackmap->constrainedChildren())
+            validateStackmapConstraint(stackmap, child);
+    }
+    
+    enum class ConstraintRole {
+        Use,
+        Def
+    };
+    void validateStackmapConstraint(Value* context, const ConstrainedValue& value, ConstraintRole role = ConstraintRole::Use)
+    {
+        switch (value.rep().kind()) {
+        case ValueRep::WarmAny:
+        case ValueRep::ColdAny:
+        case ValueRep::LateColdAny:
+        case ValueRep::SomeRegister:
+        case ValueRep::StackArgument:
+            break;
+        case ValueRep::SomeEarlyRegister:
+            VALIDATE(role == ConstraintRole::Def, ("At ", *context, ": ", value));
+            break;
+        case ValueRep::Register:
+        case ValueRep::LateRegister:
+            if (value.rep().reg().isGPR())
+                VALIDATE(isInt(value.value()->type()), ("At ", *context, ": ", value));
+            else
+                VALIDATE(isFloat(value.value()->type()), ("At ", *context, ": ", value));
+            break;
+        default:
+            VALIDATE(false, ("At ", *context, ": ", value));
+            break;
+        }
+    }
+
+    void validateStackAccess(Value* value)
+    {
+        MemoryValue* memory = value->as<MemoryValue>();
+        SlotBaseValue* slotBase = value->lastChild()->as<SlotBaseValue>();
+        if (!slotBase)
+            return;
+
+        StackSlot* stack = slotBase->slot();
+
+        VALIDATE(memory->offset() >= 0, ("At ", *value));
+        VALIDATE(memory->offset() + memory->accessByteSize() <= stack->byteSize(), ("At ", *value));
+    }
+    
+    NO_RETURN_DUE_TO_CRASH void fail(
+        const char* filename, int lineNumber, const char* function, const char* condition,
+        CString message)
+    {
+        CString failureMessage;
+        {
+            StringPrintStream out;
+            out.print("B3 VALIDATION FAILURE\n");
+            out.print("    ", condition, " (", filename, ":", lineNumber, ")\n");
+            out.print("    ", message, "\n");
+            out.print("    After ", m_procedure.lastPhaseName(), "\n");
+            failureMessage = out.toCString();
+        }
+
+        dataLog(failureMessage);
+        if (m_dumpBefore) {
+            dataLog("Before ", m_procedure.lastPhaseName(), ":\n");
+            dataLog(m_dumpBefore);
+        }
+        dataLog("At time of failure:\n");
+        dataLog(m_procedure);
+
+        dataLog(failureMessage);
+        WTFReportAssertionFailure(filename, lineNumber, function, condition);
+        CRASH();
+    }
+    
+    Procedure& m_procedure;
+    const char* m_dumpBefore;
+};
+
+} // anonymous namespace
+
+void validate(Procedure& procedure, const char* dumpBefore)
+{
+    Validater validater(procedure, dumpBefore);
+    validater.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Validate.h b/Source/JavaScriptCore/b3/B3Validate.h
new file mode 100644
index 000000000..d115e22e4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Validate.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+JS_EXPORT_PRIVATE void validate(Procedure&, const char* dumpBefore = nullptr);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Value.cpp b/Source/JavaScriptCore/b3/B3Value.cpp
new file mode 100644
index 000000000..b4fc43369
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Value.cpp
@@ -0,0 +1,870 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BottomProvider.h"
+#include "B3CCallValue.h"
+#include "B3FenceValue.h"
+#include "B3MemoryValue.h"
+#include "B3OriginDump.h"
+#include "B3ProcedureInlines.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+#include "B3VariableValue.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC { namespace B3 {
+
+const char* const Value::dumpPrefix = "@";
+
+Value::~Value()
+{
+}
+
+void Value::replaceWithIdentity(Value* value)
+{
+    // This is a bit crazy. It does an in-place replacement of whatever Value subclass this is with
+    // a plain Identity Value. We first collect all of the information we need, then we destruct the
+    // previous value in place, and then we construct the Identity Value in place.
+
+    ASSERT(m_type == value->m_type);
+
+    if (m_type == Void) {
+        replaceWithNopIgnoringType();
+        return;
+    }
+
+    unsigned index = m_index;
+    Type type = m_type;
+    Origin origin = m_origin;
+    BasicBlock* owner = this->owner;
+
+    RELEASE_ASSERT(type == value->type());
+
+    this->~Value();
+
+    new (this) Value(Identity, type, origin, value);
+
+    this->owner = owner;
+    this->m_index = index;
+}
+
+void Value::replaceWithBottom(InsertionSet& insertionSet, size_t index)
+{
+    replaceWithBottom(BottomProvider(insertionSet, index));
+}
+
+void Value::replaceWithNop()
+{
+    RELEASE_ASSERT(m_type == Void);
+    replaceWithNopIgnoringType();
+}
+
+void Value::replaceWithNopIgnoringType()
+{
+    unsigned index = m_index;
+    Origin origin = m_origin;
+    BasicBlock* owner = this->owner;
+
+    this->~Value();
+
+    new (this) Value(Nop, Void, origin);
+
+    this->owner = owner;
+    this->m_index = index;
+}
+
+void Value::replaceWithPhi()
+{
+    if (m_type == Void) {
+        replaceWithNop();
+        return;
+    }
+    
+    unsigned index = m_index;
+    Origin origin = m_origin;
+    BasicBlock* owner = this->owner;
+    Type type = m_type;
+
+    this->~Value();
+
+    new (this) Value(Phi, type, origin);
+
+    this->owner = owner;
+    this->m_index = index;
+}
+
+void Value::replaceWithJump(BasicBlock* owner, FrequentedBlock target)
+{
+    RELEASE_ASSERT(owner->last() == this);
+    
+    unsigned index = m_index;
+    Origin origin = m_origin;
+    
+    this->~Value();
+    
+    new (this) Value(Jump, Void, origin);
+    
+    this->owner = owner;
+    this->m_index = index;
+    
+    owner->setSuccessors(target);
+}
+
+void Value::replaceWithOops(BasicBlock* owner)
+{
+    RELEASE_ASSERT(owner->last() == this);
+    
+    unsigned index = m_index;
+    Origin origin = m_origin;
+    
+    this->~Value();
+    
+    new (this) Value(Oops, Void, origin);
+    
+    this->owner = owner;
+    this->m_index = index;
+    
+    owner->clearSuccessors();
+}
+
+void Value::replaceWithJump(FrequentedBlock target)
+{
+    replaceWithJump(owner, target);
+}
+
+void Value::replaceWithOops()
+{
+    replaceWithOops(owner);
+}
+
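+// Prints the compact one-line form used in B3 dumps: constants print as
+// "$<value>(@<index>)", e.g. a Const32 of 42 at index 3 prints as "$42(@3)",
+// while everything else prints as just "@<index>".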
+void Value::dump(PrintStream& out) const
+{
+    bool isConstant = false;
+
+    switch (opcode()) {
+    case Const32:
+        out.print("$", asInt32(), "(");
+        isConstant = true;
+        break;
+    case Const64:
+        out.print("$", asInt64(), "(");
+        isConstant = true;
+        break;
+    case ConstFloat:
+        out.print("$", asFloat(), "(");
+        isConstant = true;
+        break;
+    case ConstDouble:
+        out.print("$", asDouble(), "(");
+        isConstant = true;
+        break;
+    default:
+        break;
+    }
+    
+    out.print(dumpPrefix, m_index);
+
+    if (isConstant)
+        out.print(")");
+}
+
+Value* Value::cloneImpl() const
+{
+    return new Value(*this);
+}
+
+void Value::dumpChildren(CommaPrinter& comma, PrintStream& out) const
+{
+    for (Value* child : children())
+        out.print(comma, pointerDump(child));
+}
+
+void Value::deepDump(const Procedure* proc, PrintStream& out) const
+{
+    out.print(m_type, " ", dumpPrefix, m_index, " = ", m_kind);
+
+    out.print("(");
+    CommaPrinter comma;
+    dumpChildren(comma, out);
+
+    if (m_origin)
+        out.print(comma, OriginDump(proc, m_origin));
+
+    dumpMeta(comma, out);
+
+    {
+        CString string = toCString(effects());
+        if (string.length())
+            out.print(comma, string);
+    }
+
+    out.print(")");
+}
+
+void Value::dumpSuccessors(const BasicBlock* block, PrintStream& out) const
+{
+    // Note that this must not crash if we have the wrong number of successors, since someone
+    // debugging a number-of-successors bug will probably want to dump IR!
+    
+    if (opcode() == Branch && block->numSuccessors() == 2) {
+        out.print("Then:", block->taken(), ", Else:", block->notTaken());
+        return;
+    }
+    
+    out.print(listDump(block->successors()));
+}
+
+Value* Value::negConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::addConstant(Procedure&, int32_t) const
+{
+    return nullptr;
+}
+
+Value* Value::addConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::subConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::mulConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::checkAddConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::checkSubConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::checkMulConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::checkNegConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::divConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::uDivConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::modConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::uModConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::bitAndConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::bitOrConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::bitXorConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::shlConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::sShrConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::zShrConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::rotRConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::rotLConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::bitwiseCastConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::iToDConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::iToFConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::doubleToFloatConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::floatToDoubleConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::absConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::ceilConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::floorConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::sqrtConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+TriState Value::equalConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::notEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::lessThanConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::greaterThanConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::lessEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::greaterEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::aboveConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::belowConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::aboveEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::belowEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::equalOrUnorderedConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+Value* Value::invertedCompare(Procedure& proc) const
+{
+    if (!numChildren())
+        return nullptr;
+    if (std::optional<Opcode> invertedOpcode = B3::invertedCompare(opcode(), child(0)->type())) {
+        ASSERT(!kind().hasExtraBits());
+        return proc.add<Value>(*invertedOpcode, type(), origin(), children());
+    }
+    return nullptr;
+}
+
+bool Value::isRounded() const
+{
+    ASSERT(isFloat(type()));
+    switch (opcode()) {
+    case Floor:
+    case Ceil:
+    case IToD:
+    case IToF:
+        return true;
+
+    case ConstDouble: {
+        double value = asDouble();
+        return std::isfinite(value) && value == ceil(value);
+    }
+
+    case ConstFloat: {
+        float value = asFloat();
+        return std::isfinite(value) && value == ceilf(value);
+    }
+
+    default:
+        return false;
+    }
+}
+
+bool Value::returnsBool() const
+{
+    if (type() != Int32)
+        return false;
+    switch (opcode()) {
+    case Const32:
+        return asInt32() == 0 || asInt32() == 1;
+    case BitAnd:
+        return child(1)->isInt32(1)
+            || (child(0)->returnsBool() && child(1)->hasInt() && child(1)->asInt() & 1);
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case LessEqual:
+    case GreaterEqual:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+    case EqualOrUnordered:
+        return true;
+    case Phi:
+        // FIXME: We should have a story here.
+        // https://bugs.webkit.org/show_bug.cgi?id=150725
+        return false;
+    default:
+        return false;
+    }
+}
+
+TriState Value::asTriState() const
+{
+    switch (opcode()) {
+    case Const32:
+        return triState(!!asInt32());
+    case Const64:
+        return triState(!!asInt64());
+    case ConstDouble:
+        // Use "!= 0" to really emphasize what this mean with respect to NaN and such.
+        return triState(asDouble() != 0);
+    case ConstFloat:
+        return triState(asFloat() != 0.);
+    default:
+        return MixedTriState;
+    }
+}
+
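+// Summarizes this value's abstract effects: pure arithmetic reports none,
+// loads and stores report the heap ranges they touch, Upsilon/Set/Phi/Get
+// report local-state effects, and terminals report the terminal bit.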
+Effects Value::effects() const
+{
+    Effects result;
+    switch (opcode()) {
+    case Nop:
+    case Identity:
+    case Const32:
+    case Const64:
+    case ConstDouble:
+    case ConstFloat:
+    case SlotBase:
+    case ArgumentReg:
+    case FramePointer:
+    case Add:
+    case Sub:
+    case Mul:
+    case Neg:
+    case BitAnd:
+    case BitOr:
+    case BitXor:
+    case Shl:
+    case SShr:
+    case ZShr:
+    case RotR:
+    case RotL:
+    case Clz:
+    case Abs:
+    case Ceil:
+    case Floor:
+    case Sqrt:
+    case BitwiseCast:
+    case SExt8:
+    case SExt16:
+    case SExt32:
+    case ZExt32:
+    case Trunc:
+    case IToD:
+    case IToF:
+    case FloatToDouble:
+    case DoubleToFloat:
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case LessEqual:
+    case GreaterEqual:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+    case EqualOrUnordered:
+    case Select:
+        break;
+    case Div:
+    case UDiv:
+    case Mod:
+    case UMod:
+        result.controlDependent = true;
+        break;
+    case Load8Z:
+    case Load8S:
+    case Load16Z:
+    case Load16S:
+    case Load:
+        result.reads = as<MemoryValue>()->range();
+        result.controlDependent = true;
+        break;
+    case Store8:
+    case Store16:
+    case Store:
+        result.writes = as<MemoryValue>()->range();
+        result.controlDependent = true;
+        break;
+    case WasmAddress:
+        result.readsPinned = true;
+        break;
+    case Fence: {
+        const FenceValue* fence = as<FenceValue>();
+        result.reads = fence->read;
+        result.writes = fence->write;
+        
+        // Prevent killing of fences that claim not to write anything. It's a bit weird that we use
+        // local state as the way to do this, but it happens to work: we must assume that we cannot
+        // kill writesLocalState unless we understand exactly what the instruction is doing (like
+        // the way that fixSSA understands Set/Get and the way that reduceStrength and others
+        // understand Upsilon). This would only become a problem if we had some analysis that was
+        // looking to use the writesLocalState bit to invalidate a CSE over local state operations.
+        // Then a Fence would block, say, the elimination of a redundant Get. But it looks like
+        // that's not at all how our optimizations for Set/Get/Upsilon/Phi work - they grok their
+        // operations deeply enough that they have no need to check this bit - so this cheat is
+        // fine.
+        result.writesLocalState = true;
+        break;
+    }
+    case CCall:
+        result = as<CCallValue>()->effects;
+        break;
+    case Patchpoint:
+        result = as<PatchpointValue>()->effects;
+        break;
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+    case Check:
+        result = Effects::forCheck();
+        break;
+    case WasmBoundsCheck:
+        result.readsPinned = true;
+        result.exitsSideways = true;
+        break;
+    case Upsilon:
+    case Set:
+        result.writesLocalState = true;
+        break;
+    case Phi:
+    case Get:
+        result.readsLocalState = true;
+        break;
+    case Jump:
+    case Branch:
+    case Switch:
+    case Return:
+    case Oops:
+    case EntrySwitch:
+        result.terminal = true;
+        break;
+    }
+    if (traps()) {
+        result.exitsSideways = true;
+        result.reads = HeapRange::top();
+    }
+    return result;
+}
+
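+// Produces a hashable summary of this value for matching in CSE-style
+// analyses; only effect-free shapes get a non-empty key, and B3Validate
+// checks that no value both writes and has a key.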
+ValueKey Value::key() const
+{
+    switch (opcode()) {
+    case FramePointer:
+        return ValueKey(kind(), type());
+    case Identity:
+    case Abs:
+    case Ceil:
+    case Floor:
+    case Sqrt:
+    case SExt8:
+    case SExt16:
+    case SExt32:
+    case ZExt32:
+    case Clz:
+    case Trunc:
+    case IToD:
+    case IToF:
+    case FloatToDouble:
+    case DoubleToFloat:
+    case Check:
+    case BitwiseCast:
+    case Neg:
+        return ValueKey(kind(), type(), child(0));
+    case Add:
+    case Sub:
+    case Mul:
+    case Div:
+    case UDiv:
+    case Mod:
+    case UMod:
+    case BitAnd:
+    case BitOr:
+    case BitXor:
+    case Shl:
+    case SShr:
+    case ZShr:
+    case RotR:
+    case RotL:
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+    case EqualOrUnordered:
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+        return ValueKey(kind(), type(), child(0), child(1));
+    case Select:
+        return ValueKey(kind(), type(), child(0), child(1), child(2));
+    case Const32:
+        return ValueKey(Const32, type(), static_cast<int64_t>(asInt32()));
+    case Const64:
+        return ValueKey(Const64, type(), asInt64());
+    case ConstDouble:
+        return ValueKey(ConstDouble, type(), asDouble());
+    case ConstFloat:
+        return ValueKey(ConstFloat, type(), asFloat());
+    case ArgumentReg:
+        return ValueKey(
+            ArgumentReg, type(),
+            static_cast<int64_t>(as<ArgumentRegValue>()->argumentReg().index()));
+    case SlotBase:
+        return ValueKey(
+            SlotBase, type(),
+            static_cast<int64_t>(as<SlotBaseValue>()->slot()->index()));
+    default:
+        return ValueKey();
+    }
+}
+
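+// Forwards each child through any chain of Identity values, completing the
+// replaceWithIdentity() protocol described in B3Value.h.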
+void Value::performSubstitution()
+{
+    for (Value*& child : children()) {
+        while (child->opcode() == Identity)
+            child = child->child(0);
+    }
+}
+
+bool Value::isFree() const
+{
+    switch (opcode()) {
+    case Const32:
+    case Const64:
+    case ConstDouble:
+    case ConstFloat:
+    case Identity:
+    case Nop:
+        return true;
+    default:
+        return false;
+    }
+}
+
+void Value::dumpMeta(CommaPrinter&, PrintStream&) const
+{
+}
+
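+// Computes the type a value of the given kind would have from its children.
+// For example, typeFor(Add, x) is x->type(), comparisons such as
+// typeFor(Equal, x, y) are always Int32, and typeFor(SExt32, x) is Int64.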
+Type Value::typeFor(Kind kind, Value* firstChild, Value* secondChild)
+{
+    switch (kind.opcode()) {
+    case Identity:
+    case Add:
+    case Sub:
+    case Mul:
+    case Div:
+    case UDiv:
+    case Mod:
+    case UMod:
+    case Neg:
+    case BitAnd:
+    case BitOr:
+    case BitXor:
+    case Shl:
+    case SShr:
+    case ZShr:
+    case RotR:
+    case RotL:
+    case Clz:
+    case Abs:
+    case Ceil:
+    case Floor:
+    case Sqrt:
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+        return firstChild->type();
+    case FramePointer:
+        return pointerType();
+    case SExt8:
+    case SExt16:
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case LessEqual:
+    case GreaterEqual:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+    case EqualOrUnordered:
+        return Int32;
+    case Trunc:
+        return firstChild->type() == Int64 ? Int32 : Float;
+    case SExt32:
+    case ZExt32:
+        return Int64;
+    case FloatToDouble:
+    case IToD:
+        return Double;
+    case DoubleToFloat:
+    case IToF:
+        return Float;
+    case BitwiseCast:
+        switch (firstChild->type()) {
+        case Int64:
+            return Double;
+        case Double:
+            return Int64;
+        case Int32:
+            return Float;
+        case Float:
+            return Int32;
+        case Void:
+            ASSERT_NOT_REACHED();
+        }
+        return Void;
+    case Nop:
+    case Jump:
+    case Branch:
+    case Return:
+    case Oops:
+    case EntrySwitch:
+    case WasmBoundsCheck:
+        return Void;
+    case Select:
+        ASSERT(secondChild);
+        return secondChild->type();
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+}
+
+void Value::badKind(Kind kind, unsigned numArgs)
+{
+    dataLog("Bad kind ", kind, " with ", numArgs, " args.\n");
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Value.h b/Source/JavaScriptCore/b3/B3Value.h
new file mode 100644
index 000000000..ebe52ad3f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Value.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "B3Effects.h"
+#include "B3FrequentedBlock.h"
+#include "B3Kind.h"
+#include "B3Origin.h"
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include "B3ValueKey.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class CheckValue;
+class InsertionSet;
+class PhiChildren;
+class Procedure;
+
+class JS_EXPORT_PRIVATE Value {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    typedef Vector<Value*, 3> AdjacencyList;
+
+    static const char* const dumpPrefix;
+
+    static bool accepts(Kind) { return true; }
+
+    virtual ~Value();
+
+    unsigned index() const { return m_index; }
+    
+    // Note that the kind is immutable, except for replacing values with:
+    // Identity, Nop, Oops, Jump, and Phi. See below for replaceWithXXX() methods.
+    Kind kind() const { return m_kind; }
+    
+    Opcode opcode() const { return kind().opcode(); }
+    
+    // It's good practice to mirror Kind methods here, so you can say value->isBlah()
+    // instead of value->kind().isBlah().
+    bool isChill() const { return kind().isChill(); }
+    bool traps() const { return kind().traps(); }
+
+    Origin origin() const { return m_origin; }
+    void setOrigin(Origin origin) { m_origin = origin; }
+    
+    Value*& child(unsigned index) { return m_children[index]; }
+    Value* child(unsigned index) const { return m_children[index]; }
+
+    Value*& lastChild() { return m_children.last(); }
+    Value* lastChild() const { return m_children.last(); }
+
+    unsigned numChildren() const { return m_children.size(); }
+
+    Type type() const { return m_type; }
+    void setType(Type type) { m_type = type; }
+
+    // This is useful when lowering. Note that this is only valid for non-void values.
+    Air::Arg::Type airType() const { return Air::Arg::typeForB3Type(type()); }
+    Air::Arg::Width airWidth() const { return Air::Arg::widthForB3Type(type()); }
+
+    AdjacencyList& children() { return m_children; } 
+    const AdjacencyList& children() const { return m_children; }
+
+    // If you want to replace all uses of this value with a different value, then replace this
+    // value with Identity. Then do a pass of performSubstitution() on all of the values that use
+    // this one. Usually we do all of this in one pass in pre-order, which ensures that the
+    // X->replaceWithIdentity() calls happen before the performSubstitution() calls on X's users.
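+    //
+    // A sketch of that idiom (assuming proc is the enclosing Procedure):
+    //
+    //     oldValue->replaceWithIdentity(newValue);
+    //     for (BasicBlock* block : proc) {
+    //         for (Value* value : *block)
+    //             value->performSubstitution();
+    //     }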
+    void replaceWithIdentity(Value*);
+    
+    // It's often necessary to kill a value. It's tempting to replace the value with Nop or to
+    // just remove it. But unless you are sure that the value is Void, you will probably still
+    // have other values that use this one. Sure, you may kill those later, or you might not. This
+    // method lets you kill a value safely. It will replace Void values with Nop and non-Void
+    // values with Identities on bottom constants. For this reason, this takes a callback that is
+    // responsible for creating bottoms. There's a utility for this, see B3BottomProvider.h. You
+    // can also access that utility using replaceWithBottom(InsertionSet&, size_t).
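+    //
+    // For example (a sketch), within a phase that maintains an InsertionSet:
+    //
+    //     block->at(index)->replaceWithBottom(insertionSet, index);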
+    template<typename BottomProvider>
+    void replaceWithBottom(const BottomProvider&);
+    
+    void replaceWithBottom(InsertionSet&, size_t index);
+
+    // Use this if you want to kill a value and you are sure that the value is Void.
+    void replaceWithNop();
+    
+    // Use this if you want to kill a value and you are sure that nobody is using it anymore.
+    void replaceWithNopIgnoringType();
+    
+    void replaceWithPhi();
+    
+    // These transformations are only valid for terminals.
+    void replaceWithJump(BasicBlock* owner, FrequentedBlock);
+    void replaceWithOops(BasicBlock* owner);
+    
+    // You can use this form if owners are valid. They're usually not valid.
+    void replaceWithJump(FrequentedBlock);
+    void replaceWithOops();
+
+    void dump(PrintStream&) const;
+    void deepDump(const Procedure*, PrintStream&) const;
+    
+    virtual void dumpSuccessors(const BasicBlock*, PrintStream&) const;
+
+    // This is how you cast Values. For example, if you want to do something provided that we have an
+    // ArgumentRegValue, you can do:
+    //
+    // if (ArgumentRegValue* argumentReg = value->as<ArgumentRegValue>()) {
+    //     things
+    // }
+    //
+    // This will return null if this kind() != ArgumentReg. This works because this returns nullptr
+    // if T::accepts(kind()) returns false.
+    template<typename T>
+    T* as();
+    template<typename T>
+    const T* as() const;
+
+    // What follows are a bunch of helpers for inspecting and modifying values. Note that we have a
+    // bunch of different idioms for implementing such helpers. You can use virtual methods, and
+    // override from the various Value subclasses. You can put the method inside Value and make it
+    // non-virtual, and the implementation can switch on kind. The method could be inline or not.
+    // If a method is specific to some Value subclass, you could put it in the subclass, or you could
+    // put it on Value anyway. It's fine to pick whatever feels right, and we shouldn't restrict
+    // ourselves to any particular idiom.
+
+    bool isConstant() const;
+    bool isInteger() const;
+    
+    virtual Value* negConstant(Procedure&) const;
+    virtual Value* addConstant(Procedure&, int32_t other) const;
+    virtual Value* addConstant(Procedure&, const Value* other) const;
+    virtual Value* subConstant(Procedure&, const Value* other) const;
+    virtual Value* mulConstant(Procedure&, const Value* other) const;
+    virtual Value* checkAddConstant(Procedure&, const Value* other) const;
+    virtual Value* checkSubConstant(Procedure&, const Value* other) const;
+    virtual Value* checkMulConstant(Procedure&, const Value* other) const;
+    virtual Value* checkNegConstant(Procedure&) const;
+    virtual Value* divConstant(Procedure&, const Value* other) const; // This chooses Div<Chill> semantics for integers.
+    virtual Value* uDivConstant(Procedure&, const Value* other) const;
+    virtual Value* modConstant(Procedure&, const Value* other) const; // This chooses Mod<Chill> semantics.
+    virtual Value* uModConstant(Procedure&, const Value* other) const;
+    virtual Value* bitAndConstant(Procedure&, const Value* other) const;
+    virtual Value* bitOrConstant(Procedure&, const Value* other) const;
+    virtual Value* bitXorConstant(Procedure&, const Value* other) const;
+    virtual Value* shlConstant(Procedure&, const Value* other) const;
+    virtual Value* sShrConstant(Procedure&, const Value* other) const;
+    virtual Value* zShrConstant(Procedure&, const Value* other) const;
+    virtual Value* rotRConstant(Procedure&, const Value* other) const;
+    virtual Value* rotLConstant(Procedure&, const Value* other) const;
+    virtual Value* bitwiseCastConstant(Procedure&) const;
+    virtual Value* iToDConstant(Procedure&) const;
+    virtual Value* iToFConstant(Procedure&) const;
+    virtual Value* doubleToFloatConstant(Procedure&) const;
+    virtual Value* floatToDoubleConstant(Procedure&) const;
+    virtual Value* absConstant(Procedure&) const;
+    virtual Value* ceilConstant(Procedure&) const;
+    virtual Value* floorConstant(Procedure&) const;
+    virtual Value* sqrtConstant(Procedure&) const;
+
+    virtual TriState equalConstant(const Value* other) const;
+    virtual TriState notEqualConstant(const Value* other) const;
+    virtual TriState lessThanConstant(const Value* other) const;
+    virtual TriState greaterThanConstant(const Value* other) const;
+    virtual TriState lessEqualConstant(const Value* other) const;
+    virtual TriState greaterEqualConstant(const Value* other) const;
+    virtual TriState aboveConstant(const Value* other) const;
+    virtual TriState belowConstant(const Value* other) const;
+    virtual TriState aboveEqualConstant(const Value* other) const;
+    virtual TriState belowEqualConstant(const Value* other) const;
+    virtual TriState equalOrUnorderedConstant(const Value* other) const;
+    
+    // If the value is a comparison then this returns the inverted form of that comparison, if
+    // possible. It can be impossible for double comparisons, where for example LessThan and
+    // GreaterEqual behave differently. If this returns a value, it is a new value, which must be
+    // either inserted into some block or deleted.
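+    //
+    // For example (illustrative): inverting Equal(a, b) yields NotEqual(a, b), while inverting
+    // LessThan(a, b) over doubles fails, since with NaN inputs "not (a < b)" differs from
+    // "a >= b".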
+    Value* invertedCompare(Procedure&) const;
+
+    bool hasInt32() const;
+    int32_t asInt32() const;
+    bool isInt32(int32_t) const;
+    
+    bool hasInt64() const;
+    int64_t asInt64() const;
+    bool isInt64(int64_t) const;
+
+    bool hasInt() const;
+    int64_t asInt() const;
+    bool isInt(int64_t value) const;
+
+    bool hasIntPtr() const;
+    intptr_t asIntPtr() const;
+    bool isIntPtr(intptr_t) const;
+
+    bool hasDouble() const;
+    double asDouble() const;
+    bool isEqualToDouble(double) const; // We say "isEqualToDouble" because "isDouble" would be a bit equality.
+
+    bool hasFloat() const;
+    float asFloat() const;
+
+    bool hasNumber() const;
+    template<typename T> bool isRepresentableAs() const;
+    template<typename T> T asNumber() const;
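+    //
+    // For example (illustrative): for a Const64 holding 1 << 40, isRepresentableAs<int32_t>()
+    // is false, while asNumber<double>() returns 1099511627776.0.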
+
+    // Booleans in B3 are Const32(0) or Const32(1). So this is true if the type is Int32 and the only
+    // possible return values are 0 or 1. It's OK for this method to conservatively return false.
+    bool returnsBool() const;
+
+    bool isNegativeZero() const;
+
+    bool isRounded() const;
+
+    TriState asTriState() const;
+    bool isLikeZero() const { return asTriState() == FalseTriState; }
+    bool isLikeNonZero() const { return asTriState() == TrueTriState; }
+
+    Effects effects() const;
+
+    // This returns a ValueKey that describes the value that this Value returns when it executes. Returns an
+    // empty ValueKey if this Value is impure. Note that an operation that returns Void could still
+    // have a non-empty ValueKey. This happens for example with Check operations.
+    ValueKey key() const;
+
+    // Makes sure that none of the children are Identity's. If a child points to Identity, this will
+    // repoint it at the Identity's child. For simplicity, this will follow arbitrarily long chains
+    // of Identity's.
+    void performSubstitution();
+    
+    // Free values are those whose presence is guaranteed not to hurt code. We consider constants,
+    // Identities, and Nops to be free. Constants are free because we hoist them to an optimal place.
+    // Identities and Nops are free because we remove them.
+    bool isFree() const;
+
+    // Walk the ancestors of this value (i.e. the graph of things it transitively uses). This
+    // either walks phis or not, depending on whether PhiChildren is null. Your callback gets
+    // called with the signature:
+    //
+    //     (Value*) -> WalkStatus
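+    //
+    // For example (illustrative), counting the values reachable from this one, including
+    // this one:
+    //
+    //     unsigned count = 0;
+    //     value->walk([&] (Value*) -> Value::WalkStatus {
+    //         count++;
+    //         return Value::Continue;
+    //     });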
+    enum WalkStatus {
+        Continue,
+        IgnoreChildren,
+        Stop
+    };
+    template<typename Functor>
+    void walk(const Functor& functor, PhiChildren* = nullptr);
+
+protected:
+    virtual Value* cloneImpl() const;
+    
+    virtual void dumpChildren(CommaPrinter&, PrintStream&) const;
+    virtual void dumpMeta(CommaPrinter&, PrintStream&) const;
+
+private:
+    friend class Procedure;
+    friend class SparseCollection<Value>;
+
+    // Checks that this kind is valid for use with B3::Value.
+    ALWAYS_INLINE static void checkKind(Kind kind, unsigned numArgs)
+    {
+        switch (kind.opcode()) {
+        case FramePointer:
+        case Nop:
+        case Phi:
+        case Jump:
+        case Oops:
+        case EntrySwitch:
+            if (UNLIKELY(numArgs))
+                badKind(kind, numArgs);
+            break;
+        case Return:
+            if (UNLIKELY(numArgs > 1))
+                badKind(kind, numArgs);
+            break;
+        case Identity:
+        case Neg:
+        case Clz:
+        case Abs:
+        case Ceil:
+        case Floor:
+        case Sqrt:
+        case SExt8:
+        case SExt16:
+        case Trunc:
+        case SExt32:
+        case ZExt32:
+        case FloatToDouble:
+        case IToD:
+        case DoubleToFloat:
+        case IToF:
+        case BitwiseCast:
+        case Branch:
+            if (UNLIKELY(numArgs != 1))
+                badKind(kind, numArgs);
+            break;
+        case Add:
+        case Sub:
+        case Mul:
+        case Div:
+        case UDiv:
+        case Mod:
+        case UMod:
+        case BitAnd:
+        case BitOr:
+        case BitXor:
+        case Shl:
+        case SShr:
+        case ZShr:
+        case RotR:
+        case RotL:
+        case Equal:
+        case NotEqual:
+        case LessThan:
+        case GreaterThan:
+        case LessEqual:
+        case GreaterEqual:
+        case Above:
+        case Below:
+        case AboveEqual:
+        case BelowEqual:
+        case EqualOrUnordered:
+            if (UNLIKELY(numArgs != 2))
+                badKind(kind, numArgs);
+            break;
+        case Select:
+            if (UNLIKELY(numArgs != 3))
+                badKind(kind, numArgs);
+            break;
+        default:
+            badKind(kind, numArgs);
+            break;
+        }
+    }
+
+protected:
+    enum CheckedOpcodeTag { CheckedOpcode };
+
+    Value(const Value&) = default;
+    Value& operator=(const Value&) = default;
+    
+    // Instantiate values via Procedure.
+    // This form requires specifying the type explicitly:
+    template<typename... Arguments>
+    explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, Value* firstChild, Arguments... arguments)
+        : m_kind(kind)
+        , m_type(type)
+        , m_origin(origin)
+        , m_children{ firstChild, arguments... }
+    {
+    }
+    // This form is for specifying the type explicitly when the opcode has no children:
+    explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin)
+        : m_kind(kind)
+        , m_type(type)
+        , m_origin(origin)
+    {
+    }
+    // This form is for those opcodes that can infer their type from the opcode and first child:
+    template<typename... Arguments>
+    explicit Value(CheckedOpcodeTag, Kind kind, Origin origin, Value* firstChild)
+        : m_kind(kind)
+        , m_type(typeFor(kind, firstChild))
+        , m_origin(origin)
+        , m_children{ firstChild }
+    {
+    }
+    // This form is for those opcodes that can infer their type from the opcode and first and second child:
+    template<typename... Arguments>
+    explicit Value(CheckedOpcodeTag, Kind kind, Origin origin, Value* firstChild, Value* secondChild, Arguments... arguments)
+        : m_kind(kind)
+        , m_type(typeFor(kind, firstChild, secondChild))
+        , m_origin(origin)
+        , m_children{ firstChild, secondChild, arguments... }
+    {
+    }
+    // This form is for those opcodes that can infer their type from the opcode alone, and that don't
+    // take any arguments:
+    explicit Value(CheckedOpcodeTag, Kind kind, Origin origin)
+        : m_kind(kind)
+        , m_type(typeFor(kind, nullptr))
+        , m_origin(origin)
+    {
+    }
+    // Use this form for varargs.
+    explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, const AdjacencyList& children)
+        : m_kind(kind)
+        , m_type(type)
+        , m_origin(origin)
+        , m_children(children)
+    {
+    }
+    explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, AdjacencyList&& children)
+        : m_kind(kind)
+        , m_type(type)
+        , m_origin(origin)
+        , m_children(WTFMove(children))
+    {
+    }
+
+    // This is the constructor you end up actually calling, if you're instantiating Value
+    // directly.
+    template<typename... Arguments>
+        explicit Value(Kind kind, Type type, Origin origin)
+        : Value(CheckedOpcode, kind, type, origin)
+    {
+        checkKind(kind, 0);
+    }
+    template<typename... Arguments>
+        explicit Value(Kind kind, Type type, Origin origin, Value* firstChild, Arguments&&... arguments)
+        : Value(CheckedOpcode, kind, type, origin, firstChild, std::forward<Arguments>(arguments)...)
+    {
+        checkKind(kind, 1 + sizeof...(arguments));
+    }
+    template<typename... Arguments>
+        explicit Value(Kind kind, Type type, Origin origin, const AdjacencyList& children)
+        : Value(CheckedOpcode, kind, type, origin, children)
+    {
+        checkKind(kind, children.size());
+    }
+    template<typename... Arguments>
+        explicit Value(Kind kind, Type type, Origin origin, AdjacencyList&& children)
+        : Value(CheckedOpcode, kind, type, origin, WTFMove(children))
+    {
+        checkKind(kind, m_children.size());
+    }
+    template<typename... Arguments>
+        explicit Value(Kind kind, Origin origin, Arguments&&... arguments)
+        : Value(CheckedOpcode, kind, origin, std::forward<Arguments>(arguments)...)
+    {
+        checkKind(kind, sizeof...(arguments));
+    }
+
+private:
+    friend class CheckValue; // CheckValue::convertToAdd() modifies m_kind.
+    
+    static Type typeFor(Kind, Value* firstChild, Value* secondChild = nullptr);
+
+    // This group of fields is arranged to fit in 64 bits.
+protected:
+    unsigned m_index { UINT_MAX };
+private:
+    Kind m_kind;
+    Type m_type;
+    
+    Origin m_origin;
+    AdjacencyList m_children;
+
+    JS_EXPORT_PRIVATE NO_RETURN_DUE_TO_CRASH static void badKind(Kind, unsigned);
+
+public:
+    BasicBlock* owner { nullptr }; // computed by Procedure::resetValueOwners().
+};
+
+class DeepValueDump {
+public:
+    DeepValueDump(const Procedure* proc, const Value* value)
+        : m_proc(proc)
+        , m_value(value)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_value)
+            m_value->deepDump(m_proc, out);
+        else
+            out.print("");
+    }
+
+private:
+    const Procedure* m_proc;
+    const Value* m_value;
+};
+
+inline DeepValueDump deepDump(const Procedure& proc, const Value* value)
+{
+    return DeepValueDump(&proc, value);
+}
+inline DeepValueDump deepDump(const Value* value)
+{
+    return DeepValueDump(nullptr, value);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueInlines.h b/Source/JavaScriptCore/b3/B3ValueInlines.h
new file mode 100644
index 000000000..57f93d60a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueInlines.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CheckValue.h"
+#include "B3Const32Value.h"
+#include "B3Const64Value.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstFloatValue.h"
+#include "B3PatchpointValue.h"
+#include "B3PhiChildren.h"
+#include "B3Procedure.h"
+#include "B3Value.h"
+#include <wtf/GraphNodeWorklist.h>
+
+namespace JSC { namespace B3 {
+
+template<typename BottomProvider>
+void Value::replaceWithBottom(const BottomProvider& bottomProvider)
+{
+    if (m_type == Void) {
+        replaceWithNop();
+        return;
+    }
+    
+    if (isConstant())
+        return;
+    
+    replaceWithIdentity(bottomProvider(m_origin, m_type));
+}
+
+template<typename T>
+inline T* Value::as()
+{
+    if (T::accepts(kind()))
+        return static_cast<T*>(this);
+    return nullptr;
+}
+
+template<typename T>
+inline const T* Value::as() const
+{
+    return const_cast<Value*>(this)->as<T>();
+}
+
+inline bool Value::isConstant() const
+{
+    return B3::isConstant(opcode());
+}
+
+inline bool Value::isInteger() const
+{
+    return type() == Int32 || type() == Int64;
+}
+
+inline bool Value::hasInt32() const
+{
+    return !!as<Const32Value>();
+}
+
+inline int32_t Value::asInt32() const
+{
+    return as<Const32Value>()->value();
+}
+
+inline bool Value::isInt32(int32_t value) const
+{
+    return hasInt32() && asInt32() == value;
+}
+
+inline bool Value::hasInt64() const
+{
+    return !!as<Const64Value>();
+}
+
+inline int64_t Value::asInt64() const
+{
+    return as<Const64Value>()->value();
+}
+
+inline bool Value::isInt64(int64_t value) const
+{
+    return hasInt64() && asInt64() == value;
+}
+
+inline bool Value::hasInt() const
+{
+    return hasInt32() || hasInt64();
+}
+
+inline int64_t Value::asInt() const
+{
+    return hasInt32() ? asInt32() : asInt64();
+}
+
+inline bool Value::isInt(int64_t value) const
+{
+    return hasInt() && asInt() == value;
+}
+
+inline bool Value::hasIntPtr() const
+{
+    if (is64Bit())
+        return hasInt64();
+    return hasInt32();
+}
+
+inline intptr_t Value::asIntPtr() const
+{
+    if (is64Bit())
+        return asInt64();
+    return asInt32();
+}
+
+inline bool Value::isIntPtr(intptr_t value) const
+{
+    return hasIntPtr() && asIntPtr() == value;
+}
+
+inline bool Value::hasDouble() const
+{
+    return !!as<ConstDoubleValue>();
+}
+
+inline double Value::asDouble() const
+{
+    return as<ConstDoubleValue>()->value();
+}
+
+inline bool Value::isEqualToDouble(double value) const
+{
+    return hasDouble() && asDouble() == value;
+}
+
+inline bool Value::hasFloat() const
+{
+    return !!as<ConstFloatValue>();
+}
+
+inline float Value::asFloat() const
+{
+    return as<ConstFloatValue>()->value();
+}
+
+inline bool Value::hasNumber() const
+{
+    return hasInt() || hasDouble() || hasFloat();
+}
+
+inline bool Value::isNegativeZero() const
+{
+    if (hasDouble()) {
+        double value = asDouble();
+        return !value && std::signbit(value);
+    }
+    if (hasFloat()) {
+        float value = asFloat();
+        return !value && std::signbit(value);
+    }
+    return false;
+}
+
+template<typename T>
+inline bool Value::isRepresentableAs() const
+{
+    switch (opcode()) {
+    case Const32:
+        return B3::isRepresentableAs<T>(asInt32());
+    case Const64:
+        return B3::isRepresentableAs<T>(asInt64());
+    case ConstDouble:
+        return B3::isRepresentableAs<T>(asDouble());
+    case ConstFloat:
+        return B3::isRepresentableAs<T>(asFloat());
+    default:
+        return false;
+    }
+}
+
+template<typename T>
+inline T Value::asNumber() const
+{
+    switch (opcode()) {
+    case Const32:
+        return static_cast<T>(asInt32());
+    case Const64:
+        return static_cast<T>(asInt64());
+    case ConstDouble:
+        return static_cast<T>(asDouble());
+    case ConstFloat:
+        return static_cast<T>(asFloat());
+    default:
+        return T();
+    }
+}
+
+template<typename Functor>
+void Value::walk(const Functor& functor, PhiChildren* phiChildren)
+{
+    GraphNodeWorklist<Value*> worklist;
+    worklist.push(this);
+    while (Value* value = worklist.pop()) {
+        WalkStatus status = functor(value);
+        switch (status) {
+        case Continue:
+            if (value->opcode() == Phi) {
+                if (phiChildren)
+                    worklist.pushAll(phiChildren->at(value).values());
+            } else
+                worklist.pushAll(value->children());
+            break;
+        case IgnoreChildren:
+            break;
+        case Stop:
+            return;
+        }
+    }
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueKey.cpp b/Source/JavaScriptCore/b3/B3ValueKey.cpp
new file mode 100644
index 000000000..10edff3c4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueKey.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ValueKey.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ArgumentRegValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3SlotBaseValue.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+
+namespace JSC { namespace B3 {
+
+ValueKey ValueKey::intConstant(Type type, int64_t value)
+{
+    switch (type) {
+    case Int32:
+        return ValueKey(Const32, Int32, value);
+    case Int64:
+        return ValueKey(Const64, Int64, value);
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return ValueKey();
+    }
+}
+
+void ValueKey::dump(PrintStream& out) const
+{
+    out.print(m_type, " ", m_kind, "(", u.indices[0], ", ", u.indices[1], ", ", u.indices[2], ")");
+}
+
+Value* ValueKey::materialize(Procedure& proc, Origin origin) const
+{
+    switch (opcode()) {
+    case FramePointer:
+        return proc.add<Value>(kind(), type(), origin);
+    case Identity:
+    case Sqrt:
+    case SExt8:
+    case SExt16:
+    case SExt32:
+    case ZExt32:
+    case Clz:
+    case Trunc:
+    case IToD:
+    case IToF:
+    case FloatToDouble:
+    case DoubleToFloat:
+    case Check:
+        return proc.add<Value>(kind(), type(), origin, child(proc, 0));
+    case Add:
+    case Sub:
+    case Mul:
+    case Div:
+    case UDiv:
+    case Mod:
+    case UMod:
+    case BitAnd:
+    case BitOr:
+    case BitXor:
+    case Shl:
+    case SShr:
+    case ZShr:
+    case RotR:
+    case RotL:
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+        return proc.add<Value>(kind(), type(), origin, child(proc, 0), child(proc, 1));
+    case Select:
+        return proc.add<Value>(kind(), type(), origin, child(proc, 0), child(proc, 1), child(proc, 2));
+    case Const32:
+        return proc.add<Const32Value>(origin, static_cast<int32_t>(value()));
+    case Const64:
+        return proc.add<Const64Value>(origin, value());
+    case ConstDouble:
+        return proc.add<ConstDoubleValue>(origin, doubleValue());
+    case ConstFloat:
+        return proc.add<ConstFloatValue>(origin, floatValue());
+    case ArgumentReg:
+        return proc.add<ArgumentRegValue>(origin, Reg::fromIndex(static_cast<unsigned>(value())));
+    case SlotBase:
+        return proc.add<SlotBaseValue>(origin, proc.stackSlots()[value()]);
+    default:
+        return nullptr;
+    }
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3ValueKey.h b/Source/JavaScriptCore/b3/B3ValueKey.h
new file mode 100644
index 000000000..18b092c59
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueKey.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Kind.h"
+#include "B3Origin.h"
+#include "B3Type.h"
+#include <wtf/HashTable.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+class Value;
+
+// ValueKeys are useful for CSE. They abstractly describe the value that a Value returns when it
+// executes. Any Value that has the same ValueKey is guaranteed to return the same value, provided
+// that they return a non-empty ValueKey. Operations that have effects, or that can have their
+// behavior affected by other operations' effects, will return an empty ValueKey. You have to use
+// other mechanisms for doing CSE for impure operations.
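+//
+// An illustrative sketch of a block-local CSE built on ValueKey (not the pass B3 actually
+// ships; replacement bookkeeping is elided):
+//
+//     HashMap<ValueKey, Value*> pureValues;
+//     for (Value* value : *block) {
+//         ValueKey key = value->key();
+//         if (!key)
+//             continue;
+//         auto result = pureValues.add(key, value);
+//         if (!result.isNewEntry)
+//             value->replaceWithIdentity(result.iterator->value);
+//     }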
+
+class ValueKey {
+public:
+    ValueKey()
+    {
+    }
+
+    ValueKey(Kind kind, Type type)
+        : m_kind(kind)
+        , m_type(type)
+    {
+    }
+
+    ValueKey(Kind, Type, Value* child);
+
+    ValueKey(Kind, Type, Value* left, Value* right);
+
+    ValueKey(Kind, Type, Value* a, Value* b, Value* c);
+
+    ValueKey(Kind kind, Type type, int64_t value)
+        : m_kind(kind)
+        , m_type(type)
+    {
+        u.value = value;
+    }
+
+    ValueKey(Kind kind, Type type, double value)
+        : m_kind(kind)
+        , m_type(type)
+    {
+        u.doubleValue = value;
+    }
+
+    ValueKey(Kind kind, Type type, float value)
+        : m_kind(kind)
+        , m_type(type)
+    {
+        u.floatValue = value;
+    }
+
+    static ValueKey intConstant(Type type, int64_t value);
+
+    Kind kind() const { return m_kind; }
+    Opcode opcode() const { return kind().opcode(); }
+    Type type() const { return m_type; }
+    unsigned childIndex(unsigned index) const { return u.indices[index]; }
+    Value* child(Procedure&, unsigned index) const;
+    int64_t value() const { return u.value; }
+    double doubleValue() const { return u.doubleValue; }
+    float floatValue() const { return u.floatValue; }
+
+    bool operator==(const ValueKey& other) const
+    {
+        return m_kind == other.m_kind
+            && m_type == other.m_type
+            && u == other.u;
+    }
+
+    bool operator!=(const ValueKey& other) const
+    {
+        return !(*this == other);
+    }
+
+    unsigned hash() const
+    {
+        return m_kind.hash() + m_type + WTF::IntHash<unsigned>::hash(u.indices[0]) + u.indices[1] + u.indices[2];
+    }
+
+    explicit operator bool() const { return *this != ValueKey(); }
+
+    void dump(PrintStream&) const;
+
+    bool canMaterialize() const
+    {
+        if (!*this)
+            return false;
+        switch (opcode()) {
+        case CheckAdd:
+        case CheckSub:
+        case CheckMul:
+            return false;
+        default:
+            return true;
+        }
+    }
+
+    bool isConstant() const
+    {
+        return B3::isConstant(opcode());
+    }
+
+    // Attempts to materialize the Value for this ValueKey. May return nullptr if the value cannot
+    // be materialized. This happens for CheckAdd and friends. You can use canMaterialize() to check
+    // if your key is materializable.
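+    //
+    // For example (illustrative; the materialized value still has to be inserted into a block):
+    //
+    //     ValueKey key = ValueKey::intConstant(Int32, 42);
+    //     Value* fortyTwo = key.canMaterialize() ? key.materialize(proc, origin) : nullptr;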
+    Value* materialize(Procedure&, Origin) const;
+
+    ValueKey(WTF::HashTableDeletedValueType)
+        : m_type { Int32 }
+    {
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return *this == ValueKey(WTF::HashTableDeletedValue);
+    }
+        
+private:
+    Kind m_kind;
+    Type m_type { Void };
+    union U {
+        unsigned indices[3];
+        int64_t value;
+        double doubleValue;
+        float floatValue;
+
+        U()
+        {
+            indices[0] = 0;
+            indices[1] = 0;
+            indices[2] = 0;
+        }
+
+        bool operator==(const U& other) const
+        {
+            return indices[0] == other.indices[0]
+                && indices[1] == other.indices[1]
+                && indices[2] == other.indices[2];
+        }
+    } u;
+};
+
+struct ValueKeyHash {
+    static unsigned hash(const ValueKey& key) { return key.hash(); }
+    static bool equal(const ValueKey& a, const ValueKey& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::ValueKey> {
+    typedef JSC::B3::ValueKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::ValueKey> : public SimpleClassHashTraits<JSC::B3::ValueKey> {
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueKeyInlines.h b/Source/JavaScriptCore/b3/B3ValueKeyInlines.h
new file mode 100644
index 000000000..14158d501
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueKeyInlines.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include "B3Value.h"
+#include "B3ValueKey.h"
+
+namespace JSC { namespace B3 {
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* child)
+    : m_kind(kind)
+    , m_type(type)
+{
+    u.indices[0] = child->index();
+}
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* left, Value* right)
+    : m_kind(kind)
+    , m_type(type)
+{
+    u.indices[0] = left->index();
+    u.indices[1] = right->index();
+}
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* a, Value* b, Value* c)
+    : m_kind(kind)
+    , m_type(type)
+{
+    u.indices[0] = a->index();
+    u.indices[1] = b->index();
+    u.indices[2] = c->index();
+}
+
+inline Value* ValueKey::child(Procedure& proc, unsigned index) const
+{
+    return proc.values()[index];
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueRep.cpp b/Source/JavaScriptCore/b3/B3ValueRep.cpp
new file mode 100644
index 000000000..9888d228f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueRep.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ValueRep.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AssemblyHelpers.h"
+#include "JSCInlines.h"
+
+namespace JSC { namespace B3 {
+
+void ValueRep::addUsedRegistersTo(RegisterSet& set) const
+{
+    switch (m_kind) {
+    case WarmAny:
+    case ColdAny:
+    case LateColdAny:
+    case SomeRegister:
+    case SomeEarlyRegister:
+    case Constant:
+        return;
+    case LateRegister:
+    case Register:
+        set.set(reg());
+        return;
+    case Stack:
+    case StackArgument:
+        set.set(MacroAssembler::stackPointerRegister);
+        set.set(GPRInfo::callFrameRegister);
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+RegisterSet ValueRep::usedRegisters() const
+{
+    RegisterSet result;
+    addUsedRegistersTo(result);
+    return result;
+}
+
+void ValueRep::dump(PrintStream& out) const
+{
+    out.print(m_kind);
+    switch (m_kind) {
+    case WarmAny:
+    case ColdAny:
+    case LateColdAny:
+    case SomeRegister:
+    case SomeEarlyRegister:
+        return;
+    case LateRegister:
+    case Register:
+        out.print("(", reg(), ")");
+        return;
+    case Stack:
+        out.print("(", offsetFromFP(), ")");
+        return;
+    case StackArgument:
+        out.print("(", offsetFromSP(), ")");
+        return;
+    case Constant:
+        out.print("(", value(), ")");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void ValueRep::emitRestore(AssemblyHelpers& jit, Reg reg) const
+{
+    if (reg.isGPR()) {
+        switch (kind()) {
+        case LateRegister:
+        case Register:
+            if (isGPR())
+                jit.move(gpr(), reg.gpr());
+            else
+                jit.moveDoubleTo64(fpr(), reg.gpr());
+            break;
+        case Stack:
+            jit.load64(AssemblyHelpers::Address(GPRInfo::callFrameRegister, offsetFromFP()), reg.gpr());
+            break;
+        case Constant:
+            jit.move(AssemblyHelpers::TrustedImm64(value()), reg.gpr());
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+        return;
+    }
+    
+    switch (kind()) {
+    case LateRegister:
+    case Register:
+        if (isGPR())
+            jit.move64ToDouble(gpr(), reg.fpr());
+        else
+            jit.moveDouble(fpr(), reg.fpr());
+        break;
+    case Stack:
+        jit.loadDouble(AssemblyHelpers::Address(GPRInfo::callFrameRegister, offsetFromFP()), reg.fpr());
+        break;
+    case Constant:
+        jit.move(AssemblyHelpers::TrustedImm64(value()), jit.scratchRegister());
+        jit.move64ToDouble(jit.scratchRegister(), reg.fpr());
+        break;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        break;
+    }
+}
+
+ValueRecovery ValueRep::recoveryForJSValue() const
+{
+    switch (kind()) {
+    case LateRegister:
+    case Register:
+        return ValueRecovery::inGPR(gpr(), DataFormatJS);
+    case Stack:
+        RELEASE_ASSERT(!(offsetFromFP() % sizeof(EncodedJSValue)));
+        return ValueRecovery::displacedInJSStack(
+            VirtualRegister(offsetFromFP() / sizeof(EncodedJSValue)),
+            DataFormatJS);
+    case Constant:
+        return ValueRecovery::constant(JSValue::decode(value()));
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return { };
+    }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, ValueRep::Kind kind)
+{
+    switch (kind) {
+    case ValueRep::WarmAny:
+        out.print("WarmAny");
+        return;
+    case ValueRep::ColdAny:
+        out.print("ColdAny");
+        return;
+    case ValueRep::LateColdAny:
+        out.print("LateColdAny");
+        return;
+    case ValueRep::SomeRegister:
+        out.print("SomeRegister");
+        return;
+    case ValueRep::SomeEarlyRegister:
+        out.print("SomeEarlyRegister");
+        return;
+    case ValueRep::Register:
+        out.print("Register");
+        return;
+    case ValueRep::LateRegister:
+        out.print("LateRegister");
+        return;
+    case ValueRep::Stack:
+        out.print("Stack");
+        return;
+    case ValueRep::StackArgument:
+        out.print("StackArgument");
+        return;
+    case ValueRep::Constant:
+        out.print("Constant");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueRep.h b/Source/JavaScriptCore/b3/B3ValueRep.h
new file mode 100644
index 000000000..5f9635e7a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueRep.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "JSCJSValue.h"
+#include "Reg.h"
+#include "RegisterSet.h"
+#include "ValueRecovery.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class AssemblyHelpers;
+
+namespace B3 {
+
+// We use this class to describe value representations at stackmaps. It's used both to force a
+// representation and to get the representation. When the B3 client forces a representation, we say
+// that it's an input. When B3 tells the client what representation it picked, we say that it's an
+// output.
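+//
+// For example (illustrative sketch; "patchpoint" is a PatchpointValue the client created
+// elsewhere), an input rep pins an argument to a register, and the reps B3 picked arrive
+// in the generation params:
+//
+//     patchpoint->append(value, ValueRep::reg(GPRInfo::regT0));
+//     patchpoint->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+//         // For a non-Void patchpoint, params[0] is the rep B3 picked for the result;
+//         // the reps of the appended values follow.
+//     });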
+
+class ValueRep {
+public:
+    enum Kind {
+        // As an input representation, this means that B3 can pick any representation. As an output
+        // representation, this means that we don't know. This will only arise as an output
+        // representation for the active arguments of Check/CheckAdd/CheckSub/CheckMul.
+        WarmAny,
+
+        // Same as WarmAny, but implies that the use is cold. A cold use is not counted as a use for
+        // computing the priority of the used temporary.
+        ColdAny,
+
+        // Same as ColdAny, but also implies that the use occurs after all other effects of the stackmap
+        // value.
+        LateColdAny,
+
+        // As an input representation, this means that B3 should pick some register. It could be a
+        // register that this claims to clobber!
+        SomeRegister,
+
+        // As an input representation, this tells us that B3 should pick some register, but implies
+        // that the def happens before any of the effects of the stackmap. This is only valid for
+        // the result constraint of a Patchpoint.
+        SomeEarlyRegister,
+
+        // As an input representation, this forces a particular register. As an output
+        // representation, this tells us what register B3 picked.
+        Register,
+
+        // As an input representation, this forces a particular register and states that
+        // the register is used late. This means that the register is used after the result
+        // is defined (i.e, the result will interfere with this as an input).
+        // It's not a valid output representation.
+        LateRegister,
+
+        // As an output representation, this tells us what stack slot B3 picked. It's not a valid
+        // input representation.
+        Stack,
+
+        // As an input representation, this forces the value to end up in the argument area at some
+        // offset.
+        StackArgument,
+
+        // As an output representation, this tells us that B3 constant-folded the value.
+        Constant
+    };
+    
+    ValueRep()
+        : m_kind(WarmAny)
+    {
+    }
+
+    explicit ValueRep(Reg reg)
+        : m_kind(Register)
+    {
+        u.reg = reg;
+    }
+
+    ValueRep(Kind kind)
+        : m_kind(kind)
+    {
+        ASSERT(kind == WarmAny || kind == ColdAny || kind == LateColdAny || kind == SomeRegister || kind == SomeEarlyRegister);
+    }
+
+    static ValueRep reg(Reg reg)
+    {
+        return ValueRep(reg);
+    }
+
+    static ValueRep lateReg(Reg reg)
+    {
+        ValueRep result(reg);
+        result.m_kind = LateRegister;
+        return result;
+    }
+
+    static ValueRep stack(intptr_t offsetFromFP)
+    {
+        ValueRep result;
+        result.m_kind = Stack;
+        result.u.offsetFromFP = offsetFromFP;
+        return result;
+    }
+
+    static ValueRep stackArgument(intptr_t offsetFromSP)
+    {
+        ValueRep result;
+        result.m_kind = StackArgument;
+        result.u.offsetFromSP = offsetFromSP;
+        return result;
+    }
+
+    static ValueRep constant(int64_t value)
+    {
+        ValueRep result;
+        result.m_kind = Constant;
+        result.u.value = value;
+        return result;
+    }
+
+    static ValueRep constantDouble(double value)
+    {
+        return ValueRep::constant(bitwise_cast<int64_t>(value));
+    }
+
+    Kind kind() const { return m_kind; }
+
+    bool operator==(const ValueRep& other) const
+    {
+        if (kind() != other.kind())
+            return false;
+        switch (kind()) {
+        case LateRegister:
+        case Register:
+            return u.reg == other.u.reg;
+        case Stack:
+            return u.offsetFromFP == other.u.offsetFromFP;
+        case StackArgument:
+            return u.offsetFromSP == other.u.offsetFromSP;
+        case Constant:
+            return u.value == other.u.value;
+        default:
+            return true;
+        }
+    }
+
+    bool operator!=(const ValueRep& other) const
+    {
+        return !(*this == other);
+    }
+
+    explicit operator bool() const { return kind() != WarmAny; }
+
+    bool isAny() const { return kind() == WarmAny || kind() == ColdAny || kind() == LateColdAny; }
+
+    bool isReg() const { return kind() == Register || kind() == LateRegister; }
+    
+    Reg reg() const
+    {
+        ASSERT(isReg());
+        return u.reg;
+    }
+
+    bool isGPR() const { return isReg() && reg().isGPR(); }
+    bool isFPR() const { return isReg() && reg().isFPR(); }
+
+    GPRReg gpr() const { return reg().gpr(); }
+    FPRReg fpr() const { return reg().fpr(); }
+
+    bool isStack() const { return kind() == Stack; }
+
+    intptr_t offsetFromFP() const
+    {
+        ASSERT(isStack());
+        return u.offsetFromFP;
+    }
+
+    bool isStackArgument() const { return kind() == StackArgument; }
+
+    intptr_t offsetFromSP() const
+    {
+        ASSERT(isStackArgument());
+        return u.offsetFromSP;
+    }
+
+    bool isConstant() const { return kind() == Constant; }
+
+    int64_t value() const
+    {
+        ASSERT(isConstant());
+        return u.value;
+    }
+
+    double doubleValue() const
+    {
+        return bitwise_cast<double>(value());
+    }
+
+    ValueRep withOffset(intptr_t offset) const
+    {
+        switch (kind()) {
+        case Stack:
+            return stack(offsetFromFP() + offset);
+        case StackArgument:
+            return stackArgument(offsetFromSP() + offset);
+        default:
+            return *this;
+        }
+    }
+
+    void addUsedRegistersTo(RegisterSet&) const;
+    
+    RegisterSet usedRegisters() const;
+
+    // Get the used registers for a vector of ValueReps.
+    template<typename VectorType>
+    static RegisterSet usedRegisters(const VectorType& vector)
+    {
+        RegisterSet result;
+        for (const ValueRep& value : vector)
+            value.addUsedRegistersTo(result);
+        return result;
+    }
+
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+    // This has a simple contract: it emits code to restore the value into the given register. This
+    // will work even if it requires moving bits between a GPR and an FPR.
+    void emitRestore(AssemblyHelpers&, Reg) const;
+
+    // Computes the ValueRecovery assuming that the Value* was for a JSValue (i.e. Int64).
+    // NOTE: We should avoid putting JSValue-related methods in B3, but this was hard to avoid
+    // because some parts of JSC use ValueRecovery like a general "where my bits at" object, almost
+    // exactly like ValueRep.
+    ValueRecovery recoveryForJSValue() const;
+
+private:
+    Kind m_kind;
+    union U {
+        Reg reg;
+        intptr_t offsetFromFP;
+        intptr_t offsetFromSP;
+        int64_t value;
+
+        U()
+        {
+            memset(this, 0, sizeof(*this));
+        }
+    } u;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::B3::ValueRep::Kind);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Variable.cpp b/Source/JavaScriptCore/b3/B3Variable.cpp
new file mode 100644
index 000000000..2314ee2dd
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Variable.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Variable.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+Variable::~Variable()
+{
+}
+
+void Variable::dump(PrintStream& out) const
+{
+    out.print("var", m_index);
+}
+
+void Variable::deepDump(PrintStream& out) const
+{
+    out.print(m_type, " var", m_index);
+}
+
+Variable::Variable(Type type)
+    : m_type(type)
+{
+    ASSERT(type != Void);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Variable.h b/Source/JavaScriptCore/b3/B3Variable.h
new file mode 100644
index 000000000..f4d610ff7
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Variable.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class Variable {
+    WTF_MAKE_NONCOPYABLE(Variable);
+    WTF_MAKE_FAST_ALLOCATED;
+
+public:
+    ~Variable();
+
+    Type type() const { return m_type; }
+    unsigned index() const { return m_index; }
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+private:
+    friend class Procedure;
+    friend class SparseCollection<Variable>;
+
+    Variable(Type);
+    
+    unsigned m_index;
+    Type m_type;
+};
+
+class DeepVariableDump {
+public:
+    DeepVariableDump(const Variable* variable)
+        : m_variable(variable)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_variable)
+            m_variable->deepDump(out);
+        else
+            out.print("");
+    }
+
+private:
+    const Variable* m_variable;
+};
+
+inline DeepVariableDump deepDump(const Variable* variable)
+{
+    return DeepVariableDump(variable);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3VariableValue.cpp b/Source/JavaScriptCore/b3/B3VariableValue.cpp
new file mode 100644
index 000000000..6aeef479b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3VariableValue.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3VariableValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Variable.h"
+
+namespace JSC { namespace B3 {
+
+VariableValue::~VariableValue()
+{
+}
+
+void VariableValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, pointerDump(m_variable));
+}
+
+Value* VariableValue::cloneImpl() const
+{
+    return new VariableValue(*this);
+}
+
+VariableValue::VariableValue(Kind kind, Origin origin, Variable* variable, Value* value)
+    : Value(CheckedOpcode, kind, Void, origin, value)
+    , m_variable(variable)
+{
+    ASSERT(kind == Set);
+}
+
+VariableValue::VariableValue(Kind kind, Origin origin, Variable* variable)
+    : Value(CheckedOpcode, kind, variable->type(), origin)
+    , m_variable(variable)
+{
+    ASSERT(kind == Get);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3VariableValue.h b/Source/JavaScriptCore/b3/B3VariableValue.h
new file mode 100644
index 000000000..067ba42c6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3VariableValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class Variable;
+
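+// VariableValue represents the Get and Set opcodes, which read and write Variables before
+// the code is turned into SSA form. An illustrative sketch (assumes a Procedure proc, a
+// BasicBlock* block, an Origin origin, and an Int32 value called someInt32):
+//
+//     Variable* var = proc.addVariable(Int32);
+//     block->appendNew<VariableValue>(proc, Set, origin, var, someInt32);
+//     Value* current = block->appendNew<VariableValue>(proc, Get, origin, var);
+//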
+class JS_EXPORT_PRIVATE VariableValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Get || kind == Set; }
+
+    ~VariableValue();
+
+    Variable* variable() const { return m_variable; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    // Use this for Set.
+    VariableValue(Kind, Origin, Variable*, Value*);
+
+    // Use this for Get.
+    VariableValue(Kind, Origin, Variable*);
+
+    Variable* m_variable;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
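Aside: VariableValue is how mutable locals appear in B3 IR before the fixSSA
phase rewrites them into SSA form. A minimal sketch of the client-side idiom,
using the usual appendNew<> pattern (illustrative, not taken from this patch):

    Procedure proc;
    BasicBlock* root = proc.addBlock();
    Variable* var = proc.addVariable(Int32);
    // Set stores 42 into the variable; the resulting Value has type Void.
    Value* fortyTwo = root->appendNew<Const32Value>(proc, Origin(), 42);
    root->appendNew<VariableValue>(proc, Set, Origin(), var, fortyTwo);
    // Get reads it back; the resulting Value has var->type(), here Int32.
    Value* read = root->appendNew<VariableValue>(proc, Get, Origin(), var);
    root->appendNewControlValue(proc, Return, Origin(), read);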
diff --git a/Source/JavaScriptCore/b3/B3WasmAddressValue.cpp b/Source/JavaScriptCore/b3/B3WasmAddressValue.cpp
new file mode 100644
index 000000000..57d762852
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3WasmAddressValue.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3WasmAddressValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+WasmAddressValue::~WasmAddressValue()
+{
+}
+
+void WasmAddressValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, m_pinnedGPR);
+}
+
+Value* WasmAddressValue::cloneImpl() const
+{
+    return new WasmAddressValue(*this);
+}
+
+WasmAddressValue::WasmAddressValue(Origin origin, Value* value, GPRReg pinnedGPR)
+    : Value(CheckedOpcode, WasmAddress, Int64, origin, value)
+    , m_pinnedGPR(pinnedGPR)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3WasmAddressValue.h b/Source/JavaScriptCore/b3/B3WasmAddressValue.h
new file mode 100644
index 000000000..d93860275
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3WasmAddressValue.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE WasmAddressValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == WasmAddress; }
+
+    ~WasmAddressValue();
+
+    GPRReg pinnedGPR() const { return m_pinnedGPR; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    WasmAddressValue(Origin, Value*, GPRReg);
+
+    GPRReg m_pinnedGPR;
+};
+
+} } // namespace JSC::B3
+
+
+#endif // ENABLE(B3_JIT)
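Aside: WasmAddress computes the effective address of a wasm pointer relative
to the memory base that the wasm plan keeps pinned in a register for the whole
compilation. A sketch, reusing proc/root from the sketch above (the register
choice and the stand-in pointer are illustrative):

    Value* ptr = root->appendNew<Const64Value>(proc, Origin(), 0); // stand-in pointer
    GPRReg memoryBase = GPRInfo::regCS3; // illustrative pinned base register
    Value* addr = root->appendNew<WasmAddressValue>(proc, Origin(), ptr, memoryBase);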
diff --git a/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp b/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp
new file mode 100644
index 000000000..b3a3290dc
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3WasmBoundsCheckValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+WasmBoundsCheckValue::~WasmBoundsCheckValue()
+{
+}
+
+WasmBoundsCheckValue::WasmBoundsCheckValue(Origin origin, Value* ptr, GPRReg pinnedGPR, unsigned offset)
+    : Value(CheckedOpcode, WasmBoundsCheck, origin, ptr)
+    , m_pinnedGPR(pinnedGPR)
+    , m_offset(offset)
+{
+}
+
+Value* WasmBoundsCheckValue::cloneImpl() const
+{
+    return new WasmBoundsCheckValue(*this);
+}
+
+void WasmBoundsCheckValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, "sizeRegister = ", m_pinnedGPR, ", offset = ", m_offset);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h b/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h
new file mode 100644
index 000000000..ccc54b86b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include "CCallHelpers.h"
+
+namespace JSC { namespace B3 {
+
+class WasmBoundsCheckValue : public Value {
+public:
+    static bool accepts(Kind kind)
+    {
+        switch (kind.opcode()) {
+        case WasmBoundsCheck:
+            return true;
+        default:
+            return false;
+        }
+    }
+    
+    ~WasmBoundsCheckValue();
+
+    GPRReg pinnedGPR() const { return m_pinnedGPR; }
+    unsigned offset() const { return m_offset; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    JS_EXPORT_PRIVATE WasmBoundsCheckValue(Origin, Value* ptr, GPRReg pinnedGPR, unsigned offset);
+
+    GPRReg m_pinnedGPR;
+    unsigned m_offset;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
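Aside: WasmBoundsCheck traps when ptr + offset falls outside the memory whose
size the wasm plan keeps pinned in a register (hence the "sizeRegister" label
in dumpMeta above). Sketch, under the same illustrative assumptions as the
previous examples:

    GPRReg memorySize = GPRInfo::regCS4; // illustrative pinned size register
    root->appendNew<WasmBoundsCheckValue>(proc, Origin(), ptr, memorySize, 0);
    // Memory accesses emitted after this point may assume ptr is in bounds;
    // the backend lowers the check to a compare-and-branch to a trap.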
diff --git a/Source/JavaScriptCore/b3/air/AirAllocateStack.cpp b/Source/JavaScriptCore/b3/air/AirAllocateStack.cpp
new file mode 100644
index 000000000..de9297f26
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirAllocateStack.cpp
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirAllocateStack.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+#include "StackAlignment.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+const bool verbose = false;
+
+bool attemptAssignment(
+    StackSlot* slot, intptr_t offsetFromFP, const Vector<StackSlot*>& otherSlots)
+{
+    if (verbose)
+        dataLog("Attempting to assign ", pointerDump(slot), " to ", offsetFromFP, " with interference ", pointerListDump(otherSlots), "\n");
+
+    // Need to align it to the slot's desired alignment.
+    offsetFromFP = -WTF::roundUpToMultipleOf(slot->alignment(), -offsetFromFP);
+    
+    for (StackSlot* otherSlot : otherSlots) {
+        if (!otherSlot->offsetFromFP())
+            continue;
+        bool overlap = WTF::rangesOverlap(
+            offsetFromFP,
+            offsetFromFP + static_cast<intptr_t>(slot->byteSize()),
+            otherSlot->offsetFromFP(),
+            otherSlot->offsetFromFP() + static_cast<intptr_t>(otherSlot->byteSize()));
+        if (overlap)
+            return false;
+    }
+
+    if (verbose)
+        dataLog("Assigned ", pointerDump(slot), " to ", offsetFromFP, "\n");
+    slot->setOffsetFromFP(offsetFromFP);
+    return true;
+}
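+
+// Worked example of the alignment rounding in attemptAssignment(): a 4-byte
+// slot with 8-byte alignment offered offsetFromFP == -4 becomes
+// -roundUpToMultipleOf(8, 4) == -8, i.e. the slot slides further below the
+// frame pointer until it is aligned. (Illustrative numbers.)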
+
+void assign(StackSlot* slot, const Vector<StackSlot*>& otherSlots)
+{
+    if (verbose)
+        dataLog("Attempting to assign ", pointerDump(slot), " with interference ", pointerListDump(otherSlots), "\n");
+    
+    if (attemptAssignment(slot, -static_cast<intptr_t>(slot->byteSize()), otherSlots))
+        return;
+
+    for (StackSlot* otherSlot : otherSlots) {
+        if (!otherSlot->offsetFromFP())
+            continue;
+        bool didAssign = attemptAssignment(
+            slot, otherSlot->offsetFromFP() - static_cast<intptr_t>(slot->byteSize()), otherSlots);
+        if (didAssign)
+            return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // anonymous namespace
+
+void allocateStack(Code& code)
+{
+    PhaseScope phaseScope(code, "allocateStack");
+
+    // Allocate all of the escaped slots in order. This is kind of a crazy algorithm to allow for
+    // the possibility of stack slots being assigned frame offsets before we even get here.
+    ASSERT(!code.frameSize());
+    Vector<StackSlot*> assignedEscapedStackSlots;
+    Vector<StackSlot*> escapedStackSlotsWorklist;
+    for (StackSlot* slot : code.stackSlots()) {
+        if (slot->isLocked()) {
+            if (slot->offsetFromFP())
+                assignedEscapedStackSlots.append(slot);
+            else
+                escapedStackSlotsWorklist.append(slot);
+        } else {
+            // It would be super strange to have an unlocked stack slot that has an offset already.
+            ASSERT(!slot->offsetFromFP());
+        }
+    }
+    // This is a fairly expensive loop, but it's OK because we'll usually only have a handful of
+    // escaped stack slots.
+    while (!escapedStackSlotsWorklist.isEmpty()) {
+        StackSlot* slot = escapedStackSlotsWorklist.takeLast();
+        assign(slot, assignedEscapedStackSlots);
+        assignedEscapedStackSlots.append(slot);
+    }
+
+    // Now we handle the spill slots.
+    StackSlotLiveness liveness(code);
+    IndexMap<StackSlot, HashSet<StackSlot*>> interference(code.stackSlots().size());
+    Vector<StackSlot*> slots;
+
+    for (BasicBlock* block : code) {
+        StackSlotLiveness::LocalCalc localCalc(liveness, block);
+
+        auto interfere = [&] (unsigned instIndex) {
+            if (verbose)
+                dataLog("Interfering: ", WTF::pointerListDump(localCalc.live()), "\n");
+
+            Inst::forEachDef(
+                block->get(instIndex), block->get(instIndex + 1),
+                [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+                    if (!arg.isStack())
+                        return;
+                    StackSlot* slot = arg.stackSlot();
+                    if (slot->kind() != StackSlotKind::Spill)
+                        return;
+
+                    for (StackSlot* otherSlot : localCalc.live()) {
+                        interference[slot].add(otherSlot);
+                        interference[otherSlot].add(slot);
+                    }
+                });
+        };
+
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            if (verbose)
+                dataLog("Analyzing: ", block->at(instIndex), "\n");
+
+            // Kill dead stores. For simplicity we say that a store is killable if it has only late
+            // defs and those late defs are to things that are dead right now. We only do that
+            // because that's the only kind of dead stack store we will see here.
+            Inst& inst = block->at(instIndex);
+            if (!inst.hasNonArgEffects()) {
+                bool ok = true;
+                inst.forEachArg(
+                    [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+                        if (Arg::isEarlyDef(role)) {
+                            ok = false;
+                            return;
+                        }
+                        if (!Arg::isLateDef(role))
+                            return;
+                        if (!arg.isStack()) {
+                            ok = false;
+                            return;
+                        }
+                        StackSlot* slot = arg.stackSlot();
+                        if (slot->kind() != StackSlotKind::Spill) {
+                            ok = false;
+                            return;
+                        }
+
+                        if (localCalc.isLive(slot)) {
+                            ok = false;
+                            return;
+                        }
+                    });
+                if (ok)
+                    inst = Inst();
+            }
+            
+            interfere(instIndex);
+            localCalc.execute(instIndex);
+        }
+        interfere(-1);
+        
+        block->insts().removeAllMatching(
+            [&] (const Inst& inst) -> bool {
+                return !inst;
+            });
+    }
+
+    if (verbose) {
+        for (StackSlot* slot : code.stackSlots())
+            dataLog("Interference of ", pointerDump(slot), ": ", pointerListDump(interference[slot]), "\n");
+    }
+
+    // Now we assign stack locations. At its heart this algorithm is just first-fit. For each
+    // StackSlot we just want to find the offsetFromFP that is closest to zero while ensuring no
+    // overlap with other StackSlots that this overlaps with.
+    Vector<StackSlot*> otherSlots = assignedEscapedStackSlots;
+    for (StackSlot* slot : code.stackSlots()) {
+        if (slot->offsetFromFP()) {
+            // Already assigned an offset.
+            continue;
+        }
+
+        HashSet<StackSlot*>& interferingSlots = interference[slot];
+        otherSlots.resize(assignedEscapedStackSlots.size());
+        otherSlots.resize(assignedEscapedStackSlots.size() + interferingSlots.size());
+        unsigned nextIndex = assignedEscapedStackSlots.size();
+        for (StackSlot* otherSlot : interferingSlots)
+            otherSlots[nextIndex++] = otherSlot;
+
+        assign(slot, otherSlots);
+    }
+
+    // Figure out how much stack we're using for stack slots.
+    unsigned frameSizeForStackSlots = 0;
+    for (StackSlot* slot : code.stackSlots()) {
+        frameSizeForStackSlots = std::max(
+            frameSizeForStackSlots,
+            static_cast<unsigned>(-slot->offsetFromFP()));
+    }
+
+    frameSizeForStackSlots = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSizeForStackSlots);
+
+    // Now we need to deduce how much argument area we need.
+    for (BasicBlock* block : code) {
+        for (Inst& inst : *block) {
+            for (Arg& arg : inst.args) {
+                if (arg.isCallArg()) {
+                    // For now, we assume that we use 8 bytes of the call arg. But that's not
+                    // such an awesome assumption.
+                    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150454
+                    ASSERT(arg.offset() >= 0);
+                    code.requestCallArgAreaSizeInBytes(arg.offset() + 8);
+                }
+            }
+        }
+    }
+
+    code.setFrameSize(frameSizeForStackSlots + code.callArgAreaSizeInBytes());
+
+    // Finally, transform the code to use Addr's instead of StackSlot's. This is a lossless
+    // transformation since we can search the StackSlots array to figure out which StackSlot any
+    // offset-from-FP refers to.
+
+    // FIXME: This may produce addresses that aren't valid if we end up with a ginormous stack frame.
+    // We would have to scavenge for temporaries if this happened. Fortunately, this case will be
+    // extremely rare so we can do crazy things when it arises.
+    // https://bugs.webkit.org/show_bug.cgi?id=152530
+
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            Inst& inst = block->at(instIndex);
+            inst.forEachArg(
+                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width width) {
+                    auto stackAddr = [&] (int32_t offset) -> Arg {
+                        return Arg::stackAddr(offset, code.frameSize(), width);
+                    };
+                    
+                    switch (arg.kind()) {
+                    case Arg::Stack: {
+                        StackSlot* slot = arg.stackSlot();
+                        if (Arg::isZDef(role)
+                            && slot->kind() == StackSlotKind::Spill
+                            && slot->byteSize() > Arg::bytes(width)) {
+                            // Currently we only handle this simple case because it's the only one
+                            // that arises: ZDef's are only 32-bit right now. So, when we hit these
+                            // assertions it means that we need to implement those other kinds of
+                            // zero fills.
+                            RELEASE_ASSERT(slot->byteSize() == 8);
+                            RELEASE_ASSERT(width == Arg::Width32);
+
+                            RELEASE_ASSERT(isValidForm(StoreZero32, Arg::Stack));
+                            insertionSet.insert(
+                                instIndex + 1, StoreZero32, inst.origin,
+                                stackAddr(arg.offset() + 4 + slot->offsetFromFP()));
+                        }
+                        arg = stackAddr(arg.offset() + slot->offsetFromFP());
+                        break;
+                    }
+                    case Arg::CallArg:
+                        arg = stackAddr(arg.offset() - code.frameSize());
+                        break;
+                    default:
+                        break;
+                    }
+                }
+            );
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/Source/JavaScriptCore/b3/air/AirAllocateStack.h b/Source/JavaScriptCore/b3/air/AirAllocateStack.h
new file mode 100644
index 000000000..31519d246
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirAllocateStack.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This allocates StackSlots to places on the stack. It first allocates the pinned ones in index
+// order and then it allocates the rest using first fit. Takes the opportunity to kill dead
+// assignments to stack slots, since it knows which ones are live. Also fixes ZDefs to anonymous
+// stack slots.
+
+void allocateStack(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
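Aside: stripped of Air details, the placement strategy described above is plain
first-fit over a downward-growing frame. A standalone sketch (illustrative
types; alignment rounding and the interference-set bookkeeping are omitted):

    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    struct Slot { intptr_t offset; intptr_t size; }; // offset < 0, below FP

    // Place a new slot of `size` bytes at the offset closest to zero that does
    // not overlap any already-placed slot. Mirrors assign()/attemptAssignment()
    // above: try right below FP, then just below each placed slot.
    intptr_t firstFit(intptr_t size, const std::vector<Slot>& placed)
    {
        auto overlaps = [&](intptr_t offset) {
            for (const Slot& s : placed) {
                if (offset < s.offset + s.size && s.offset < offset + size)
                    return true;
            }
            return false;
        };
        if (!overlaps(-size))
            return -size;
        for (const Slot& s : placed) {
            intptr_t candidate = s.offset - size;
            if (!overlaps(candidate))
                return candidate;
        }
        std::abort(); // unreachable: below the lowest slot always fits
    }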
diff --git a/Source/JavaScriptCore/b3/air/AirArg.cpp b/Source/JavaScriptCore/b3/air/AirArg.cpp
new file mode 100644
index 000000000..c777928b7
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirArg.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirArg.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "B3Value.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool Arg::isStackMemory() const
+{
+    switch (kind()) {
+    case Addr:
+        return base() == Air::Tmp(GPRInfo::callFrameRegister)
+            || base() == Air::Tmp(MacroAssembler::stackPointerRegister);
+    case Stack:
+    case CallArg:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool Arg::isRepresentableAs(Width width, Signedness signedness) const
+{
+    return isRepresentableAs(width, signedness, value());
+}
+
+bool Arg::usesTmp(Air::Tmp tmp) const
+{
+    bool uses = false;
+    const_cast<Arg*>(this)->forEachTmpFast(
+        [&] (Air::Tmp otherTmp) {
+            if (otherTmp == tmp)
+                uses = true;
+        });
+    return uses;
+}
+
+bool Arg::canRepresent(Value* value) const
+{
+    return isType(typeForB3Type(value->type()));
+}
+
+bool Arg::isCompatibleType(const Arg& other) const
+{
+    if (hasType())
+        return other.isType(type());
+    if (other.hasType())
+        return isType(other.type());
+    return true;
+}
+
+unsigned Arg::jsHash() const
+{
+    unsigned result = static_cast<unsigned>(m_kind);
+    
+    switch (m_kind) {
+    case Invalid:
+    case Special:
+        break;
+    case Tmp:
+        result += m_base.internalValue();
+        break;
+    case Imm:
+    case BitImm:
+    case CallArg:
+    case RelCond:
+    case ResCond:
+    case DoubleCond:
+    case WidthArg:
+        result += static_cast<unsigned>(m_offset);
+        break;
+    case BigImm:
+    case BitImm64:
+        result += static_cast<unsigned>(m_offset);
+        result += static_cast<unsigned>(m_offset >> 32);
+        break;
+    case Addr:
+        result += m_offset;
+        result += m_base.internalValue();
+        break;
+    case Index:
+        result += static_cast<unsigned>(m_offset);
+        result += m_scale;
+        result += m_base.internalValue();
+        result += m_index.internalValue();
+        break;
+    case Stack:
+        result += static_cast<unsigned>(m_scale);
+        result += stackSlot()->index();
+        break;
+    }
+    
+    return result;
+}
+
+void Arg::dump(PrintStream& out) const
+{
+    switch (m_kind) {
+    case Invalid:
+        out.print("");
+        return;
+    case Tmp:
+        out.print(tmp());
+        return;
+    case Imm:
+        out.print("$", m_offset);
+        return;
+    case BigImm:
+        out.printf("$0x%llx", static_cast(m_offset));
+        return;
+    case BitImm:
+        out.print("$", m_offset);
+        return;
+    case BitImm64:
+        out.printf("$0x%llx", static_cast(m_offset));
+        return;
+    case Addr:
+        if (offset())
+            out.print(offset());
+        out.print("(", base(), ")");
+        return;
+    case Index:
+        if (offset())
+            out.print(offset());
+        out.print("(", base(), ",", index());
+        if (scale() != 1)
+            out.print(",", scale());
+        out.print(")");
+        return;
+    case Stack:
+        if (offset())
+            out.print(offset());
+        out.print("(", pointerDump(stackSlot()), ")");
+        return;
+    case CallArg:
+        if (offset())
+            out.print(offset());
+        out.print("(callArg)");
+        return;
+    case RelCond:
+        out.print(asRelationalCondition());
+        return;
+    case ResCond:
+        out.print(asResultCondition());
+        return;
+    case DoubleCond:
+        out.print(asDoubleCondition());
+        return;
+    case Special:
+        out.print(pointerDump(special()));
+        return;
+    case WidthArg:
+        out.print(width());
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
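+
+// Sample renderings from the cases above (illustrative): imm(5) dumps as "$5",
+// addr(base, 8) as "8(base)", index(base, idx, 4, 16) as "16(base,idx,4)",
+// and callArg(8) as "8(callArg)".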
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+using namespace JSC::B3::Air;
+
+void printInternal(PrintStream& out, Arg::Kind kind)
+{
+    switch (kind) {
+    case Arg::Invalid:
+        out.print("Invalid");
+        return;
+    case Arg::Tmp:
+        out.print("Tmp");
+        return;
+    case Arg::Imm:
+        out.print("Imm");
+        return;
+    case Arg::BigImm:
+        out.print("BigImm");
+        return;
+    case Arg::BitImm:
+        out.print("BitImm");
+        return;
+    case Arg::BitImm64:
+        out.print("BitImm64");
+        return;
+    case Arg::Addr:
+        out.print("Addr");
+        return;
+    case Arg::Stack:
+        out.print("Stack");
+        return;
+    case Arg::CallArg:
+        out.print("CallArg");
+        return;
+    case Arg::Index:
+        out.print("Index");
+        return;
+    case Arg::RelCond:
+        out.print("RelCond");
+        return;
+    case Arg::ResCond:
+        out.print("ResCond");
+        return;
+    case Arg::DoubleCond:
+        out.print("DoubleCond");
+        return;
+    case Arg::Special:
+        out.print("Special");
+        return;
+    case Arg::WidthArg:
+        out.print("WidthArg");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Role role)
+{
+    switch (role) {
+    case Arg::Use:
+        out.print("Use");
+        return;
+    case Arg::Def:
+        out.print("Def");
+        return;
+    case Arg::UseDef:
+        out.print("UseDef");
+        return;
+    case Arg::ZDef:
+        out.print("ZDef");
+        return;
+    case Arg::UseZDef:
+        out.print("UseZDef");
+        return;
+    case Arg::UseAddr:
+        out.print("UseAddr");
+        return;
+    case Arg::ColdUse:
+        out.print("ColdUse");
+        return;
+    case Arg::LateUse:
+        out.print("LateUse");
+        return;
+    case Arg::LateColdUse:
+        out.print("LateColdUse");
+        return;
+    case Arg::EarlyDef:
+        out.print("EarlyDef");
+        return;
+    case Arg::Scratch:
+        out.print("Scratch");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Type type)
+{
+    switch (type) {
+    case Arg::GP:
+        out.print("GP");
+        return;
+    case Arg::FP:
+        out.print("FP");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Width width)
+{
+    switch (width) {
+    case Arg::Width8:
+        out.print("8");
+        return;
+    case Arg::Width16:
+        out.print("16");
+        return;
+    case Arg::Width32:
+        out.print("32");
+        return;
+    case Arg::Width64:
+        out.print("64");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Signedness signedness)
+{
+    switch (signedness) {
+    case Arg::Signed:
+        out.print("Signed");
+        return;
+    case Arg::Unsigned:
+        out.print("Unsigned");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirArg.h b/Source/JavaScriptCore/b3/air/AirArg.h
new file mode 100644
index 000000000..13db1ce7e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirArg.h
@@ -0,0 +1,1383 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirTmp.h"
+#include "B3Common.h"
+#include "B3Type.h"
+#include <wtf/Optional.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+class Value;
+
+namespace Air {
+
+class Special;
+class StackSlot;
+
+// This class name is also intentionally terse because we will say it a lot. You'll see code like
+// Inst(..., Arg::imm(5), Arg::addr(thing, blah), ...)
+class Arg {
+public:
+    // These enum members are intentionally terse because we have to mention them a lot.
+    enum Kind : int8_t {
+        Invalid,
+
+        // This is either an unassigned temporary or a register. All unassigned temporaries
+        // eventually become registers.
+        Tmp,
+
+        // This is an immediate that the instruction will materialize. Imm is the immediate that can be
+        // inlined into most instructions, while BigImm indicates a constant materialization and is
+        // usually only usable with Move. Specials may also admit it, for example for stackmaps used for
+        // OSR exit and tail calls.
+        // BitImm is an immediate for bitwise operations (And, Xor, etc.).
+        Imm,
+        BigImm,
+        BitImm,
+        BitImm64,
+
+        // These are the addresses. Instructions may load from (Use), store to (Def), or evaluate
+        // (UseAddr) addresses.
+        Addr,
+        Stack,
+        CallArg,
+        Index,
+
+        // Immediate operands that customize the behavior of an operation. You can think of them as
+        // secondary opcodes. They are always "Use"'d.
+        RelCond,
+        ResCond,
+        DoubleCond,
+        Special,
+        WidthArg
+    };
+
+    enum Role : int8_t {
+        // Use means that the Inst will read from this value before doing anything else.
+        //
+        // For Tmp: The Inst will read this Tmp.
+        // For Arg::addr and friends: The Inst will load from this address.
+        // For Arg::imm and friends: The Inst will materialize and use this immediate.
+        // For RelCond/ResCond/Special: This is the only valid role for these kinds.
+        //
+        // Note that Use of an address does not mean escape. It only means that the instruction will
+        // load from the address before doing anything else. This is a bit tricky; for example
+        // Specials could theoretically squirrel away the address and effectively escape it. However,
+        // this is not legal. On the other hand, any address other than Stack is presumed to be
+        // always escaping, and Stack is presumed to be always escaping if it's Locked.
+        Use,
+
+        // Exactly like Use, except that it also implies that the use is cold: that is, replacing the
+        // use with something on the stack is free.
+        ColdUse,
+
+        // LateUse means that the Inst will read from this value after doing its Def's. Note that LateUse
+        // on an Addr or Index still means Use on the internal temporaries. Note that specifying the
+        // same Tmp once as Def and once as LateUse has undefined behavior: the use may happen before
+        // the def, or it may happen after it.
+        LateUse,
+
+        // Combination of LateUse and ColdUse.
+        LateColdUse,
+
+        // Def means that the Inst will write to this value after doing everything else.
+        //
+        // For Tmp: The Inst will write to this Tmp.
+        // For Arg::addr and friends: The Inst will store to this address.
+        // This isn't valid for any other kinds.
+        //
+        // Like Use of address, Def of address does not mean escape.
+        Def,
+
+        // This is a special variant of Def that implies that the upper bits of the target register are
+        // zero-filled. Specifically, if the Width of a ZDef is less than the largest possible width of
+        // the argument (for example, we're on a 64-bit machine and we have a Width32 ZDef of a GPR) then
+        // this has different implications for the upper bits (i.e. the top 32 bits in our example)
+        // depending on the kind of the argument:
+        //
+        // For register: the upper bits are zero-filled.
+        // For anonymous stack slot: the upper bits are zero-filled.
+        // For address: the upper bits are not touched (i.e. we do a 32-bit store in our example).
+        // For tmp: either the upper bits are not touched or they are zero-filled, and we won't know
+        // which until we lower the tmp to either a StackSlot or a Reg.
+        //
+        // The behavior of ZDef is consistent with what happens when you perform 32-bit operations on a
+        // 64-bit GPR. It's not consistent with what happens with 8-bit or 16-bit Defs on x86 GPRs, or
+        // what happens with float Defs in ARM NEON or X86 SSE. Hence why we have both Def and ZDef.
+        ZDef,
+
+        // This is a combined Use and Def. It means that both things happen.
+        UseDef,
+
+        // This is a combined Use and ZDef. It means that both things happen.
+        UseZDef,
+
+        // This is like Def, but implies that the assignment occurs before the start of the Inst's
+        // execution rather than after. Note that specifying the same Tmp once as EarlyDef and once
+        // as Use has undefined behavior: the use may happen before the def, or it may happen after
+        // it.
+        EarlyDef,
+
+        // Some instructions need a scratch register. We model this by saying that the temporary is
+        // defined early and used late. This role implies that.
+        Scratch,
+
+        // This is a special kind of use that is only valid for addresses. It means that the
+        // instruction will evaluate the address expression and consume the effective address, but it
+        // will neither load nor store. This is an escaping use, because now the address may be
+        // passed along to who-knows-where. Note that this isn't really a Use of the Arg, but it does
+        // imply that we're Use'ing any registers that the Arg contains.
+        UseAddr
+    };
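+
+    // For example, in the two-operand "Add32 %a, %b" (b += a), %a is a Use and
+    // %b is a UseDef; in "Move32 %a, %b" on a 64-bit target, %b is a ZDef since
+    // the upper 32 bits of the destination are zero-filled. (Illustrative; the
+    // authoritative role assignments live in AirOpcode.opcodes.)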
+
+    enum Type : int8_t {
+        GP,
+        FP
+    };
+
+    static const unsigned numTypes = 2;
+
+    template<typename Functor>
+    static void forEachType(const Functor& functor)
+    {
+        functor(GP);
+        functor(FP);
+    }
+
+    enum Width : int8_t {
+        Width8,
+        Width16,
+        Width32,
+        Width64
+    };
+
+    static Width pointerWidth()
+    {
+        if (sizeof(void*) == 8)
+            return Width64;
+        return Width32;
+    }
+
+    enum Signedness : int8_t {
+        Signed,
+        Unsigned
+    };
+
+    // Returns true if the Role implies that the Inst will Use the Arg. It's deliberately false for
+    // UseAddr, since isAnyUse() for an Arg::addr means that we are loading from the address.
+    static bool isAnyUse(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseDef:
+        case UseZDef:
+        case LateUse:
+        case LateColdUse:
+        case Scratch:
+            return true;
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case EarlyDef:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    static bool isColdUse(Role role)
+    {
+        switch (role) {
+        case ColdUse:
+        case LateColdUse:
+            return true;
+        case Use:
+        case UseDef:
+        case UseZDef:
+        case LateUse:
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case Scratch:
+        case EarlyDef:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    static bool isWarmUse(Role role)
+    {
+        return isAnyUse(role) && !isColdUse(role);
+    }
+
+    static Role cooled(Role role)
+    {
+        switch (role) {
+        case ColdUse:
+        case LateColdUse:
+        case UseDef:
+        case UseZDef:
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case Scratch:
+        case EarlyDef:
+            return role;
+        case Use:
+            return ColdUse;
+        case LateUse:
+            return LateColdUse;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Use the Arg before doing anything else.
+    static bool isEarlyUse(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseDef:
+        case UseZDef:
+            return true;
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case LateUse:
+        case LateColdUse:
+        case Scratch:
+        case EarlyDef:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Use the Arg after doing everything else.
+    static bool isLateUse(Role role)
+    {
+        switch (role) {
+        case LateUse:
+        case LateColdUse:
+        case Scratch:
+            return true;
+        case ColdUse:
+        case Use:
+        case UseDef:
+        case UseZDef:
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case EarlyDef:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Def the Arg.
+    static bool isAnyDef(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseAddr:
+        case LateUse:
+        case LateColdUse:
+            return false;
+        case Def:
+        case UseDef:
+        case ZDef:
+        case UseZDef:
+        case EarlyDef:
+        case Scratch:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Def the Arg before start of execution.
+    static bool isEarlyDef(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseAddr:
+        case LateUse:
+        case Def:
+        case UseDef:
+        case ZDef:
+        case UseZDef:
+        case LateColdUse:
+            return false;
+        case EarlyDef:
+        case Scratch:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Def the Arg after the end of execution.
+    static bool isLateDef(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseAddr:
+        case LateUse:
+        case EarlyDef:
+        case Scratch:
+        case LateColdUse:
+            return false;
+        case Def:
+        case UseDef:
+        case ZDef:
+        case UseZDef:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will ZDef the Arg.
+    static bool isZDef(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseAddr:
+        case LateUse:
+        case Def:
+        case UseDef:
+        case EarlyDef:
+        case Scratch:
+        case LateColdUse:
+            return false;
+        case ZDef:
+        case UseZDef:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    static Type typeForB3Type(B3::Type type)
+    {
+        switch (type) {
+        case Void:
+            ASSERT_NOT_REACHED();
+            return GP;
+        case Int32:
+        case Int64:
+            return GP;
+        case Float:
+        case Double:
+            return FP;
+        }
+        ASSERT_NOT_REACHED();
+        return GP;
+    }
+
+    static Width widthForB3Type(B3::Type type)
+    {
+        switch (type) {
+        case Void:
+            ASSERT_NOT_REACHED();
+            return Width8;
+        case Int32:
+        case Float:
+            return Width32;
+        case Int64:
+        case Double:
+            return Width64;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    static Width conservativeWidth(Type type)
+    {
+        return type == GP ? pointerWidth() : Width64;
+    }
+
+    static Width minimumWidth(Type type)
+    {
+        return type == GP ? Width8 : Width32;
+    }
+
+    static unsigned bytes(Width width)
+    {
+        return 1 << width;
+    }
+
+    static Width widthForBytes(unsigned bytes)
+    {
+        switch (bytes) {
+        case 0:
+        case 1:
+            return Width8;
+        case 2:
+            return Width16;
+        case 3:
+        case 4:
+            return Width32;
+        default:
+            return Width64;
+        }
+    }
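+
+    // Examples: bytes(Width32) == 4 (1 << 2) and widthForBytes(3) == Width32.
+    // widthForBytes rounds a byte count up to the narrowest width of at least
+    // that many bytes (counts above 8 also map to Width64), so
+    // widthForBytes(bytes(w)) == w for every Width w.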
+
+    Arg()
+        : m_kind(Invalid)
+    {
+    }
+
+    Arg(Air::Tmp tmp)
+        : m_kind(Tmp)
+        , m_base(tmp)
+    {
+    }
+
+    Arg(Reg reg)
+        : Arg(Air::Tmp(reg))
+    {
+    }
+
+    static Arg imm(int64_t value)
+    {
+        Arg result;
+        result.m_kind = Imm;
+        result.m_offset = value;
+        return result;
+    }
+
+    static Arg bigImm(int64_t value)
+    {
+        Arg result;
+        result.m_kind = BigImm;
+        result.m_offset = value;
+        return result;
+    }
+
+    static Arg bitImm(int64_t value)
+    {
+        Arg result;
+        result.m_kind = BitImm;
+        result.m_offset = value;
+        return result;
+    }
+
+    static Arg bitImm64(int64_t value)
+    {
+        Arg result;
+        result.m_kind = BitImm64;
+        result.m_offset = value;
+        return result;
+    }
+
+    static Arg immPtr(const void* address)
+    {
+        return bigImm(bitwise_cast<intptr_t>(address));
+    }
+
+    static Arg addr(Air::Tmp base, int32_t offset = 0)
+    {
+        ASSERT(base.isGP());
+        Arg result;
+        result.m_kind = Addr;
+        result.m_base = base;
+        result.m_offset = offset;
+        return result;
+    }
+
+    static Arg stack(StackSlot* value, int32_t offset = 0)
+    {
+        Arg result;
+        result.m_kind = Stack;
+        result.m_offset = bitwise_cast<intptr_t>(value);
+        result.m_scale = offset; // I know, yuck.
+        return result;
+    }
+
+    static Arg callArg(int32_t offset)
+    {
+        Arg result;
+        result.m_kind = CallArg;
+        result.m_offset = offset;
+        return result;
+    }
+
+    static Arg stackAddr(int32_t offsetFromFP, unsigned frameSize, Width width)
+    {
+        Arg result = Arg::addr(Air::Tmp(GPRInfo::callFrameRegister), offsetFromFP);
+        if (!result.isValidForm(width)) {
+            result = Arg::addr(
+                Air::Tmp(MacroAssembler::stackPointerRegister),
+                offsetFromFP + frameSize);
+        }
+        return result;
+    }
+
+    // If you don't pass a Width, this optimistically assumes that you're using the right width.
+    static bool isValidScale(unsigned scale, std::optional<Width> width = std::nullopt)
+    {
+        switch (scale) {
+        case 1:
+            if (isX86() || isARM64())
+                return true;
+            return false;
+        case 2:
+        case 4:
+        case 8:
+            if (isX86())
+                return true;
+            if (isARM64()) {
+                if (!width)
+                    return true;
+                return scale == 1 || scale == bytes(*width);
+            }
+            return false;
+        default:
+            return false;
+        }
+    }
+
+    static unsigned logScale(unsigned scale)
+    {
+        switch (scale) {
+        case 1:
+            return 0;
+        case 2:
+            return 1;
+        case 4:
+            return 2;
+        case 8:
+            return 3;
+        default:
+            ASSERT_NOT_REACHED();
+            return 0;
+        }
+    }
+
+    static Arg index(Air::Tmp base, Air::Tmp index, unsigned scale = 1, int32_t offset = 0)
+    {
+        ASSERT(base.isGP());
+        ASSERT(index.isGP());
+        ASSERT(isValidScale(scale));
+        Arg result;
+        result.m_kind = Index;
+        result.m_base = base;
+        result.m_index = index;
+        result.m_scale = static_cast<int32_t>(scale);
+        result.m_offset = offset;
+        return result;
+    }
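+
+    // For example, Arg::index(base, idx, 8, 16) denotes the effective address
+    // base + idx * 8 + 16, subject to isValidScale() for the target and width.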
+
+    static Arg relCond(MacroAssembler::RelationalCondition condition)
+    {
+        Arg result;
+        result.m_kind = RelCond;
+        result.m_offset = condition;
+        return result;
+    }
+
+    static Arg resCond(MacroAssembler::ResultCondition condition)
+    {
+        Arg result;
+        result.m_kind = ResCond;
+        result.m_offset = condition;
+        return result;
+    }
+
+    static Arg doubleCond(MacroAssembler::DoubleCondition condition)
+    {
+        Arg result;
+        result.m_kind = DoubleCond;
+        result.m_offset = condition;
+        return result;
+    }
+
+    static Arg special(Air::Special* special)
+    {
+        Arg result;
+        result.m_kind = Special;
+        result.m_offset = bitwise_cast<intptr_t>(special);
+        return result;
+    }
+
+    static Arg widthArg(Width width)
+    {
+        Arg result;
+        result.m_kind = WidthArg;
+        result.m_offset = width;
+        return result;
+    }
+
+    bool operator==(const Arg& other) const
+    {
+        return m_offset == other.m_offset
+            && m_kind == other.m_kind
+            && m_base == other.m_base
+            && m_index == other.m_index
+            && m_scale == other.m_scale;
+    }
+
+    bool operator!=(const Arg& other) const
+    {
+        return !(*this == other);
+    }
+
+    explicit operator bool() const { return *this != Arg(); }
+
+    Kind kind() const
+    {
+        return m_kind;
+    }
+
+    bool isTmp() const
+    {
+        return kind() == Tmp;
+    }
+
+    bool isImm() const
+    {
+        return kind() == Imm;
+    }
+
+    bool isBigImm() const
+    {
+        return kind() == BigImm;
+    }
+
+    bool isBitImm() const
+    {
+        return kind() == BitImm;
+    }
+
+    bool isBitImm64() const
+    {
+        return kind() == BitImm64;
+    }
+
+    bool isSomeImm() const
+    {
+        switch (kind()) {
+        case Imm:
+        case BigImm:
+        case BitImm:
+        case BitImm64:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isAddr() const
+    {
+        return kind() == Addr;
+    }
+
+    bool isStack() const
+    {
+        return kind() == Stack;
+    }
+
+    bool isCallArg() const
+    {
+        return kind() == CallArg;
+    }
+
+    bool isIndex() const
+    {
+        return kind() == Index;
+    }
+
+    bool isMemory() const
+    {
+        switch (kind()) {
+        case Addr:
+        case Stack:
+        case CallArg:
+        case Index:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isStackMemory() const;
+
+    bool isRelCond() const
+    {
+        return kind() == RelCond;
+    }
+
+    bool isResCond() const
+    {
+        return kind() == ResCond;
+    }
+
+    bool isDoubleCond() const
+    {
+        return kind() == DoubleCond;
+    }
+
+    bool isCondition() const
+    {
+        switch (kind()) {
+        case RelCond:
+        case ResCond:
+        case DoubleCond:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isSpecial() const
+    {
+        return kind() == Special;
+    }
+
+    bool isWidthArg() const
+    {
+        return kind() == WidthArg;
+    }
+
+    bool isAlive() const
+    {
+        return isTmp() || isStack();
+    }
+
+    Air::Tmp tmp() const
+    {
+        ASSERT(kind() == Tmp);
+        return m_base;
+    }
+
+    int64_t value() const
+    {
+        ASSERT(isSomeImm());
+        return m_offset;
+    }
+
+    template<typename T>
+    bool isRepresentableAs() const
+    {
+        return B3::isRepresentableAs<T>(value());
+    }
+    
+    static bool isRepresentableAs(Width width, Signedness signedness, int64_t value)
+    {
+        switch (signedness) {
+        case Signed:
+            switch (width) {
+            case Width8:
+                return B3::isRepresentableAs<int8_t>(value);
+            case Width16:
+                return B3::isRepresentableAs<int16_t>(value);
+            case Width32:
+                return B3::isRepresentableAs<int32_t>(value);
+            case Width64:
+                return B3::isRepresentableAs<int64_t>(value);
+            }
+        case Unsigned:
+            switch (width) {
+            case Width8:
+                return B3::isRepresentableAs<uint8_t>(value);
+            case Width16:
+                return B3::isRepresentableAs<uint16_t>(value);
+            case Width32:
+                return B3::isRepresentableAs<uint32_t>(value);
+            case Width64:
+                return B3::isRepresentableAs<uint64_t>(value);
+            }
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    bool isRepresentableAs(Width, Signedness) const;
+    
+    static int64_t castToType(Width width, Signedness signedness, int64_t value)
+    {
+        switch (signedness) {
+        case Signed:
+            switch (width) {
+            case Width8:
+                return static_cast<int8_t>(value);
+            case Width16:
+                return static_cast<int16_t>(value);
+            case Width32:
+                return static_cast<int32_t>(value);
+            case Width64:
+                return static_cast<int64_t>(value);
+            }
+        case Unsigned:
+            switch (width) {
+            case Width8:
+                return static_cast<uint8_t>(value);
+            case Width16:
+                return static_cast<uint16_t>(value);
+            case Width32:
+                return static_cast<uint32_t>(value);
+            case Width64:
+                return static_cast<uint64_t>(value);
+            }
+        }
+        ASSERT_NOT_REACHED();
+    }
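+
+    // Examples: castToType(Width8, Signed, 384) == -128 (384 is 0x180 and the
+    // low byte 0x80 reads back as -128), while isRepresentableAs(Width8,
+    // Signed, 384) is false and isRepresentableAs(Width8, Unsigned, 200) is true.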
+
+    template<typename T>
+    T asNumber() const
+    {
+        return static_cast(value());
+    }
+
+    void* pointerValue() const
+    {
+        ASSERT(kind() == BigImm);
+        return bitwise_cast<void*>(static_cast<intptr_t>(m_offset));
+    }
+
+    Air::Tmp base() const
+    {
+        ASSERT(kind() == Addr || kind() == Index);
+        return m_base;
+    }
+
+    bool hasOffset() const { return isMemory(); }
+    
+    int32_t offset() const
+    {
+        if (kind() == Stack)
+            return static_cast<int32_t>(m_scale);
+        ASSERT(kind() == Addr || kind() == CallArg || kind() == Index);
+        return static_cast<int32_t>(m_offset);
+    }
+
+    StackSlot* stackSlot() const
+    {
+        ASSERT(kind() == Stack);
+        return bitwise_cast<StackSlot*>(m_offset);
+    }
+
+    Air::Tmp index() const
+    {
+        ASSERT(kind() == Index);
+        return m_index;
+    }
+
+    unsigned scale() const
+    {
+        ASSERT(kind() == Index);
+        return m_scale;
+    }
+
+    unsigned logScale() const
+    {
+        return logScale(scale());
+    }
+
+    Air::Special* special() const
+    {
+        ASSERT(kind() == Special);
+        return bitwise_cast<Air::Special*>(m_offset);
+    }
+
+    Width width() const
+    {
+        ASSERT(kind() == WidthArg);
+        return static_cast<Width>(m_offset);
+    }
+
+    bool isGPTmp() const
+    {
+        return isTmp() && tmp().isGP();
+    }
+
+    bool isFPTmp() const
+    {
+        return isTmp() && tmp().isFP();
+    }
+    
+    // Tells us if this Arg can be used in a position that requires a GP value.
+    bool isGP() const
+    {
+        switch (kind()) {
+        case Imm:
+        case BigImm:
+        case BitImm:
+        case BitImm64:
+        case Addr:
+        case Index:
+        case Stack:
+        case CallArg:
+        case RelCond:
+        case ResCond:
+        case DoubleCond:
+        case Special:
+        case WidthArg:
+            return true;
+        case Tmp:
+            return isGPTmp();
+        case Invalid:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Tells us if this Arg can be used in a position that requires a FP value.
+    bool isFP() const
+    {
+        switch (kind()) {
+        case Imm:
+        case BitImm:
+        case BitImm64:
+        case RelCond:
+        case ResCond:
+        case DoubleCond:
+        case Special:
+        case WidthArg:
+        case Invalid:
+            return false;
+        case Addr:
+        case Index:
+        case Stack:
+        case CallArg:
+        case BigImm: // Yes, we allow BigImm as a double immediate. We use this for implementing stackmaps.
+            return true;
+        case Tmp:
+            return isFPTmp();
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    bool hasType() const
+    {
+        switch (kind()) {
+        case Imm:
+        case BitImm:
+        case BitImm64:
+        case Special:
+        case Tmp:
+            return true;
+        default:
+            return false;
+        }
+    }
+    
+    // The type is ambiguous for some arg kinds. Call with care.
+    Type type() const
+    {
+        return isGP() ? GP : FP;
+    }
+
+    bool isType(Type type) const
+    {
+        switch (type) {
+        case GP:
+            return isGP();
+        case FP:
+            return isFP();
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    bool canRepresent(Value* value) const;
+
+    bool isCompatibleType(const Arg& other) const;
+
+    bool isGPR() const
+    {
+        return isTmp() && tmp().isGPR();
+    }
+
+    GPRReg gpr() const
+    {
+        return tmp().gpr();
+    }
+
+    bool isFPR() const
+    {
+        return isTmp() && tmp().isFPR();
+    }
+
+    FPRReg fpr() const
+    {
+        return tmp().fpr();
+    }
+    
+    bool isReg() const
+    {
+        return isTmp() && tmp().isReg();
+    }
+
+    Reg reg() const
+    {
+        return tmp().reg();
+    }
+
+    unsigned gpTmpIndex() const
+    {
+        return tmp().gpTmpIndex();
+    }
+
+    unsigned fpTmpIndex() const
+    {
+        return tmp().fpTmpIndex();
+    }
+
+    unsigned tmpIndex() const
+    {
+        return tmp().tmpIndex();
+    }
+
+    static bool isValidImmForm(int64_t value)
+    {
+        if (isX86())
+            return B3::isRepresentableAs<int32_t>(value);
+        if (isARM64())
+            return isUInt12(value);
+        return false;
+    }
+
+    static bool isValidBitImmForm(int64_t value)
+    {
+        if (isX86())
+            return B3::isRepresentableAs<uint32_t>(value);
+        if (isARM64())
+            return ARM64LogicalImmediate::create32(value).isValid();
+        return false;
+    }
+
+    static bool isValidBitImm64Form(int64_t value)
+    {
+        if (isX86())
+            return B3::isRepresentableAs<uint64_t>(value);
+        if (isARM64())
+            return ARM64LogicalImmediate::create64(value).isValid();
+        return false;
+    }
+
+    static bool isValidAddrForm(int32_t offset, std::optional<Width> width = std::nullopt)
+    {
+        if (isX86())
+            return true;
+        if (isARM64()) {
+            if (!width)
+                return true;
+
+            if (isValidSignedImm9(offset))
+                return true;
+
+            switch (*width) {
+            case Width8:
+                return isValidScaledUImm12<8>(offset);
+            case Width16:
+                return isValidScaledUImm12<16>(offset);
+            case Width32:
+                return isValidScaledUImm12<32>(offset);
+            case Width64:
+                return isValidScaledUImm12<64>(offset);
+            }
+        }
+        return false;
+    }
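+
+    // A hedged example of the ARM64 rules above: with Width64, an offset must
+    // either fit in a signed 9-bit field or be an 8-byte-aligned unsigned
+    // 12-bit scaled offset. So isValidAddrForm(4096, Width64) is true
+    // (4096 == 512 * 8), while isValidAddrForm(257, Width64) is false (out of
+    // imm9 range and not a multiple of 8).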
+
+    static bool isValidIndexForm(unsigned scale, int32_t offset, std::optional<Width> width = std::nullopt)
+    {
+        if (!isValidScale(scale, width))
+            return false;
+        if (isX86())
+            return true;
+        if (isARM64())
+            return !offset;
+        return false;
+    }
+
+    // If you don't pass a width then this optimistically assumes that you're using the right width. But
+    // the width is relevant to validity, so passing a null width is only useful for assertions. Don't
+    // pass null widths when cascading through Args in the instruction selector!
+    bool isValidForm(std::optional<Width> width = std::nullopt) const
+    {
+        switch (kind()) {
+        case Invalid:
+            return false;
+        case Tmp:
+            return true;
+        case Imm:
+            return isValidImmForm(value());
+        case BigImm:
+            return true;
+        case BitImm:
+            return isValidBitImmForm(value());
+        case BitImm64:
+            return isValidBitImm64Form(value());
+        case Addr:
+        case Stack:
+        case CallArg:
+            return isValidAddrForm(offset(), width);
+        case Index:
+            return isValidIndexForm(scale(), offset(), width);
+        case RelCond:
+        case ResCond:
+        case DoubleCond:
+        case Special:
+        case WidthArg:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    template<typename Functor>
+    void forEachTmpFast(const Functor& functor)
+    {
+        switch (m_kind) {
+        case Tmp:
+        case Addr:
+            functor(m_base);
+            break;
+        case Index:
+            functor(m_base);
+            functor(m_index);
+            break;
+        default:
+            break;
+        }
+    }
+
+    bool usesTmp(Air::Tmp tmp) const;
+
+    template<typename Thing>
+    bool is() const;
+
+    template<typename Thing>
+    Thing as() const;
+
+    template<typename Thing, typename Functor>
+    void forEachFast(const Functor&);
+
+    template<typename Thing, typename Functor>
+    void forEach(Role, Type, Width, const Functor&);
+
+    // This is smart enough to know that an address arg in a Def or UseDef rule will use its
+    // tmps and never def them. For example, this:
+    //
+    // mov %rax, (%rcx)
+    //
+    // This defs (%rcx) but uses %rcx.
+    template<typename Functor>
+    void forEachTmp(Role argRole, Type argType, Width argWidth, const Functor& functor)
+    {
+        switch (m_kind) {
+        case Tmp:
+            ASSERT(isAnyUse(argRole) || isAnyDef(argRole));
+            functor(m_base, argRole, argType, argWidth);
+            break;
+        case Addr:
+            functor(m_base, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+            break;
+        case Index:
+            functor(m_base, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+            functor(m_index, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+            break;
+        default:
+            break;
+        }
+    }
+
+    MacroAssembler::TrustedImm32 asTrustedImm32() const
+    {
+        ASSERT(isImm() || isBitImm());
+        return MacroAssembler::TrustedImm32(static_cast<int32_t>(m_offset));
+    }
+
+#if USE(JSVALUE64)
+    MacroAssembler::TrustedImm64 asTrustedImm64() const
+    {
+        ASSERT(isBigImm() || isBitImm64());
+        return MacroAssembler::TrustedImm64(value());
+    }
+#endif
+
+    MacroAssembler::TrustedImmPtr asTrustedImmPtr() const
+    {
+        if (is64Bit())
+            ASSERT(isBigImm());
+        else
+            ASSERT(isImm());
+        return MacroAssembler::TrustedImmPtr(pointerValue());
+    }
+
+    MacroAssembler::Address asAddress() const
+    {
+        ASSERT(isAddr());
+        return MacroAssembler::Address(m_base.gpr(), static_cast<int32_t>(m_offset));
+    }
+
+    MacroAssembler::BaseIndex asBaseIndex() const
+    {
+        ASSERT(isIndex());
+        return MacroAssembler::BaseIndex(
+            m_base.gpr(), m_index.gpr(), static_cast<MacroAssembler::Scale>(logScale()),
+            static_cast<int32_t>(m_offset));
+    }
+
+    MacroAssembler::RelationalCondition asRelationalCondition() const
+    {
+        ASSERT(isRelCond());
+        return static_cast<MacroAssembler::RelationalCondition>(m_offset);
+    }
+
+    MacroAssembler::ResultCondition asResultCondition() const
+    {
+        ASSERT(isResCond());
+        return static_cast<MacroAssembler::ResultCondition>(m_offset);
+    }
+
+    MacroAssembler::DoubleCondition asDoubleCondition() const
+    {
+        ASSERT(isDoubleCond());
+        return static_cast<MacroAssembler::DoubleCondition>(m_offset);
+    }
+    
+    // Tells you if the Arg is invertible. Only condition arguments are invertible, and even for those, there
+    // are a few exceptions - notably Overflow and Signed.
+    bool isInvertible() const
+    {
+        switch (kind()) {
+        case RelCond:
+        case DoubleCond:
+            return true;
+        case ResCond:
+            return MacroAssembler::isInvertible(asResultCondition());
+        default:
+            return false;
+        }
+    }
+
+    // This is valid for condition arguments. It will invert them.
+    Arg inverted(bool inverted = true) const
+    {
+        if (!inverted)
+            return *this;
+        switch (kind()) {
+        case RelCond:
+            return relCond(MacroAssembler::invert(asRelationalCondition()));
+        case ResCond:
+            return resCond(MacroAssembler::invert(asResultCondition()));
+        case DoubleCond:
+            return doubleCond(MacroAssembler::invert(asDoubleCondition()));
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return Arg();
+        }
+    }
+
+    Arg flipped(bool flipped = true) const
+    {
+        if (!flipped)
+            return Arg();
+        return relCond(MacroAssembler::flip(asRelationalCondition()));
+    }
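+
+    // For example (illustrative): inverted() maps Equal to NotEqual - the
+    // condition that holds exactly when the original fails - while flipped()
+    // maps LessThan to GreaterThan, i.e. the condition you would test after
+    // swapping the two operands.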
+
+    bool isSignedCond() const
+    {
+        return isRelCond() && MacroAssembler::isSigned(asRelationalCondition());
+    }
+
+    bool isUnsignedCond() const
+    {
+        return isRelCond() && MacroAssembler::isUnsigned(asRelationalCondition());
+    }
+
+    // This computes a hash for comparing this to JSAir's Arg.
+    unsigned jsHash() const;
+    
+    void dump(PrintStream&) const;
+
+    Arg(WTF::HashTableDeletedValueType)
+        : m_base(WTF::HashTableDeletedValue)
+    {
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return *this == Arg(WTF::HashTableDeletedValue);
+    }
+
+    unsigned hash() const
+    {
+        // This really doesn't have to be that great.
+        return WTF::IntHash<int64_t>::hash(m_offset) + m_kind + m_scale + m_base.hash() +
+            m_index.hash();
+    }
+
+private:
+    int64_t m_offset { 0 };
+    Kind m_kind { Invalid };
+    int32_t m_scale { 1 };
+    Air::Tmp m_base;
+    Air::Tmp m_index;
+};
+
+struct ArgHash {
+    static unsigned hash(const Arg& key) { return key.hash(); }
+    static bool equal(const Arg& a, const Arg& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Kind);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Role);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Type);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Width);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Signedness);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Air::Arg> {
+    typedef JSC::B3::Air::ArgHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Air::Arg> : SimpleClassHashTraits<JSC::B3::Air::Arg> {
+    // Because m_scale is 1 in the empty value.
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirArgInlines.h b/Source/JavaScriptCore/b3/air/AirArgInlines.h
new file mode 100644
index 000000000..73f7d5bba
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirArgInlines.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<typename Thing> struct ArgThingHelper;
+
+template<> struct ArgThingHelper<Tmp> {
+    static bool is(const Arg& arg)
+    {
+        return arg.isTmp();
+    }
+
+    static Tmp as(const Arg& arg)
+    {
+        if (is(arg))
+            return arg.tmp();
+        return Tmp();
+    }
+
+    template<typename Functor>
+    static void forEachFast(Arg& arg, const Functor& functor)
+    {
+        arg.forEachTmpFast(functor);
+    }
+
+    template<typename Functor>
+    static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+    {
+        arg.forEachTmp(role, type, width, functor);
+    }
+};
+
+template<> struct ArgThingHelper<Arg> {
+    static bool is(const Arg&)
+    {
+        return true;
+    }
+
+    static Arg as(const Arg& arg)
+    {
+        return arg;
+    }
+
+    template<typename Functor>
+    static void forEachFast(Arg& arg, const Functor& functor)
+    {
+        functor(arg);
+    }
+
+    template<typename Functor>
+    static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+    {
+        functor(arg, role, type, width);
+    }
+};
+
+template<> struct ArgThingHelper<StackSlot*> {
+    static bool is(const Arg& arg)
+    {
+        return arg.isStack();
+    }
+    
+    static StackSlot* as(const Arg& arg)
+    {
+        return arg.stackSlot();
+    }
+    
+    template<typename Functor>
+    static void forEachFast(Arg& arg, const Functor& functor)
+    {
+        if (!arg.isStack())
+            return;
+        
+        StackSlot* stackSlot = arg.stackSlot();
+        functor(stackSlot);
+        arg = Arg::stack(stackSlot, arg.offset());
+    }
+    
+    template<typename Functor>
+    static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+    {
+        if (!arg.isStack())
+            return;
+        
+        StackSlot* stackSlot = arg.stackSlot();
+        
+        // FIXME: This is way too optimistic about the meaning of "Def". It gets lucky for
+        // now because our only use of "Anonymous" stack slots happens to want the optimistic
+        // semantics. We could fix this by just changing the comments that describe the
+        // semantics of "Anonymous".
+        // https://bugs.webkit.org/show_bug.cgi?id=151128
+        
+        functor(stackSlot, role, type, width);
+        arg = Arg::stack(stackSlot, arg.offset());
+    }
+};
+
+template<> struct ArgThingHelper<Reg> {
+    static bool is(const Arg& arg)
+    {
+        return arg.isReg();
+    }
+    
+    static Reg as(const Arg& arg)
+    {
+        return arg.reg();
+    }
+    
+    template<typename Functor>
+    static void forEachFast(Arg& arg, const Functor& functor)
+    {
+        arg.forEachTmpFast(
+            [&] (Tmp& tmp) {
+                if (!tmp.isReg())
+                    return;
+                
+                Reg reg = tmp.reg();
+                functor(reg);
+                tmp = Tmp(reg);
+            });
+    }
+    
+    template<typename Functor>
+    static void forEach(Arg& arg, Arg::Role argRole, Arg::Type argType, Arg::Width argWidth, const Functor& functor)
+    {
+        arg.forEachTmp(
+            argRole, argType, argWidth,
+            [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width width) {
+                if (!tmp.isReg())
+                    return;
+                
+                Reg reg = tmp.reg();
+                functor(reg, role, type, width);
+                tmp = Tmp(reg);
+            });
+    }
+};
+
+template<typename Thing>
+bool Arg::is() const
+{
+    return ArgThingHelper<Thing>::is(*this);
+}
+
+template<typename Thing>
+Thing Arg::as() const
+{
+    return ArgThingHelper<Thing>::as(*this);
+}
+
+template<typename Thing, typename Functor>
+void Arg::forEachFast(const Functor& functor)
+{
+    ArgThingHelper<Thing>::forEachFast(*this, functor);
+}
+
+template<typename Thing, typename Functor>
+void Arg::forEach(Role role, Type type, Width width, const Functor& functor)
+{
+    ArgThingHelper<Thing>::forEach(*this, role, type, width, functor);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp b/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp
new file mode 100644
index 000000000..fa3ad8e4d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirBasicBlock.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockUtils.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+const char* const BasicBlock::dumpPrefix = "#";
+
+bool BasicBlock::addPredecessor(BasicBlock* block)
+{
+    return B3::addPredecessor(this, block);
+}
+
+bool BasicBlock::removePredecessor(BasicBlock* block)
+{
+    return B3::removePredecessor(this, block);
+}
+
+bool BasicBlock::replacePredecessor(BasicBlock* from, BasicBlock* to)
+{
+    return B3::replacePredecessor(this, from, to);
+}
+
+void BasicBlock::dump(PrintStream& out) const
+{
+    out.print(dumpPrefix, m_index);
+}
+
+void BasicBlock::deepDump(PrintStream& out) const
+{
+    dumpHeader(out);
+    for (const Inst& inst : *this)
+        out.print("    ", inst, "\n");
+    dumpFooter(out);
+}
+
+void BasicBlock::dumpHeader(PrintStream& out) const
+{
+    out.print("BB", *this, ": ; frequency = ", m_frequency, "\n");
+    if (predecessors().size())
+        out.print("  Predecessors: ", pointerListDump(predecessors()), "\n");
+}
+
+void BasicBlock::dumpFooter(PrintStream& out) const
+{
+    if (successors().size())
+        out.print("  Successors: ", listDump(successors()), "\n");
+}
+
+BasicBlock::BasicBlock(unsigned index, double frequency)
+    : m_index(index)
+    , m_frequency(frequency)
+{
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirBasicBlock.h b/Source/JavaScriptCore/b3/air/AirBasicBlock.h
new file mode 100644
index 000000000..431bd711c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirBasicBlock.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirFrequentedBlock.h"
+#include "AirInst.h"
+#include "B3SuccessorCollection.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BlockInsertionSet;
+class Code;
+class InsertionSet;
+
+class BasicBlock {
+    WTF_MAKE_NONCOPYABLE(BasicBlock);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    static const char* const dumpPrefix;
+
+    typedef Vector<Inst> InstList;
+    typedef Vector<BasicBlock*, 2> PredecessorList;
+    typedef Vector<FrequentedBlock, 2> SuccessorList;
+
+    unsigned index() const { return m_index; }
+
+    // This method is exposed for phases that mess with the layout of basic blocks. Currently that means just
+    // optimizeBlockOrder().
+    void setIndex(unsigned index) { m_index = index; }
+    
+    unsigned size() const { return m_insts.size(); }
+    InstList::iterator begin() { return m_insts.begin(); }
+    InstList::iterator end() { return m_insts.end(); }
+    InstList::const_iterator begin() const { return m_insts.begin(); }
+    InstList::const_iterator end() const { return m_insts.end(); }
+
+    const Inst& at(unsigned index) const { return m_insts[index]; }
+    Inst& at(unsigned index) { return m_insts[index]; }
+
+    Inst* get(unsigned index)
+    {
+        return index < size() ? &at(index) : nullptr;
+    }
+
+    const Inst& last() const { return m_insts.last(); }
+    Inst& last() { return m_insts.last(); }
+
+    void resize(unsigned size) { m_insts.resize(size); }
+
+    const InstList& insts() const { return m_insts; }
+    InstList& insts() { return m_insts; }
+
+    template<typename Inst = Air::Inst>
+    Inst& appendInst(Inst&& inst)
+    {
+        m_insts.append(std::forward<Inst>(inst));
+        return m_insts.last();
+    }
+
+    template<typename... Arguments>
+    Inst& append(Arguments&&... arguments)
+    {
+        m_insts.append(Inst(std::forward<Arguments>(arguments)...));
+        return m_insts.last();
+    }
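+
+    // Sketch of typical use (the opcode and operands here are assumptions made
+    // for illustration, not taken from this file): append() builds the Inst in
+    // place from its constructor arguments, e.g.
+    //
+    //     block->append(Move, origin, Arg(src), Arg(dst));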
+
+    // The "0" case is the case to which the branch jumps, so the "then" case. The "1" case is the
+    // "else" case, and is used to represent the fall-through of a conditional branch.
+    unsigned numSuccessors() const { return m_successors.size(); }
+    FrequentedBlock successor(unsigned index) const { return m_successors[index]; }
+    FrequentedBlock& successor(unsigned index) { return m_successors[index]; }
+    const SuccessorList& successors() const { return m_successors; }
+    SuccessorList& successors() { return m_successors; }
+
+    BasicBlock* successorBlock(unsigned index) const { return successor(index).block(); }
+    BasicBlock*& successorBlock(unsigned index) { return successor(index).block(); }
+    SuccessorCollection<BasicBlock, SuccessorList> successorBlocks()
+    {
+        return SuccessorCollection<BasicBlock, SuccessorList>(m_successors);
+    }
+    SuccessorCollection<const BasicBlock, const SuccessorList> successorBlocks() const
+    {
+        return SuccessorCollection<const BasicBlock, const SuccessorList>(m_successors);
+    }
+
+    unsigned numPredecessors() const { return m_predecessors.size(); }
+    BasicBlock* predecessor(unsigned index) const { return m_predecessors[index]; }
+    BasicBlock*& predecessor(unsigned index) { return m_predecessors[index]; }
+    const PredecessorList& predecessors() const { return m_predecessors; }
+    PredecessorList& predecessors() { return m_predecessors; }
+
+    bool addPredecessor(BasicBlock*);
+    bool removePredecessor(BasicBlock*);
+    bool replacePredecessor(BasicBlock* from, BasicBlock* to);
+    bool containsPredecessor(BasicBlock* predecessor) const { return m_predecessors.contains(predecessor); }
+
+    double frequency() const { return m_frequency; }
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+    void dumpHeader(PrintStream&) const;
+    void dumpFooter(PrintStream&) const;
+
+private:
+    friend class BlockInsertionSet;
+    friend class Code;
+    friend class InsertionSet;
+    
+    BasicBlock(unsigned index, double frequency);
+
+    unsigned m_index;
+    InstList m_insts;
+    SuccessorList m_successors;
+    PredecessorList m_predecessors;
+    double m_frequency;
+};
+
+class DeepBasicBlockDump {
+public:
+    DeepBasicBlockDump(const BasicBlock* block)
+        : m_block(block)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_block)
+            m_block->deepDump(out);
+        else
+            out.print("");
+    }
+
+private:
+    const BasicBlock* m_block;
+};
+
+inline DeepBasicBlockDump deepDump(const BasicBlock* block)
+{
+    return DeepBasicBlockDump(block);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirBlockWorklist.h b/Source/JavaScriptCore/b3/air/AirBlockWorklist.h
new file mode 100644
index 000000000..ba231a9b5
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirBlockWorklist.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "B3BlockWorklist.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+typedef GraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> BlockWorklist;
+
+// When you say BlockWith<int> you should read it as "block with an int".
+template<typename T> using BlockWith = GraphNodeWith<BasicBlock*, T>;
+
+// Extended block worklist is useful for enqueueing some meta-data along with the block. It also
+// permits forcibly enqueueing things even if the block has already been seen. It's useful for
+// things like building a spanning tree, in which case T (the auxiliary payload) would be the
+// successor index.
+template<typename T> using ExtendedBlockWorklist = ExtendedGraphNodeWorklist<BasicBlock*, T, IndexSet<BasicBlock>>;
+
+typedef GraphNodeWithOrder<BasicBlock*> BlockWithOrder;
+
+typedef PostOrderGraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> PostOrderBlockWorklist;
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp b/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
new file mode 100644
index 000000000..f1b6d710e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirCCallSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+CCallSpecial::CCallSpecial()
+{
+    m_clobberedRegs = RegisterSet::allRegisters();
+    m_clobberedRegs.exclude(RegisterSet::stackRegisters());
+    m_clobberedRegs.exclude(RegisterSet::reservedHardwareRegisters());
+    m_clobberedRegs.exclude(RegisterSet::calleeSaveRegisters());
+    m_clobberedRegs.clear(GPRInfo::returnValueGPR);
+    m_clobberedRegs.clear(GPRInfo::returnValueGPR2);
+    m_clobberedRegs.clear(FPRInfo::returnValueFPR);
+}
+
+CCallSpecial::~CCallSpecial()
+{
+}
+
+void CCallSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+    for (unsigned i = 0; i < numCalleeArgs; ++i)
+        callback(inst.args[calleeArgOffset + i], Arg::Use, Arg::GP, Arg::pointerWidth());
+    for (unsigned i = 0; i < numReturnGPArgs; ++i)
+        callback(inst.args[returnGPArgOffset + i], Arg::Def, Arg::GP, Arg::pointerWidth());
+    for (unsigned i = 0; i < numReturnFPArgs; ++i)
+        callback(inst.args[returnFPArgOffset + i], Arg::Def, Arg::FP, Arg::Width64);
+    
+    for (unsigned i = argArgOffset; i < inst.args.size(); ++i) {
+        // For the type, we can just query the arg's type. The arg will have a type, because we
+        // require these args to be argument registers.
+        Arg::Type type = inst.args[i].type();
+        callback(inst.args[i], Arg::Use, type, Arg::conservativeWidth(type));
+    }
+}
+
+bool CCallSpecial::isValid(Inst& inst)
+{
+    if (inst.args.size() < argArgOffset)
+        return false;
+
+    for (unsigned i = 0; i < numCalleeArgs; ++i) {
+        Arg& arg = inst.args[i + calleeArgOffset];
+        if (!arg.isGP())
+            return false;
+        switch (arg.kind()) {
+        case Arg::Imm:
+            if (is32Bit())
+                break;
+            return false;
+        case Arg::BigImm:
+            if (is64Bit())
+                break;
+            return false;
+        case Arg::Tmp:
+        case Arg::Addr:
+        case Arg::Stack:
+        case Arg::CallArg:
+            break;
+        default:
+            return false;
+        }
+    }
+
+    // Return args need to be exact.
+    if (inst.args[returnGPArgOffset + 0] != Tmp(GPRInfo::returnValueGPR))
+        return false;
+    if (inst.args[returnGPArgOffset + 1] != Tmp(GPRInfo::returnValueGPR2))
+        return false;
+    if (inst.args[returnFPArgOffset + 0] != Tmp(FPRInfo::returnValueFPR))
+        return false;
+
+    for (unsigned i = argArgOffset; i < inst.args.size(); ++i) {
+        if (!inst.args[i].isReg())
+            return false;
+
+        if (inst.args[i] == Tmp(scratchRegister))
+            return false;
+    }
+    return true;
+}
+
+bool CCallSpecial::admitsStack(Inst&, unsigned argIndex)
+{
+    // The callee can be on the stack.
+    if (argIndex == calleeArgOffset)
+        return true;
+    
+    return false;
+}
+
+void CCallSpecial::reportUsedRegisters(Inst&, const RegisterSet&)
+{
+}
+
+CCallHelpers::Jump CCallSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext&)
+{
+    switch (inst.args[calleeArgOffset].kind()) {
+    case Arg::Imm:
+    case Arg::BigImm:
+        jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
+        jit.call(scratchRegister);
+        break;
+    case Arg::Tmp:
+        jit.call(inst.args[calleeArgOffset].gpr());
+        break;
+    case Arg::Addr:
+        jit.call(inst.args[calleeArgOffset].asAddress());
+        break;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        break;
+    }
+    return CCallHelpers::Jump();
+}
+
+RegisterSet CCallSpecial::extraEarlyClobberedRegs(Inst&)
+{
+    return m_emptyRegs;
+}
+
+RegisterSet CCallSpecial::extraClobberedRegs(Inst&)
+{
+    return m_clobberedRegs;
+}
+
+void CCallSpecial::dumpImpl(PrintStream& out) const
+{
+    out.print("CCall");
+}
+
+void CCallSpecial::deepDumpImpl(PrintStream& out) const
+{
+    out.print("function call that uses the C calling convention.");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCCallSpecial.h b/Source/JavaScriptCore/b3/air/AirCCallSpecial.h
new file mode 100644
index 000000000..ec909b9f0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCCallSpecial.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirSpecial.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+// Use this special for constructing a C call. Arg 0 is of course a Special arg that refers to the
+// CCallSpecial object. Arg 1 is the callee, and it can be an ImmPtr, a register, or an address. The
+// next three args - arg 2, arg 3, and arg 4 - hold the return value GPRs and FPR. The remaining args
+// are just the set of argument registers used by this call. For arguments that go to the stack, you
+// have to do the grunt work of doing those stack stores. In fact, the only reason why we specify the
+// argument registers as arguments to a call is so that the liveness analysis can see that they get
+// used here. It would be wrong to automagically report all argument registers as being used because
+// if we had a call that didn't pass them, then they'd appear to be live until some clobber point or
+// the prologue, whichever happened sooner.
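+//
+// As a hedged sketch (the x86-64 register names are assumptions for
+// illustration), a call to "double f(int64_t)" would be encoded as:
+//
+//     Patch &CCallSpecial, $f, %rax, %rdx, %xmm0, %rdi
+//
+// where %rax, %rdx and %xmm0 occupy the fixed GP/GP/FP return-value slots and
+// %rdi is the one argument register this particular call uses.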
+
+class CCallSpecial : public Special {
+public:
+    CCallSpecial();
+    ~CCallSpecial();
+
+    // You cannot use this register to pass arguments. It just so happens that this register is not
+    // used for arguments in the C calling convention. By the way, this is the only thing that causes
+    // this special to be specific to C calls.
+    static const GPRReg scratchRegister = GPRInfo::nonArgGPR0;
+
+protected:
+    void forEachArg(Inst&, const ScopedLambda<Inst::EachArgCallback>&) override;
+    bool isValid(Inst&) override;
+    bool admitsStack(Inst&, unsigned argIndex) override;
+    void reportUsedRegisters(Inst&, const RegisterSet&) override;
+    CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&) override;
+    RegisterSet extraEarlyClobberedRegs(Inst&) override;
+    RegisterSet extraClobberedRegs(Inst&) override;
+
+    void dumpImpl(PrintStream&) const override;
+    void deepDumpImpl(PrintStream&) const override;
+
+private:
+    static const unsigned specialArgOffset = 0;
+    static const unsigned numSpecialArgs = 1;
+    static const unsigned calleeArgOffset = numSpecialArgs;
+    static const unsigned numCalleeArgs = 1;
+    static const unsigned returnGPArgOffset = numSpecialArgs + numCalleeArgs;
+    static const unsigned numReturnGPArgs = 2;
+    static const unsigned returnFPArgOffset = numSpecialArgs + numCalleeArgs + numReturnGPArgs;
+    static const unsigned numReturnFPArgs = 1;
+    static const unsigned argArgOffset =
+        numSpecialArgs + numCalleeArgs + numReturnGPArgs + numReturnFPArgs;
+    
+    RegisterSet m_clobberedRegs;
+    RegisterSet m_emptyRegs;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp b/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp
new file mode 100644
index 000000000..2b6f733bf
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirCCallingConvention.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "AirCode.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+template<typename BankInfo>
+Arg marshallCCallArgumentImpl(unsigned& argumentCount, unsigned& stackOffset, Value* child)
+{
+    unsigned argumentIndex = argumentCount++;
+    if (argumentIndex < BankInfo::numberOfArgumentRegisters)
+        return Tmp(BankInfo::toArgumentRegister(argumentIndex));
+
+    unsigned slotSize;
+    if (isARM64() && isIOS()) {
+        // Arguments are packed.
+        slotSize = sizeofType(child->type());
+    } else {
+        // Arguments are aligned.
+        slotSize = 8;
+    }
+
+    stackOffset = WTF::roundUpToMultipleOf(slotSize, stackOffset);
+    Arg result = Arg::callArg(stackOffset);
+    stackOffset += slotSize;
+    return result;
+}
+
+Arg marshallCCallArgument(
+    unsigned& gpArgumentCount, unsigned& fpArgumentCount, unsigned& stackOffset, Value* child)
+{
+    switch (Arg::typeForB3Type(child->type())) {
+    case Arg::GP:
+        return marshallCCallArgumentImpl<GPRInfo>(gpArgumentCount, stackOffset, child);
+    case Arg::FP:
+        return marshallCCallArgumentImpl<FPRInfo>(fpArgumentCount, stackOffset, child);
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+    return Arg();
+}
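+
+// For example (the argument-register count is a platform assumption): on a
+// target with six GP argument registers, GP children land in registers until
+// they run out; the seventh GP child becomes Arg::callArg(0), the eighth
+// Arg::callArg(8), and so on through 8-byte-aligned stack slots.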
+
+} // anonymous namespace
+
+Vector<Arg> computeCCallingConvention(Code& code, CCallValue* value)
+{
+    Vector<Arg> result;
+    result.append(Tmp(CCallSpecial::scratchRegister));
+    unsigned gpArgumentCount = 0;
+    unsigned fpArgumentCount = 0;
+    unsigned stackOffset = 0;
+    for (unsigned i = 1; i < value->numChildren(); ++i) {
+        result.append(
+            marshallCCallArgument(gpArgumentCount, fpArgumentCount, stackOffset, value->child(i)));
+    }
+    code.requestCallArgAreaSizeInBytes(WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset));
+    return result;
+}
+
+Tmp cCallResult(Type type)
+{
+    switch (type) {
+    case Void:
+        return Tmp();
+    case Int32:
+    case Int64:
+        return Tmp(GPRInfo::returnValueGPR);
+    case Float:
+    case Double:
+        return Tmp(FPRInfo::returnValueFPR);
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return Tmp();
+}
+
+Inst buildCCall(Code& code, Value* origin, const Vector<Arg>& arguments)
+{
+    Inst inst(Patch, origin, Arg::special(code.cCallSpecial()));
+    inst.args.append(arguments[0]);
+    inst.args.append(Tmp(GPRInfo::returnValueGPR));
+    inst.args.append(Tmp(GPRInfo::returnValueGPR2));
+    inst.args.append(Tmp(FPRInfo::returnValueFPR));
+    for (unsigned i = 1; i < arguments.size(); ++i) {
+        Arg arg = arguments[i];
+        if (arg.isTmp())
+            inst.args.append(arg);
+    }
+    return inst;
+}
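+
+// A minimal usage sketch (variable names are illustrative): the instruction
+// selector would combine the two helpers above roughly like this:
+//
+//     Vector<Arg> args = computeCCallingConvention(code, callValue);
+//     block->appendInst(buildCCall(code, callValue, args));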
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirCCallingConvention.h b/Source/JavaScriptCore/b3/air/AirCCallingConvention.h
new file mode 100644
index 000000000..76acc29ab
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCCallingConvention.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirInst.h"
+#include "B3Type.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class CCallValue;
+
+namespace Air {
+
+class Code;
+
+Vector<Arg> computeCCallingConvention(Code&, CCallValue*);
+
+Tmp cCallResult(Type);
+
+Inst buildCCall(Code&, Value* origin, const Vector<Arg>&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCode.cpp b/Source/JavaScriptCore/b3/air/AirCode.cpp
new file mode 100644
index 000000000..79e2c0cf2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCode.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirCode.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "B3BasicBlockUtils.h"
+#include "B3Procedure.h"
+#include "B3StackSlot.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+Code::Code(Procedure& proc)
+    : m_proc(proc)
+    , m_lastPhaseName("initial")
+{
+    // Come up with initial orderings of registers. The user may replace this with something else.
+    Arg::forEachType(
+        [&] (Arg::Type type) {
+            Vector<Reg> result;
+            RegisterSet all = type == Arg::GP ? RegisterSet::allGPRs() : RegisterSet::allFPRs();
+            all.exclude(RegisterSet::stackRegisters());
+            all.exclude(RegisterSet::reservedHardwareRegisters());
+            RegisterSet calleeSave = RegisterSet::calleeSaveRegisters();
+            all.forEach(
+                [&] (Reg reg) {
+                    if (!calleeSave.get(reg))
+                        result.append(reg);
+                });
+            all.forEach(
+                [&] (Reg reg) {
+                    if (calleeSave.get(reg))
+                        result.append(reg);
+                });
+            setRegsInPriorityOrder(type, result);
+        });
+}
+
+Code::~Code()
+{
+}
+
+void Code::setRegsInPriorityOrder(Arg::Type type, const Vector<Reg>& regs)
+{
+    regsInPriorityOrderImpl(type) = regs;
+    m_mutableRegs = RegisterSet();
+    Arg::forEachType(
+        [&] (Arg::Type type) {
+            for (Reg reg : regsInPriorityOrder(type))
+                m_mutableRegs.set(reg);
+        });
+}
+
+void Code::pinRegister(Reg reg)
+{
+    Vector<Reg>& regs = regsInPriorityOrderImpl(Arg(Tmp(reg)).type());
+    regs.removeFirst(reg);
+    m_mutableRegs.clear(reg);
+    ASSERT(!regs.contains(reg));
+}
+
+BasicBlock* Code::addBlock(double frequency)
+{
+    std::unique_ptr<BasicBlock> block(new BasicBlock(m_blocks.size(), frequency));
+    BasicBlock* result = block.get();
+    m_blocks.append(WTFMove(block));
+    return result;
+}
+
+StackSlot* Code::addStackSlot(unsigned byteSize, StackSlotKind kind, B3::StackSlot* b3Slot)
+{
+    return m_stackSlots.addNew(byteSize, kind, b3Slot);
+}
+
+StackSlot* Code::addStackSlot(B3::StackSlot* b3Slot)
+{
+    return addStackSlot(b3Slot->byteSize(), StackSlotKind::Locked, b3Slot);
+}
+
+Special* Code::addSpecial(std::unique_ptr<Special> special)
+{
+    special->m_code = this;
+    return m_specials.add(WTFMove(special));
+}
+
+CCallSpecial* Code::cCallSpecial()
+{
+    if (!m_cCallSpecial) {
+        m_cCallSpecial = static_cast<CCallSpecial*>(
+            addSpecial(std::make_unique<CCallSpecial>()));
+    }
+
+    return m_cCallSpecial;
+}
+
+bool Code::isEntrypoint(BasicBlock* block) const
+{
+    if (m_entrypoints.isEmpty())
+        return !block->index();
+    
+    for (const FrequentedBlock& entrypoint : m_entrypoints) {
+        if (entrypoint.block() == block)
+            return true;
+    }
+    return false;
+}
+
+void Code::resetReachability()
+{
+    clearPredecessors(m_blocks);
+    if (m_entrypoints.isEmpty())
+        updatePredecessorsAfter(m_blocks[0].get());
+    else {
+        for (const FrequentedBlock& entrypoint : m_entrypoints)
+            updatePredecessorsAfter(entrypoint.block());
+    }
+    
+    for (auto& block : m_blocks) {
+        if (isBlockDead(block.get()) && !isEntrypoint(block.get()))
+            block = nullptr;
+    }
+}
+
+void Code::dump(PrintStream& out) const
+{
+    if (!m_entrypoints.isEmpty())
+        out.print("Entrypoints: ", listDump(m_entrypoints), "\n");
+    for (BasicBlock* block : *this)
+        out.print(deepDump(block));
+    if (stackSlots().size()) {
+        out.print("Stack slots:\n");
+        for (StackSlot* slot : stackSlots())
+            out.print("    ", pointerDump(slot), ": ", deepDump(slot), "\n");
+    }
+    if (specials().size()) {
+        out.print("Specials:\n");
+        for (Special* special : specials())
+            out.print("    ", deepDump(special), "\n");
+    }
+    if (m_frameSize)
+        out.print("Frame size: ", m_frameSize, "\n");
+    if (m_callArgAreaSize)
+        out.print("Call arg area size: ", m_callArgAreaSize, "\n");
+    if (m_calleeSaveRegisters.size())
+        out.print("Callee saves: ", m_calleeSaveRegisters, "\n");
+}
+
+unsigned Code::findFirstBlockIndex(unsigned index) const
+{
+    while (index < size() && !at(index))
+        index++;
+    return index;
+}
+
+unsigned Code::findNextBlockIndex(unsigned index) const
+{
+    return findFirstBlockIndex(index + 1);
+}
+
+BasicBlock* Code::findNextBlock(BasicBlock* block) const
+{
+    unsigned index = findNextBlockIndex(block->index());
+    if (index < size())
+        return at(index);
+    return nullptr;
+}
+
+void Code::addFastTmp(Tmp tmp)
+{
+    m_fastTmps.add(tmp);
+}
+
+void* Code::addDataSection(size_t size)
+{
+    return m_proc.addDataSection(size);
+}
+
+unsigned Code::jsHash() const
+{
+    unsigned result = 0;
+    
+    for (BasicBlock* block : *this) {
+        result *= 1000001;
+        for (Inst& inst : *block) {
+            result *= 97;
+            result += inst.jsHash();
+        }
+        for (BasicBlock* successor : block->successorBlocks()) {
+            result *= 7;
+            result += successor->index();
+        }
+    }
+    for (StackSlot* slot : stackSlots()) {
+        result *= 101;
+        result += slot->jsHash();
+    }
+    
+    return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCode.h b/Source/JavaScriptCore/b3/air/AirCode.h
new file mode 100644
index 000000000..6d4a14722
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCode.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirBasicBlock.h"
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "AirTmp.h"
+#include "B3SparseCollection.h"
+#include "CCallHelpers.h"
+#include "RegisterAtOffsetList.h"
+#include "StackAlignment.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace Air {
+
+class BlockInsertionSet;
+class CCallSpecial;
+
+typedef void WasmBoundsCheckGeneratorFunction(CCallHelpers&, GPRReg, unsigned);
+typedef SharedTask<WasmBoundsCheckGeneratorFunction> WasmBoundsCheckGenerator;
+
+// This is an IR that is very close to the bare metal. It requires about 40x more bytes than the
+// generated machine code - for example if you're generating 1MB of machine code, you need about
+// 40MB of Air.
+
+class Code {
+    WTF_MAKE_NONCOPYABLE(Code);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    ~Code();
+
+    Procedure& proc() { return m_proc; }
+    
+    const Vector<Reg>& regsInPriorityOrder(Arg::Type type) const
+    {
+        switch (type) {
+        case Arg::GP:
+            return m_gpRegsInPriorityOrder;
+        case Arg::FP:
+            return m_fpRegsInPriorityOrder;
+        }
+        ASSERT_NOT_REACHED();
+    }
+    
+    void setRegsInPriorityOrder(Arg::Type, const Vector<Reg>&);
+    
+    // This is the set of registers that Air is allowed to emit code to mutate. It's derived from
+    // regsInPriorityOrder. Any registers not in this set are said to be "pinned".
+    const RegisterSet& mutableRegs() const { return m_mutableRegs; }
+    
+    bool isPinned(Reg reg) const { return !mutableRegs().get(reg); }
+    
+    void pinRegister(Reg);
+
+    JS_EXPORT_PRIVATE BasicBlock* addBlock(double frequency = 1);
+
+    // Note that you can rely on stack slots always getting indices that are larger than the index
+    // of any prior stack slot. In fact, all stack slots you create in the future will have an index
+    // that is >= stackSlots().size().
+    JS_EXPORT_PRIVATE StackSlot* addStackSlot(
+        unsigned byteSize, StackSlotKind, B3::StackSlot* = nullptr);
+    StackSlot* addStackSlot(B3::StackSlot*);
+
+    Special* addSpecial(std::unique_ptr<Special>);
+
+    // This is the special you need to make a C call!
+    CCallSpecial* cCallSpecial();
+
+    Tmp newTmp(Arg::Type type)
+    {
+        switch (type) {
+        case Arg::GP:
+            return Tmp::gpTmpForIndex(m_numGPTmps++);
+        case Arg::FP:
+            return Tmp::fpTmpForIndex(m_numFPTmps++);
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    unsigned numTmps(Arg::Type type)
+    {
+        switch (type) {
+        case Arg::GP:
+            return m_numGPTmps;
+        case Arg::FP:
+            return m_numFPTmps;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    unsigned callArgAreaSizeInBytes() const { return m_callArgAreaSize; }
+
+    // You can call this before code generation to force a minimum call arg area size.
+    void requestCallArgAreaSizeInBytes(unsigned size)
+    {
+        m_callArgAreaSize = std::max(
+            m_callArgAreaSize,
+            static_cast<unsigned>(WTF::roundUpToMultipleOf(stackAlignmentBytes(), size)));
+    }
+
+    unsigned frameSize() const { return m_frameSize; }
+
+    // Only phases that do stack allocation are allowed to set this. Currently, only
+    // Air::allocateStack() does this.
+    void setFrameSize(unsigned frameSize)
+    {
+        m_frameSize = frameSize;
+    }
+
+    // Note that this is not the same thing as proc().numEntrypoints(). This value here may be zero
+    // until we lower EntrySwitch.
+    unsigned numEntrypoints() const { return m_entrypoints.size(); }
+    const Vector<FrequentedBlock>& entrypoints() const { return m_entrypoints; }
+    const FrequentedBlock& entrypoint(unsigned index) const { return m_entrypoints[index]; }
+    bool isEntrypoint(BasicBlock*) const;
+    
+    // This is used by lowerEntrySwitch().
+    template<typename Vector>
+    void setEntrypoints(Vector&& vector)
+    {
+        m_entrypoints = std::forward<Vector>(vector);
+    }
+    
+    CCallHelpers::Label entrypointLabel(unsigned index) const
+    {
+        return m_entrypointLabels[index];
+    }
+    
+    // This is used by generate().
+    template<typename Vector>
+    void setEntrypointLabels(Vector&& vector)
+    {
+        m_entrypointLabels = std::forward<Vector>(vector);
+    }
+
+    const RegisterAtOffsetList& calleeSaveRegisters() const { return m_calleeSaveRegisters; }
+    RegisterAtOffsetList& calleeSaveRegisters() { return m_calleeSaveRegisters; }
+
+    // Recomputes predecessors and deletes unreachable blocks.
+    void resetReachability();
+
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+    unsigned size() const { return m_blocks.size(); }
+    BasicBlock* at(unsigned index) const { return m_blocks[index].get(); }
+    BasicBlock* operator[](unsigned index) const { return at(index); }
+
+    // This is used by phases that optimize the block list. You shouldn't use this unless you really know
+    // what you're doing.
+    Vector<std::unique_ptr<BasicBlock>>& blockList() { return m_blocks; }
+
+    // Finds the smallest index' such that at(index') != null and index' >= index.
+    JS_EXPORT_PRIVATE unsigned findFirstBlockIndex(unsigned index) const;
+
+    // Finds the smallest index' such that at(index') != null and index' > index.
+    unsigned findNextBlockIndex(unsigned index) const;
+
+    BasicBlock* findNextBlock(BasicBlock*) const;
+
+    class iterator {
+    public:
+        iterator()
+            : m_code(nullptr)
+            , m_index(0)
+        {
+        }
+
+        iterator(const Code& code, unsigned index)
+            : m_code(&code)
+            , m_index(m_code->findFirstBlockIndex(index))
+        {
+        }
+
+        BasicBlock* operator*()
+        {
+            return m_code->at(m_index);
+        }
+
+        iterator& operator++()
+        {
+            m_index = m_code->findFirstBlockIndex(m_index + 1);
+            return *this;
+        }
+
+        bool operator==(const iterator& other) const
+        {
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        const Code* m_code;
+        unsigned m_index;
+    };
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+
+    const SparseCollection<StackSlot>& stackSlots() const { return m_stackSlots; }
+    SparseCollection<StackSlot>& stackSlots() { return m_stackSlots; }
+
+    const SparseCollection<Special>& specials() const { return m_specials; }
+    SparseCollection<Special>& specials() { return m_specials; }
+
+    template<typename Callback>
+    void forAllTmps(const Callback& callback) const
+    {
+        for (unsigned i = m_numGPTmps; i--;)
+            callback(Tmp::gpTmpForIndex(i));
+        for (unsigned i = m_numFPTmps; i--;)
+            callback(Tmp::fpTmpForIndex(i));
+    }
+
+    void addFastTmp(Tmp);
+    bool isFastTmp(Tmp tmp) const { return m_fastTmps.contains(tmp); }
+    
+    void* addDataSection(size_t);
+    
+    // The name has to be a string literal, since we don't do any memory management for the string.
+    void setLastPhaseName(const char* name)
+    {
+        m_lastPhaseName = name;
+    }
+
+    const char* lastPhaseName() const { return m_lastPhaseName; }
+
+    void setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator> generator)
+    {
+        m_wasmBoundsCheckGenerator = generator;
+    }
+
+    RefPtr<WasmBoundsCheckGenerator> wasmBoundsCheckGenerator() const { return m_wasmBoundsCheckGenerator; }
+
+    // This is a hash of the code. You can use this if you want to put code into a hashtable, but
+    // it's mainly for validating the results from JSAir.
+    unsigned jsHash() const;
+
+private:
+    friend class ::JSC::B3::Procedure;
+    friend class BlockInsertionSet;
+    
+    Code(Procedure&);
+
+    Vector<Reg>& regsInPriorityOrderImpl(Arg::Type type)
+    {
+        switch (type) {
+        case Arg::GP:
+            return m_gpRegsInPriorityOrder;
+        case Arg::FP:
+            return m_fpRegsInPriorityOrder;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    Procedure& m_proc; // Some meta-data, like byproducts, is stored in the Procedure.
+    Vector<Reg> m_gpRegsInPriorityOrder;
+    Vector<Reg> m_fpRegsInPriorityOrder;
+    RegisterSet m_mutableRegs;
+    SparseCollection<StackSlot> m_stackSlots;
+    Vector<std::unique_ptr<BasicBlock>> m_blocks;
+    SparseCollection<Special> m_specials;
+    HashSet<Tmp> m_fastTmps;
+    CCallSpecial* m_cCallSpecial { nullptr };
+    unsigned m_numGPTmps { 0 };
+    unsigned m_numFPTmps { 0 };
+    unsigned m_frameSize { 0 };
+    unsigned m_callArgAreaSize { 0 };
+    RegisterAtOffsetList m_calleeSaveRegisters;
+    Vector m_entrypoints; // This is empty until after lowerEntrySwitch().
+    Vector m_entrypointLabels; // This is empty until code generation.
+    RefPtr<WasmBoundsCheckGenerator> m_wasmBoundsCheckGenerator;
+    const char* m_lastPhaseName;
+};
+
+} } } // namespace JSC::B3::Air
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
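A hedged sketch of how a client might drive the Code interface declared above, using only members shown in this header; the function name is hypothetical, and a real Code is created and owned by a B3::Procedure:

    // Assumes a B3_JIT build and the types declared in AirCode.h.
    unsigned populateExample(JSC::B3::Air::Code& code)
    {
        using namespace JSC::B3::Air;

        BasicBlock* entry = code.addBlock();    // frequency defaults to 1
        Tmp scratch = code.newTmp(Arg::GP);     // fresh non-register tmp
        StackSlot* spill = code.addStackSlot(8, StackSlotKind::Spill);
        code.requestCallArgAreaSizeInBytes(32); // rounded up to stack alignment
        (void)entry; (void)scratch; (void)spill;

        // The iterator skips null entries in blockList(), so ranged-for stays
        // valid even after phases that delete blocks.
        unsigned numBlocks = 0;
        for (BasicBlock* block : code) {
            (void)block;
            numBlocks++;
        }
        return numBlocks;
    }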
diff --git a/Source/JavaScriptCore/b3/air/AirCustom.cpp b/Source/JavaScriptCore/b3/air/AirCustom.cpp
new file mode 100644
index 000000000..2a2df2fbd
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCustom.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirCustom.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirInstInlines.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool PatchCustom::isValidForm(Inst& inst)
+{
+    if (inst.args.size() < 1)
+        return false;
+    if (!inst.args[0].isSpecial())
+        return false;
+    if (!inst.args[0].special()->isValid(inst))
+        return false;
+    RegisterSet clobberedEarly = inst.extraEarlyClobberedRegs();
+    RegisterSet clobberedLate = inst.extraClobberedRegs();
+    bool ok = true;
+    inst.forEachTmp(
+        [&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+            if (!tmp.isReg())
+                return;
+            if (Arg::isLateDef(role) || Arg::isLateUse(role))
+                ok &= !clobberedLate.get(tmp.reg());
+            else
+                ok &= !clobberedEarly.get(tmp.reg());
+        });
+    return ok;
+}
+
+bool CCallCustom::isValidForm(Inst& inst)
+{
+    CCallValue* value = inst.origin->as<CCallValue>();
+    if (!value)
+        return false;
+
+    if (inst.args.size() != (value->type() == Void ? 0 : 1) + value->numChildren())
+        return false;
+
+    // The arguments can only refer to the stack, tmps, or immediates.
+    for (Arg& arg : inst.args) {
+        if (!arg.isTmp() && !arg.isStackMemory() && !arg.isSomeImm())
+            return false;
+    }
+
+    unsigned offset = 0;
+
+    if (!inst.args[0].isGP())
+        return false;
+
+    // If there is a result then it cannot be an immediate.
+    if (value->type() != Void) {
+        if (inst.args[1].isSomeImm())
+            return false;
+        if (!inst.args[1].canRepresent(value))
+            return false;
+        offset++;
+    }
+
+    for (unsigned i = value->numChildren(); i-- > 1;) {
+        Value* child = value->child(i);
+        Arg arg = inst.args[offset + i];
+        if (!arg.canRepresent(child))
+            return false;
+    }
+
+    return true;
+}
+
+CCallHelpers::Jump CCallCustom::generate(Inst& inst, CCallHelpers&, GenerationContext&)
+{
+    dataLog("FATAL: Unlowered C call: ", inst, "\n");
+    UNREACHABLE_FOR_PLATFORM();
+    return CCallHelpers::Jump();
+}
+
+bool ShuffleCustom::isValidForm(Inst& inst)
+{
+    if (inst.args.size() % 3)
+        return false;
+
+    // A destination may only appear once. This requirement allows us to avoid the undefined behavior
+    // of having a destination that is supposed to get multiple inputs simultaneously. It also
+    // imposes some interesting constraints on the "shape" of the shuffle. If we treat a shuffle pair
+    // as an edge and the Args as nodes, then the single-destination requirement means that the
+    // shuffle graph consists of two kinds of subgraphs:
+    //
+    // - Spanning trees. We call these shifts. They can be executed as a sequence of Move
+    //   instructions and don't usually require scratch registers.
+    //
+    // - Closed loops. These loops consist of nodes that have one successor and one predecessor, so
+    //   there is no way to "get into" the loop from outside of it. These can be executed using swaps
+    //   or by saving one of the Args to a scratch register and executing it as a shift.
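+    //
+    // For example, (a => b, b => c) is a pure shift, (a => b, b => a) is a closed loop, and a
+    // pair list that names the same destination twice, like (a => c, b => c), is rejected below.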
+    HashSet<Arg> dsts;
+
+    for (unsigned i = 0; i < inst.args.size(); ++i) {
+        Arg arg = inst.args[i];
+        unsigned mode = i % 3;
+
+        if (mode == 2) {
+            // It's the width.
+            if (!arg.isWidthArg())
+                return false;
+            continue;
+        }
+
+        // The source can be an immediate.
+        if (!mode) {
+            if (arg.isSomeImm())
+                continue;
+
+            if (!arg.isCompatibleType(inst.args[i + 1]))
+                return false;
+        } else {
+            ASSERT(mode == 1);
+            if (!dsts.add(arg).isNewEntry)
+                return false;
+        }
+
+        if (arg.isTmp() || arg.isMemory())
+            continue;
+
+        return false;
+    }
+
+    // No destination register may appear in any address expressions. The lowering can't handle it
+    // and it's not useful for the way we end up using Shuffles. Normally, Shuffles are only used
+    // for stack addresses and non-stack registers.
+    for (Arg& arg : inst.args) {
+        if (!arg.isMemory())
+            continue;
+        bool ok = true;
+        arg.forEachTmpFast(
+            [&] (Tmp tmp) {
+                if (dsts.contains(tmp))
+                    ok = false;
+            });
+        if (!ok)
+            return false;
+    }
+
+    return true;
+}
+
+CCallHelpers::Jump ShuffleCustom::generate(Inst& inst, CCallHelpers&, GenerationContext&)
+{
+    dataLog("FATAL: Unlowered shuffle: ", inst, "\n");
+    UNREACHABLE_FOR_PLATFORM();
+    return CCallHelpers::Jump();
+}
+
+bool WasmBoundsCheckCustom::isValidForm(Inst& inst)
+{
+    if (inst.args.size() != 2)
+        return false;
+    if (!inst.args[0].isTmp() && !inst.args[0].isSomeImm())
+        return false;
+
+    return inst.args[1].isReg();
+}
+
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
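To make the (src, dst, width) triple layout that ShuffleCustom::isValidForm() checks above concrete, here is a hedged sketch of building a well-formed Shuffle; Inst::append and Arg::widthArg are used the same way by createShuffle() in AirEmitShuffle.cpp later in this patch, and the function name is hypothetical:

    // A Shuffle whose args form the closed loop (a => b, b => a). Each pair
    // contributes a (src, dst, width) triple, and no destination repeats.
    JSC::B3::Air::Inst makeSwapShuffle(
        JSC::B3::Value* origin, JSC::B3::Air::Tmp a, JSC::B3::Air::Tmp b)
    {
        using namespace JSC::B3::Air;
        Inst inst(Shuffle, origin);
        inst.append(Arg(a), Arg(b), Arg::widthArg(Arg::Width64));
        inst.append(Arg(b), Arg(a), Arg::widthArg(Arg::Width64));
        return inst;
    }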
diff --git a/Source/JavaScriptCore/b3/air/AirCustom.h b/Source/JavaScriptCore/b3/air/AirCustom.h
new file mode 100644
index 000000000..cddc03857
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCustom.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "AirInst.h"
+#include "AirSpecial.h"
+#include "B3ValueInlines.h"
+#include "B3WasmBoundsCheckValue.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+// This defines the behavior of custom instructions - i.e. those whose behavior cannot be
+// described using AirOpcode.opcodes. If you define an opcode as "custom Foo" in that file, then
+// you will need to create a "struct FooCustom" here that implements the custom behavior
+// methods.
+//
+// The customizability granted by the custom instruction mechanism is strictly less than what
+// you get using the Patch instruction and implementing a Special. However, that path requires
+// allocating a Special object and ensuring that it's the first operand. For many instructions,
+// that is not as convenient as using Custom, which makes the instruction look like any other
+// instruction. Note that both of those extra powers of the Patch instruction happen because we
+// special-case that instruction in many phases and analyses. Non-special-cased behaviors of
+// Patch are implemented using the custom instruction mechanism.
+//
+// Specials are still more flexible if you need to list extra clobbered registers and you'd like
+// that to be expressed as a bitvector rather than an arglist. They are also more flexible if
+// you need to carry extra state around with the instruction. Also, Specials mean that you
+// always have access to Code& even in methods that don't take a GenerationContext.
+
+// Definition of Patch instruction. Patch is used to delegate the behavior of the instruction to the
+// Special object, which will be the first argument to the instruction.
+struct PatchCustom {
+    template<typename Functor>
+    static void forEachArg(Inst& inst, const Functor& functor)
+    {
+        // This is basically bogus, but it works for analyses that model Special as an
+        // immediate.
+        functor(inst.args[0], Arg::Use, Arg::GP, Arg::pointerWidth());
+        
+        inst.args[0].special()->forEachArg(inst, scopedLambda<Inst::EachArgCallback>(functor));
+    }
+
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return false;
+    }
+
+    static bool isValidForm(Inst& inst);
+
+    static bool admitsStack(Inst& inst, unsigned argIndex)
+    {
+        if (!argIndex)
+            return false;
+        return inst.args[0].special()->admitsStack(inst, argIndex);
+    }
+
+    static std::optional<unsigned> shouldTryAliasingDef(Inst& inst)
+    {
+        return inst.args[0].special()->shouldTryAliasingDef(inst);
+    }
+    
+    static bool isTerminal(Inst& inst)
+    {
+        return inst.args[0].special()->isTerminal(inst);
+    }
+
+    static bool hasNonArgEffects(Inst& inst)
+    {
+        return inst.args[0].special()->hasNonArgEffects(inst);
+    }
+
+    static bool hasNonArgNonControlEffects(Inst& inst)
+    {
+        return inst.args[0].special()->hasNonArgNonControlEffects(inst);
+    }
+
+    static CCallHelpers::Jump generate(
+        Inst& inst, CCallHelpers& jit, GenerationContext& context)
+    {
+        return inst.args[0].special()->generate(inst, jit, context);
+    }
+};
+
+template<typename Subtype>
+struct CommonCustomBase {
+    static bool hasNonArgEffects(Inst& inst)
+    {
+        return Subtype::isTerminal(inst) || Subtype::hasNonArgNonControlEffects(inst);
+    }
+};
+
+// Definition of CCall instruction. CCall is used for hot path C function calls. It's lowered to a
+// Patch with an Air CCallSpecial along with instructions that marshal the arguments. The lowering
+// before register allocation, so that the register allocator sees the clobbers.
+struct CCallCustom : public CommonCustomBase<CCallCustom> {
+    template<typename Functor>
+    static void forEachArg(Inst& inst, const Functor& functor)
+    {
+        Value* value = inst.origin;
+
+        unsigned index = 0;
+
+        functor(inst.args[index++], Arg::Use, Arg::GP, Arg::pointerWidth()); // callee
+        
+        if (value->type() != Void) {
+            functor(
+                inst.args[index++], Arg::Def,
+                Arg::typeForB3Type(value->type()),
+                Arg::widthForB3Type(value->type()));
+        }
+
+        for (unsigned i = 1; i < value->numChildren(); ++i) {
+            Value* child = value->child(i);
+            functor(
+                inst.args[index++], Arg::Use,
+                Arg::typeForB3Type(child->type()),
+                Arg::widthForB3Type(child->type()));
+        }
+    }
+
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return false;
+    }
+
+    static bool isValidForm(Inst&);
+
+    static bool admitsStack(Inst&, unsigned)
+    {
+        return true;
+    }
+    
+    static bool isTerminal(Inst&)
+    {
+        return false;
+    }
+
+    static bool hasNonArgNonControlEffects(Inst&)
+    {
+        return true;
+    }
+
+    // This just crashes, since we expect C calls to be lowered before generation.
+    static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&);
+};
+
+struct ColdCCallCustom : CCallCustom {
+    template<typename Functor>
+    static void forEachArg(Inst& inst, const Functor& functor)
+    {
+        // This is just like a call, but uses become cold.
+        CCallCustom::forEachArg(
+            inst,
+            [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+                functor(arg, Arg::cooled(role), type, width);
+            });
+    }
+};
+
+struct ShuffleCustom : public CommonCustomBase<ShuffleCustom> {
+    template<typename Functor>
+    static void forEachArg(Inst& inst, const Functor& functor)
+    {
+        unsigned limit = inst.args.size() / 3 * 3;
+        for (unsigned i = 0; i < limit; i += 3) {
+            Arg& src = inst.args[i + 0];
+            Arg& dst = inst.args[i + 1];
+            Arg& widthArg = inst.args[i + 2];
+            Arg::Width width = widthArg.width();
+            Arg::Type type = src.isGP() && dst.isGP() ? Arg::GP : Arg::FP;
+            functor(src, Arg::Use, type, width);
+            functor(dst, Arg::Def, type, width);
+            functor(widthArg, Arg::Use, Arg::GP, Arg::Width8);
+        }
+    }
+
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return false;
+    }
+
+    static bool isValidForm(Inst&);
+    
+    static bool admitsStack(Inst&, unsigned index)
+    {
+        switch (index % 3) {
+        case 0:
+        case 1:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static bool isTerminal(Inst&)
+    {
+        return false;
+    }
+
+    static bool hasNonArgNonControlEffects(Inst&)
+    {
+        return false;
+    }
+
+    static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&);
+};
+
+struct EntrySwitchCustom : public CommonCustomBase<EntrySwitchCustom> {
+    template<typename Func>
+    static void forEachArg(Inst&, const Func&)
+    {
+    }
+    
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return !sizeof...(Arguments);
+    }
+    
+    static bool isValidForm(Inst& inst)
+    {
+        return inst.args.isEmpty();
+    }
+    
+    static bool admitsStack(Inst&, unsigned)
+    {
+        return false;
+    }
+    
+    static bool isTerminal(Inst&)
+    {
+        return true;
+    }
+    
+    static bool hasNonArgNonControlEffects(Inst&)
+    {
+        return false;
+    }
+
+    static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&)
+    {
+        // This should never be reached because we should have lowered EntrySwitch before
+        // generation.
+        UNREACHABLE_FOR_PLATFORM();
+        return CCallHelpers::Jump();
+    }
+};
+
+struct WasmBoundsCheckCustom : public CommonCustomBase<WasmBoundsCheckCustom> {
+    template<typename Func>
+    static void forEachArg(Inst& inst, const Func& functor)
+    {
+        functor(inst.args[0], Arg::Use, Arg::GP, Arg::Width64);
+        functor(inst.args[1], Arg::Use, Arg::GP, Arg::Width64);
+    }
+
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return false;
+    }
+
+    static bool isValidForm(Inst&);
+
+    static bool admitsStack(Inst&, unsigned)
+    {
+        return false;
+    }
+
+    static bool isTerminal(Inst&)
+    {
+        return false;
+    }
+    
+    static bool hasNonArgNonControlEffects(Inst&)
+    {
+        return true;
+    }
+
+    static CCallHelpers::Jump generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
+    {
+        WasmBoundsCheckValue* value = inst.origin->as<WasmBoundsCheckValue>();
+        CCallHelpers::Jump outOfBounds = Inst(Air::Branch64, value, Arg::relCond(CCallHelpers::AboveOrEqual), inst.args[0], inst.args[1]).generate(jit, context);
+
+        context.latePaths.append(createSharedTask<GenerationContext::LatePathFunction>(
+            [outOfBounds, value] (CCallHelpers& jit, Air::GenerationContext& context) {
+                outOfBounds.link(&jit);
+                context.code->wasmBoundsCheckGenerator()->run(jit, value->pinnedGPR(), value->offset());
+            }));
+
+        // We said we were not a terminal.
+        return CCallHelpers::Jump();
+    }
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
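Putting the contract above together: an opcode declared as "custom Foo" in AirOpcode.opcodes would need a struct like the following. FooCustom and its trivial bodies are hypothetical; the set of static hooks mirrors the structs defined in this header:

    struct FooCustom : public CommonCustomBase<FooCustom> {
        template<typename Functor>
        static void forEachArg(Inst&, const Functor&) { }

        template<typename... Arguments>
        static bool isValidFormStatic(Arguments...) { return false; }

        static bool isValidForm(Inst&) { return true; }
        static bool admitsStack(Inst&, unsigned) { return false; }
        static bool isTerminal(Inst&) { return false; }
        static bool hasNonArgNonControlEffects(Inst&) { return false; }

        static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&)
        {
            // A real opcode would emit code here; this placeholder emits nothing.
            return CCallHelpers::Jump();
        }
    };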
diff --git a/Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp b/Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp
new file mode 100644
index 000000000..3d8d6fb41
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirDumpAsJS.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+CString varNameForBlockAtIndex(unsigned index)
+{
+    return toCString("bb", index);
+}
+
+CString varName(BasicBlock* block)
+{
+    return varNameForBlockAtIndex(block->index());
+}
+
+CString varNameForStackSlotAtIndex(unsigned index)
+{
+    return toCString("slot", index);
+}
+
+CString varName(StackSlot* slot)
+{
+    return varNameForStackSlotAtIndex(slot->index());
+}
+
+CString varName(Reg reg)
+{
+    return toCString("Reg.", reg.debugName());
+}
+
+CString varNameForTmpWithTypeAndIndex(Arg::Type type, unsigned index)
+{
+    return toCString(type == Arg::FP ? "f" : "", "tmp", index);
+}
+
+CString varName(Tmp tmp)
+{
+    if (tmp.isReg())
+        return varName(tmp.reg());
+    return varNameForTmpWithTypeAndIndex(Arg(tmp).type(), tmp.tmpIndex());
+}
+
+} // anonymous namespace
+
+void dumpAsJS(Code& code, PrintStream& out)
+{
+    out.println("let code = new Code();");
+    
+    for (unsigned i = 0; i < code.size(); ++i)
+        out.println("let ", varNameForBlockAtIndex(i), " = code.addBlock();");
+    
+    out.println("let hash;");
+
+    for (unsigned i = 0; i < code.stackSlots().size(); ++i) {
+        StackSlot* slot = code.stackSlots()[i];
+        if (slot) {
+            out.println("let ", varName(slot), " = code.addStackSlot(", slot->byteSize(), ", ", slot->kind(), ");");
+            if (slot->offsetFromFP())
+                out.println(varName(slot), ".setOffsetFromFP(", slot->offsetFromFP(), ");");
+            out.println("hash = ", varName(slot), ".hash();");
+            out.println("if (hash != ", slot->jsHash(), ")");
+            out.println("    throw new Error(\"Bad hash: \" + hash);");
+        } else
+            out.println("code.addStackSlot(1, Spill);");
+    }
+    
+    Arg::forEachType(
+        [&] (Arg::Type type) {
+            for (unsigned i = code.numTmps(type); i--;) {
+                out.println(
+                    "let ", varNameForTmpWithTypeAndIndex(type, i), " = code.newTmp(", type, ");");
+            }
+        });
+    
+    out.println("let inst;");
+    out.println("let arg;");
+    
+    for (BasicBlock* block : code) {
+        for (FrequentedBlock successor : block->successors()) {
+            out.println(
+                varName(block), ".successors.push(new FrequentedBlock(",
+                varName(successor.block()), ", ", successor.frequency(), "));");
+        }
+        
+        for (BasicBlock* predecessor : block->predecessors())
+            out.println(varName(block), ".predecessors.push(", varName(predecessor), ");");
+        
+        for (Inst& inst : *block) {
+            // FIXME: This should do something for flags.
+            // https://bugs.webkit.org/show_bug.cgi?id=162751
+            out.println("inst = new Inst(", inst.kind.opcode, ");");
+            
+            inst.forEachArg(
+                [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+                    switch (arg.kind()) {
+                    case Arg::Invalid:
+                        RELEASE_ASSERT_NOT_REACHED();
+                        break;
+                        
+                    case Arg::Tmp:
+                        out.println("arg = Arg.createTmp(", varName(arg.tmp()), ");");
+                        break;
+                        
+                    case Arg::Imm:
+                        out.println("arg = Arg.createImm(", arg.value(), ");");
+                        break;
+                        
+                    case Arg::BigImm:
+                        out.println(
+                            "arg = Arg.createBigImm(",
+                            static_cast<uint32_t>(arg.value()), ", ",
+                            static_cast<uint32_t>(arg.value() >> 32), ");");
+                        break;
+                        
+                    case Arg::BitImm:
+                        out.println("arg = Arg.createBitImm(", arg.value(), ");");
+                        break;
+                        
+                    case Arg::BitImm64:
+                        out.println(
+                            "arg = Arg.createBitImm64(",
+                            static_cast<uint32_t>(arg.value()), ", ",
+                            static_cast<uint32_t>(arg.value() >> 32), ");");
+                        break;
+                        
+                    case Arg::Addr:
+                        out.println(
+                            "arg = Arg.createAddr(", varName(arg.base()), ", ", arg.offset(), ");");
+                        break;
+                        
+                    case Arg::Stack:
+                        out.println(
+                            "arg = Arg.createStack(", varName(arg.stackSlot()), ", ", arg.offset(), ");");
+                        break;
+                        
+                    case Arg::CallArg:
+                        out.println("arg = Arg.createCallArg(", arg.offset(), ");");
+                        break;
+                        
+                    case Arg::Index:
+                        out.println(
+                            "arg = Arg.createIndex(", varName(arg.base()), ", ",
+                            varName(arg.index()), ", ", arg.scale(), ", ", arg.offset(), ");");
+                        break;
+                        
+                    case Arg::RelCond:
+                        out.println("arg = Arg.createRelCond(", arg.asRelationalCondition(), ");");
+                        break;
+                        
+                    case Arg::ResCond:
+                        out.println("arg = Arg.createResCond(", arg.asResultCondition(), ");");
+                        break;
+                        
+                    case Arg::DoubleCond:
+                        out.println("arg = Arg.createDoubleCond(", arg.asDoubleCondition(), ");");
+                        break;
+                        
+                    case Arg::Special:
+                        out.println("arg = Arg.createSpecial();");
+                        break;
+                        
+                    case Arg::WidthArg:
+                        out.println("arg = Arg.createWidthArg(", arg.width(), ");");
+                        break;
+                    }
+                    
+                    out.println("inst.args.push(arg);");
+                });
+            
+            if (inst.kind.opcode == Patch) {
+                if (inst.hasNonArgEffects())
+                    out.println("inst.patchHasNonArgEffects = true;");
+                
+                out.println("inst.extraEarlyClobberedRegs = new Set();");
+                out.println("inst.extraClobberedRegs = new Set();");
+                inst.extraEarlyClobberedRegs().forEach(
+                    [&] (Reg reg) {
+                        out.println("inst.extraEarlyClobberedRegs.add(", varName(reg), ");");
+                    });
+                inst.extraClobberedRegs().forEach(
+                    [&] (Reg reg) {
+                        out.println("inst.extraClobberedRegs.add(", varName(reg), ");");
+                    });
+                
+                out.println("inst.patchArgData = [];");
+                inst.forEachArg(
+                    [&] (Arg&, Arg::Role role, Arg::Type type, Arg::Width width) {
+                        out.println(
+                            "inst.patchArgData.push({role: Arg.", role, ", type: ", type,
+                            ", width: ", width, "});");
+                    });
+            }
+            
+            if (inst.kind.opcode == CCall || inst.kind.opcode == ColdCCall) {
+                out.println("inst.cCallType = ", inst.origin->type());
+                out.println("inst.cCallArgTypes = [];");
+                for (unsigned i = 1; i < inst.origin->numChildren(); ++i)
+                    out.println("inst.cCallArgTypes.push(", inst.origin->child(i)->type(), ");");
+            }
+            
+            out.println("hash = inst.hash();");
+            out.println("if (hash != ", inst.jsHash(), ")");
+            out.println("    throw new Error(\"Bad hash: \" + hash);");
+            
+            out.println(varName(block), ".append(inst);");
+        }
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirDumpAsJS.h b/Source/JavaScriptCore/b3/air/AirDumpAsJS.h
new file mode 100644
index 000000000..8895f5801
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirDumpAsJS.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is used for benchmarking. Various operations on Air are interesting from a benchmarking
+// standpoint. We can write some Air phases in JS and then use that to benchmark JS. The benchmark
+// is called JSAir, and it's in PerformanceTests/JSAir.
+void dumpAsJS(Code&, PrintStream&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
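A hedged usage sketch: a debugging phase could dump a Code for the JSAir harness like so. WTF::dataFile() is the stream WebKit conventionally uses for this kind of diagnostic output, and the function name is hypothetical:

    #include "AirDumpAsJS.h"
    #include <wtf/DataLog.h>

    void dumpForJSAir(JSC::B3::Air::Code& code)
    {
        // Emits the "let code = new Code(); ..." program built up by dumpAsJS().
        JSC::B3::Air::dumpAsJS(code, WTF::dataFile());
    }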
diff --git a/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.cpp b/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.cpp
new file mode 100644
index 000000000..ca36af93e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirEliminateDeadCode.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool eliminateDeadCode(Code& code)
+{
+    PhaseScope phaseScope(code, "eliminateDeadCode");
+
+    HashSet<Tmp> liveTmps;
+    IndexSet<StackSlot> liveStackSlots;
+    bool changed;
+
+    auto isArgLive = [&] (const Arg& arg) -> bool {
+        switch (arg.kind()) {
+        case Arg::Tmp:
+            if (arg.isReg())
+                return true;
+            return liveTmps.contains(arg.tmp());
+        case Arg::Stack:
+            if (arg.stackSlot()->isLocked())
+                return true;
+            return liveStackSlots.contains(arg.stackSlot());
+        default:
+            return true;
+        }
+    };
+
+    auto addLiveArg = [&] (const Arg& arg) -> bool {
+        switch (arg.kind()) {
+        case Arg::Tmp:
+            if (arg.isReg())
+                return false;
+            return liveTmps.add(arg.tmp()).isNewEntry;
+        case Arg::Stack:
+            if (arg.stackSlot()->isLocked())
+                return false;
+            return liveStackSlots.add(arg.stackSlot());
+        default:
+            return false;
+        }
+    };
+
+    auto isInstLive = [&] (Inst& inst) -> bool {
+        if (inst.hasNonArgEffects())
+            return true;
+
+        // This instruction should be presumed dead if its Args are all dead.
+        bool storesToLive = false;
+        inst.forEachArg(
+            [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+                if (!Arg::isAnyDef(role))
+                    return;
+                if (role == Arg::Scratch)
+                    return;
+                storesToLive |= isArgLive(arg);
+            });
+        return storesToLive;
+    };
+
+    auto handleInst = [&] (Inst& inst) {
+        if (!isInstLive(inst))
+            return;
+
+        // We get here if the Inst is live. For simplicity we say that a live instruction forces
+        // liveness upon everything it mentions.
+        for (Arg& arg : inst.args) {
+            changed |= addLiveArg(arg);
+            arg.forEachTmpFast(
+                [&] (Tmp& tmp) {
+                    changed |= addLiveArg(tmp);
+                });
+        }
+    };
+
+    auto runForward = [&] () -> bool {
+        changed = false;
+        for (BasicBlock* block : code) {
+            for (Inst& inst : *block)
+                handleInst(inst);
+        }
+        return changed;
+    };
+
+    auto runBackward = [&] () -> bool {
+        changed = false;
+        for (unsigned blockIndex = code.size(); blockIndex--;) {
+            BasicBlock* block = code[blockIndex];
+            for (unsigned instIndex = block->size(); instIndex--;)
+                handleInst(block->at(instIndex));
+        }
+        return changed;
+    };
+
+    for (;;) {
+        // Propagating backward is most likely to be profitable.
+        if (!runBackward())
+            break;
+        if (!runBackward())
+            break;
+
+        // Occasionally propagating forward greatly reduces the likelihood of pathologies.
+        if (!runForward())
+            break;
+    }
+
+    unsigned removedInstCount = 0;
+    for (BasicBlock* block : code) {
+        removedInstCount += block->insts().removeAllMatching(
+            [&] (Inst& inst) -> bool {
+                return !isInstLive(inst);
+            });
+    }
+
+    return !!removedInstCount;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.h b/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.h
new file mode 100644
index 000000000..1b718f63d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This eliminates instructions that have no observable effect. These are instructions whose only
+// effect would be storing to some Arg, except that we proved that the location specified by the Arg
+// is never loaded from. The only Args for which we can do such analysis are non-Reg Tmps and
+// anonymous StackSlots.
+
+bool eliminateDeadCode(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
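The implementation in AirEliminateDeadCode.cpp above iterates liveness to a fixed point, preferring the cheaper backward direction. Distilled into a hedged generic sketch (names are illustrative):

    // Keep propagating until neither direction learns any new liveness facts.
    // Mirrors the driver loop in eliminateDeadCode().
    template<typename Backward, typename Forward>
    void runToFixpoint(const Backward& runBackward, const Forward& runForward)
    {
        for (;;) {
            // Backward propagation is most likely to be profitable, so try it twice.
            if (!runBackward())
                break;
            if (!runBackward())
                break;
            // An occasional forward pass guards against pathological iteration counts.
            if (!runForward())
                break;
        }
    }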
diff --git a/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp b/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp
new file mode 100644
index 000000000..318471976
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirEmitShuffle.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+
+template<typename Functor>
+Tmp findPossibleScratch(Code& code, Arg::Type type, const Functor& functor)
+{
+    for (Reg reg : code.regsInPriorityOrder(type)) {
+        Tmp tmp(reg);
+        if (functor(tmp))
+            return tmp;
+    }
+    return Tmp();
+}
+
+Tmp findPossibleScratch(Code& code, Arg::Type type, const Arg& arg1, const Arg& arg2)
+{
+    return findPossibleScratch(
+        code, type,
+        [&] (Tmp tmp) -> bool {
+            return !arg1.usesTmp(tmp) && !arg2.usesTmp(tmp);
+        });
+}
+
+// Example: (a => b, b => a, a => c, b => d)
+struct Rotate {
+    Vector<ShufflePair> loop; // in the example, this is the loop: (a => b, b => a)
+    Vector<ShufflePair> fringe; // in the example, these are the associated shifts: (a => c, b => d)
+};
+
+} // anonymous namespace
+
+void ShufflePair::dump(PrintStream& out) const
+{
+    out.print(width(), ":", src(), "=>", dst());
+}
+
+Inst createShuffle(Value* origin, const Vector<ShufflePair>& pairs)
+{
+    Inst result(Shuffle, origin);
+    for (const ShufflePair& pair : pairs)
+        result.append(pair.src(), pair.dst(), Arg::widthArg(pair.width()));
+    return result;
+}
+
+Vector<Inst> emitShuffle(
+    Code& code, Vector<ShufflePair> pairs, std::array<Arg, 2> scratches, Arg::Type type,
+    Value* origin)
+{
+    if (verbose) {
+        dataLog(
+            "Dealing with pairs: ", listDump(pairs), " and scratches ", scratches[0], ", ",
+            scratches[1], "\n");
+    }
+    
+    pairs.removeAllMatching(
+        [&] (const ShufflePair& pair) -> bool {
+            return pair.src() == pair.dst();
+        });
+    
+    // First validate that this is the kind of shuffle that we know how to deal with.
+#if !ASSERT_DISABLED
+    for (const ShufflePair& pair : pairs) {
+        ASSERT(pair.src().isType(type));
+        ASSERT(pair.dst().isType(type));
+        ASSERT(pair.dst().isTmp() || pair.dst().isMemory());
+    }
+#endif // !ASSERT_DISABLED
+
+    // There are two possible kinds of operations that we will do:
+    //
+    // - Shift. Example: (a => b, b => c). We emit this as "Move b, c; Move a, b". This only requires
+    //   scratch registers if there are memory->memory moves. We want to find as many of these as
+    //   possible because they are cheaper. Note that shifts can involve the same source mentioned
+    //   multiple times. Example: (a => b, a => c, b => d, b => e).
+    //
+    // - Rotate. Example: (a => b, b => a). We want to emit this as "Swap a, b", but that instruction
+    //   may not be available, in which case we may need a scratch register or a scratch memory
+    //   location. A gnarlier example is (a => b, b => c, c => a). We can emit this as "Swap b, c;
+    //   Swap a, b". Note that swapping has to be careful about differing widths.
+    //
+    // Note that a rotate can have "fringe". For example, we might have (a => b, b => a, a => c,
+    // b => d). This has a rotate loop (a => b, b => a) and some fringe (a => c, b => d). We treat
+    // the whole thing as a single rotate.
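+    // Continuing that example, the fringe shifts (a => c, b => d) are emitted before the loop is
+    // resolved, ideally with a single Swap of a and b.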
+    //
+    // We will find multiple disjoint such operations. We can execute them in any order.
+
+    // We interpret these as Moves that should be executed backwards. All shifts are keyed by their
+    // starting source.
+    HashMap<Arg, Vector<ShufflePair>> shifts;
+
+    // We interpret these as Swaps over src()'s that should be executed backwards, i.e. for a list
+    // of size 3 we would do "Swap list[1].src(), list[2].src(); Swap list[0].src(), list[1].src()".
+    // Note that we actually can't do that if the widths don't match or other bad things happen.
+    // But, prior to executing all of that, we need to execute the fringe: the shifts coming off the
+    // rotate.
+    Vector rotates;
+
+    {
+        HashMap<Arg, Vector<ShufflePair>> mapping;
+        for (const ShufflePair& pair : pairs)
+            mapping.add(pair.src(), Vector<ShufflePair>()).iterator->value.append(pair);
+
+        Vector<ShufflePair> currentPairs;
+
+        while (!mapping.isEmpty()) {
+            ASSERT(currentPairs.isEmpty());
+            Arg originalSrc = mapping.begin()->key;
+            ASSERT(!shifts.contains(originalSrc));
+            if (verbose)
+                dataLog("Processing from ", originalSrc, "\n");
+            
+            GraphNodeWorklist<Arg> worklist;
+            worklist.push(originalSrc);
+            while (Arg src = worklist.pop()) {
+                HashMap<Arg, Vector<ShufflePair>>::iterator iter = mapping.find(src);
+                if (iter == mapping.end()) {
+                    // With a shift it's possible that we previously built the tail of this shift.
+                    // See if that's the case now.
+                    if (verbose)
+                        dataLog("Trying to append shift at ", src, "\n");
+                    currentPairs.appendVector(shifts.take(src));
+                    continue;
+                }
+                Vector<ShufflePair> pairs = WTFMove(iter->value);
+                mapping.remove(iter);
+
+                for (const ShufflePair& pair : pairs) {
+                    currentPairs.append(pair);
+                    ASSERT(pair.src() == src);
+                    worklist.push(pair.dst());
+                }
+            }
+
+            ASSERT(currentPairs.size());
+            ASSERT(currentPairs[0].src() == originalSrc);
+
+            if (verbose)
+                dataLog("currentPairs = ", listDump(currentPairs), "\n");
+
+            bool isRotate = false;
+            for (const ShufflePair& pair : currentPairs) {
+                if (pair.dst() == originalSrc) {
+                    isRotate = true;
+                    break;
+                }
+            }
+
+            if (isRotate) {
+                if (verbose)
+                    dataLog("It's a rotate.\n");
+                Rotate rotate;
+
+                // The common case is that the rotate does not have fringe. The only way to
+                // check for this is to examine the whole rotate.
+                bool ok;
+                if (currentPairs.last().dst() == originalSrc) {
+                    ok = true;
+                    for (unsigned i = currentPairs.size() - 1; i--;)
+                        ok &= currentPairs[i].dst() == currentPairs[i + 1].src();
+                } else
+                    ok = false;
+                
+                if (ok)
+                    rotate.loop = WTFMove(currentPairs);
+                else {
+                    // This is the slow path. The rotate has fringe.
+                    
+                    HashMap<Arg, ShufflePair> dstMapping;
+                    for (const ShufflePair& pair : currentPairs)
+                        dstMapping.add(pair.dst(), pair);
+
+                    ShufflePair pair = dstMapping.take(originalSrc);
+                    for (;;) {
+                        rotate.loop.append(pair);
+
+                        auto iter = dstMapping.find(pair.src());
+                        if (iter == dstMapping.end())
+                            break;
+                        pair = iter->value;
+                        dstMapping.remove(iter);
+                    }
+
+                    rotate.loop.reverse();
+
+                    // Make sure that the fringe appears in the same order as how it appeared in the
+                    // currentPairs, since that's the DFS order.
+                    for (const ShufflePair& pair : currentPairs) {
+                        // But of course we only include it if it's not in the loop.
+                        if (dstMapping.contains(pair.dst()))
+                            rotate.fringe.append(pair);
+                    }
+                }
+                
+                // If the graph search terminates because we returned to the first source, then the
+                // pair list has to have a very particular shape.
+                for (unsigned i = rotate.loop.size() - 1; i--;)
+                    ASSERT(rotate.loop[i].dst() == rotate.loop[i + 1].src());
+                rotates.append(WTFMove(rotate));
+                currentPairs.resize(0);
+            } else {
+                if (verbose)
+                    dataLog("It's a shift.\n");
+                shifts.add(originalSrc, WTFMove(currentPairs));
+            }
+        }
+    }
+
+    if (verbose) {
+        dataLog("Shifts:\n");
+        for (auto& entry : shifts)
+            dataLog("    ", entry.key, ": ", listDump(entry.value), "\n");
+        dataLog("Rotates:\n");
+        for (auto& rotate : rotates)
+            dataLog("    loop = ", listDump(rotate.loop), ", fringe = ", listDump(rotate.fringe), "\n");
+    }
+
+    // In the worst case, we need two scratch registers. The way we do this is that the client passes
+    // us whatever scratch registers it happens to have lying around. We will need scratch registers in
+    // the following cases:
+    //
+    // - Shuffle pairs where both src and dst refer to memory.
+    // - Rotate when no Swap instruction is available.
+    //
+    // Lucky for us, we are guaranteed to have extra scratch registers anytime we have a Shift that
+    // ends with a register. We search for such a register right now.
+
+    auto moveForWidth = [&] (Arg::Width width) -> Opcode {
+        switch (width) {
+        case Arg::Width32:
+            return type == Arg::GP ? Move32 : MoveFloat;
+        case Arg::Width64:
+            return type == Arg::GP ? Move : MoveDouble;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    };
+
+    Opcode conservativeMove = moveForWidth(Arg::conservativeWidth(type));
+
+    // We will emit things in reverse. We maintain a list of packs of instructions, and then we
+    // append them together in reverse (for example the thing at the end of resultPacks is placed
+    // first). This is useful because the last thing we emit frees up its destination registers, so
+    // it affects how we emit things before it.
+    Vector<Vector<Inst>> resultPacks;
+    Vector<Inst> result;
+
+    auto commitResult = [&] () {
+        resultPacks.append(WTFMove(result));
+    };
+
+    auto getScratch = [&] (unsigned index, Tmp possibleScratch) -> Tmp {
+        if (scratches[index].isTmp())
+            return scratches[index].tmp();
+
+        if (!possibleScratch)
+            return Tmp();
+        result.append(Inst(conservativeMove, origin, possibleScratch, scratches[index]));
+        return possibleScratch;
+    };
+
+    auto returnScratch = [&] (unsigned index, Tmp tmp) {
+        if (Arg(tmp) != scratches[index])
+            result.append(Inst(conservativeMove, origin, scratches[index], tmp));
+    };
+
+    auto handleShiftPair = [&] (const ShufflePair& pair, unsigned scratchIndex) {
+        Opcode move = moveForWidth(pair.width());
+        
+        if (!isValidForm(move, pair.src().kind(), pair.dst().kind())) {
+            Tmp scratch =
+                getScratch(scratchIndex, findPossibleScratch(code, type, pair.src(), pair.dst()));
+            RELEASE_ASSERT(scratch);
+            if (isValidForm(move, pair.src().kind(), Arg::Tmp))
+                result.append(Inst(moveForWidth(pair.width()), origin, pair.src(), scratch));
+            else {
+                ASSERT(pair.src().isSomeImm());
+                ASSERT(move == Move32);
+                result.append(Inst(Move, origin, Arg::bigImm(pair.src().value()), scratch));
+            }
+            result.append(Inst(moveForWidth(pair.width()), origin, scratch, pair.dst()));
+            returnScratch(scratchIndex, scratch);
+            return;
+        }
+        
+        result.append(Inst(move, origin, pair.src(), pair.dst()));
+    };
+
+    auto handleShift = [&] (Vector<ShufflePair>& shift) {
+        // FIXME: We could optimize the spill behavior of the shifter by checking if any of the
+        // shifts need spills. If they do, then we could try to get a register out here. Note that
+        // this may fail where the current strategy succeeds: out here we need a register that does
+        // not interfere with any of the shifts, while the current strategy only needs to find a
+        // scratch register that does not interfere with a particular shift. So, this optimization
+        // will be opportunistic: if it succeeds, then the individual shifts can use that scratch,
+        // otherwise they will do what they do now.
+        
+        for (unsigned i = shift.size(); i--;)
+            handleShiftPair(shift[i], 0);
+
+        Arg lastDst = shift.last().dst();
+        if (lastDst.isTmp()) {
+            for (Arg& scratch : scratches) {
+                ASSERT(scratch != lastDst);
+                if (!scratch.isTmp()) {
+                    scratch = lastDst;
+                    break;
+                }
+            }
+        }
+    };
+
+    // First handle shifts whose last destination is a tmp because these free up scratch registers.
+    // These end up last in the final sequence, so the final destination of these shifts will be
+    // available as a scratch location for anything emitted prior (so, after, since we're emitting in
+    // reverse).
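+    //
+    // For example (hypothetical tmps), the shift a => b => c with c a register is
+    // emitted last; c is then recorded in scratches and doubles as a scratch register
+    // for everything emitted before it.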
+    for (auto& entry : shifts) {
+        Vector<ShufflePair>& shift = entry.value;
+        if (shift.last().dst().isTmp())
+            handleShift(shift);
+        commitResult();
+    }
+
+    // Now handle the rest of the shifts.
+    for (auto& entry : shifts) {
+        Vector<ShufflePair>& shift = entry.value;
+        if (!shift.last().dst().isTmp())
+            handleShift(shift);
+        commitResult();
+    }
+
+    for (Rotate& rotate : rotates) {
+        if (!rotate.fringe.isEmpty()) {
+            // Make sure we do the fringe first! This won't clobber any of the registers that are
+            // part of the rotation.
+            handleShift(rotate.fringe);
+        }
+        
+        bool canSwap = false;
+        Opcode swap = Oops;
+        Arg::Width swapWidth = Arg::Width8; // bogus value
+
+        // Currently, the swap instruction is not available for floating point on any architecture we
+        // support.
+        if (type == Arg::GP) {
+            // Figure out whether we will be doing 64-bit swaps or 32-bit swaps. If we have a mix of
+            // widths we handle that by fixing up the relevant register with zero-extends.
+            swap = Swap32;
+            swapWidth = Arg::Width32;
+            bool hasMemory = false;
+            bool hasIndex = false;
+            for (ShufflePair& pair : rotate.loop) {
+                switch (pair.width()) {
+                case Arg::Width32:
+                    break;
+                case Arg::Width64:
+                    swap = Swap64;
+                    swapWidth = Arg::Width64;
+                    break;
+                default:
+                    RELEASE_ASSERT_NOT_REACHED();
+                    break;
+                }
+
+                hasMemory |= pair.src().isMemory() || pair.dst().isMemory();
+                hasIndex |= pair.src().isIndex() || pair.dst().isIndex();
+            }
+            
+            canSwap = isValidForm(swap, Arg::Tmp, Arg::Tmp);
+
+            // We can totally use swaps even if there are shuffles involving memory. But, we play it
+            // safe in that case. There are corner cases we don't handle, and our ability to do it is
+            // contingent upon swap form availability.
+            
+            if (hasMemory) {
+                canSwap &= isValidForm(swap, Arg::Tmp, Arg::Addr);
+                
+                // We don't take the swapping path if there is a mix of widths and some of the
+                // shuffles involve memory. That gets too confusing. We might be able to relax this
+                // to only bail if there are subwidth pairs involving memory, but I haven't thought
+                // about it very hard. Anyway, this case is not common: rotates involving memory
+                // don't arise for function calls, and they will only happen for rotates in user code
+                // if some of the variables get spilled. It's hard to imagine a program that rotates
+                // data around in variables while also doing a combination of uint32->uint64 and
+                // int64->int32 casts.
+                for (ShufflePair& pair : rotate.loop)
+                    canSwap &= pair.width() == swapWidth;
+            }
+
+            if (hasIndex)
+                canSwap &= isValidForm(swap, Arg::Tmp, Arg::Index);
+        }
+
+        if (canSwap) {
+            for (unsigned i = rotate.loop.size() - 1; i--;) {
+                Arg left = rotate.loop[i].src();
+                Arg right = rotate.loop[i + 1].src();
+
+                if (left.isMemory() && right.isMemory()) {
+                    // Note that this is a super rare outcome. Rotates are rare. Spills are rare.
+                    // Moving data between two spills is rare. To get here a lot of rare stuff has to
+                    // all happen at once.
+                    
+                    Tmp scratch = getScratch(0, findPossibleScratch(code, type, left, right));
+                    RELEASE_ASSERT(scratch);
+                    result.append(Inst(moveForWidth(swapWidth), origin, left, scratch));
+                    result.append(Inst(swap, origin, scratch, right));
+                    result.append(Inst(moveForWidth(swapWidth), origin, scratch, left));
+                    returnScratch(0, scratch);
+                    continue;
+                }
+
+                if (left.isMemory())
+                    std::swap(left, right);
+                
+                result.append(Inst(swap, origin, left, right));
+            }
+
+            for (ShufflePair pair : rotate.loop) {
+                if (pair.width() == swapWidth)
+                    continue;
+
+                RELEASE_ASSERT(pair.width() == Arg::Width32);
+                RELEASE_ASSERT(swapWidth == Arg::Width64);
+                RELEASE_ASSERT(pair.dst().isTmp());
+
+                // Need to do an extra zero extension.
+                result.append(Inst(Move32, origin, pair.dst(), pair.dst()));
+            }
+        } else {
+            // We can treat this as a shift so long as we take the last destination (i.e. first
+            // source) and save it first. Then we handle the first entry in the pair in the rotate
+            // specially, after we restore the last destination. This requires some special care to
+            // find a scratch register. It's possible that we have a rotate that uses the entire
+            // available register file.
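+            //
+            // For example (hypothetical registers), the rotate a => b => c => a is
+            // emitted as: save a to the scratch, move c => a, move b => c, and then
+            // move the scratch => b.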
+
+            Tmp scratch = findPossibleScratch(
+                code, type,
+                [&] (Tmp tmp) -> bool {
+                    for (ShufflePair pair : rotate.loop) {
+                        if (pair.src().usesTmp(tmp))
+                            return false;
+                        if (pair.dst().usesTmp(tmp))
+                            return false;
+                    }
+                    return true;
+                });
+
+            // NOTE: This is the most likely use of scratch registers.
+            scratch = getScratch(0, scratch);
+
+            // We may not have found a scratch register. When this happens, we can just use the spill
+            // slot directly.
+            Arg rotateSave = scratch ? Arg(scratch) : scratches[0];
+            
+            handleShiftPair(
+                ShufflePair(rotate.loop.last().dst(), rotateSave, rotate.loop[0].width()), 1);
+
+            for (unsigned i = rotate.loop.size(); i-- > 1;)
+                handleShiftPair(rotate.loop[i], 1);
+
+            handleShiftPair(
+                ShufflePair(rotateSave, rotate.loop[0].dst(), rotate.loop[0].width()), 1);
+
+            if (scratch)
+                returnScratch(0, scratch);
+        }
+
+        commitResult();
+    }
+
+    ASSERT(result.isEmpty());
+
+    for (unsigned i = resultPacks.size(); i--;)
+        result.appendVector(resultPacks[i]);
+
+    return result;
+}
+
+Vector<Inst> emitShuffle(
+    Code& code, const Vector<ShufflePair>& pairs,
+    const std::array<Arg, 2>& gpScratch, const std::array<Arg, 2>& fpScratch,
+    Value* origin)
+{
+    Vector<ShufflePair> gpPairs;
+    Vector<ShufflePair> fpPairs;
+    for (const ShufflePair& pair : pairs) {
+        if (pair.src().isMemory() && pair.dst().isMemory() && pair.width() > Arg::pointerWidth()) {
+            // 8-byte memory-to-memory moves on a 32-bit platform are best handled as float moves.
+            fpPairs.append(pair);
+        } else if (pair.src().isGP() && pair.dst().isGP()) {
+            // This means that gpPairs gets memory-to-memory shuffles. The assumption is that we
+            // can do that more efficiently using GPRs, except in the special case above.
+            gpPairs.append(pair);
+        } else
+            fpPairs.append(pair);
+    }
+
+    Vector<Inst> result;
+    result.appendVector(emitShuffle(code, gpPairs, gpScratch, Arg::GP, origin));
+    result.appendVector(emitShuffle(code, fpPairs, fpScratch, Arg::FP, origin));
+    return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirEmitShuffle.h b/Source/JavaScriptCore/b3/air/AirEmitShuffle.h
new file mode 100644
index 000000000..b2c3bb0c2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirEmitShuffle.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirInst.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class Value;
+
+namespace Air {
+
+class Code;
+
+class ShufflePair {
+public:
+    ShufflePair()
+    {
+    }
+    
+    ShufflePair(const Arg& src, const Arg& dst, Arg::Width width)
+        : m_src(src)
+        , m_dst(dst)
+        , m_width(width)
+    {
+    }
+
+    const Arg& src() const { return m_src; }
+    const Arg& dst() const { return m_dst; }
+
+    // The width determines the kind of move we do. You can only choose Width32 or Width64 right now.
+    // For GP, it picks between Move32 and Move. For FP, it picks between MoveFloat and MoveDouble.
+    Arg::Width width() const { return m_width; }
+
+    void dump(PrintStream&) const;
+    
+private:
+    Arg m_src;
+    Arg m_dst;
+    Arg::Width m_width { Arg::Width8 };
+};
+
+// Create a Shuffle instruction.
+Inst createShuffle(Value* origin, const Vector<ShufflePair>&);
+
+// Perform a shuffle of a given type. The scratch argument is mandatory. You should pass it as
+// follows: If you know that you have scratch registers or temporaries available - that is, they're
+// registers that are not mentioned in the shuffle, have the same type as the shuffle, and are not
+// live at the shuffle - then you can pass them. If you don't have scratch registers available or if
+// you don't feel like looking for them, you can pass memory locations. It's always safe to pass a
+// pair of memory locations, and replacing either memory location with a register can be viewed as an
+// optimization. It's a pretty important optimization. Some more notes:
+//
+// - We define scratch registers as things that are not live before the shuffle and are not one of
+//   the destinations of the shuffle. Not being live before the shuffle also means that they cannot
+//   be used for any of the sources of the shuffle.
+//
+// - A second scratch location is only needed when you have shuffle pairs where memory is used both
+//   as source and destination.
+//
+// - You're guaranteed not to need any scratch locations if there is a Swap instruction available for
+//   the type and you don't have any memory locations that are both the source and the destination of
+//   some pairs. GP supports Swap on x86 while FP never supports Swap.
+//
+// - Passing memory locations as scratch when running emitShuffle() before register allocation is
+//   silly, since that will cause emitShuffle() to pick some specific registers when it does need
+//   scratch. One easy way to avoid that predicament is to ensure that you call emitShuffle() after
+//   register allocation. For this reason there is a Shuffle instruction, so that shuffles can be
+//   deferred until after regalloc.
+//
+// - Shuffles with memory=>memory pairs are not very well tuned. You should avoid them if you want
+//   performance. If you need to do them, then making sure that you reserve a temporary is one way to
+//   get acceptable performance.
+//
+// NOTE: Use this method (and its friend below) to emit shuffles after register allocation. Before
+// register allocation it is much better to simply use the Shuffle instruction.
+Vector<Inst> emitShuffle(
+    Code& code, Vector<ShufflePair>, std::array<Arg, 2> scratch, Arg::Type, Value* origin);
+
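+// A minimal usage sketch (hypothetical tmps and scratch registers, not part of this
+// patch):
+//
+//     Vector<ShufflePair> pairs;
+//     pairs.append(ShufflePair(Arg(srcTmp), Arg(dstTmp), Arg::Width64));
+//     Vector<Inst> insts = emitShuffle(
+//         code, pairs, { Arg(scratchTmp0), Arg(scratchTmp1) }, Arg::GP, origin);
+//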
+// Perform a shuffle that involves any number of types. Pass scratch registers or memory locations
+// for each type according to the rules above.
+Vector<Inst> emitShuffle(
+    Code& code, const Vector<ShufflePair>&,
+    const std::array<Arg, 2>& gpScratch, const std::array<Arg, 2>& fpScratch,
+    Value* origin);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp b/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp
new file mode 100644
index 000000000..d000d6c5d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirFixObviousSpills.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/IndexMap.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+
+class FixObviousSpills {
+public:
+    FixObviousSpills(Code& code)
+        : m_code(code)
+        , m_atHead(code.size())
+    {
+    }
+
+    void run()
+    {
+        if (verbose)
+            dataLog("Code before fixObviousSpills:\n", m_code);
+        
+        computeAliases();
+        fixCode();
+    }
+
+private:
+    void computeAliases()
+    {
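+        // A standard forward dataflow fixpoint: interpret each reachable block starting
+        // from its head state, then merge the resulting exit state into each successor's
+        // head state until nothing changes.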
+        m_atHead[m_code[0]].wasVisited = true;
+        
+        bool changed = true;
+        while (changed) {
+            changed = false;
+            
+            for (BasicBlock* block : m_code) {
+                m_block = block;
+                m_state = m_atHead[block];
+                if (!m_state.wasVisited)
+                    continue;
+
+                if (verbose)
+                    dataLog("Executing block ", *m_block, ": ", m_state, "\n");
+                
+                for (m_instIndex = 0; m_instIndex < block->size(); ++m_instIndex)
+                    executeInst();
+
+                for (BasicBlock* successor : block->successorBlocks()) {
+                    State& toState = m_atHead[successor];
+                    if (toState.wasVisited)
+                        changed |= toState.merge(m_state);
+                    else {
+                        toState = m_state;
+                        changed = true;
+                    }
+                }
+            }
+        }
+    }
+
+    void fixCode()
+    {
+        for (BasicBlock* block : m_code) {
+            m_block = block;
+            m_state = m_atHead[block];
+            RELEASE_ASSERT(m_state.wasVisited);
+
+            for (m_instIndex = 0; m_instIndex < block->size(); ++m_instIndex) {
+                fixInst();
+                executeInst();
+            }
+        }
+    }
+
+    void executeInst()
+    {
+        Inst& inst = m_block->at(m_instIndex);
+
+        if (verbose)
+            dataLog("    Executing ", inst, ": ", m_state, "\n");
+
+        Inst::forEachDefWithExtraClobberedRegs<Arg>(
+            &inst, &inst,
+            [&] (const Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+                if (verbose)
+                    dataLog("        Clobbering ", arg, "\n");
+                m_state.clobber(arg);
+            });
+
+        switch (inst.kind.opcode) {
+        case Move:
+            if (inst.args[0].isSomeImm()) {
+                if (inst.args[1].isReg())
+                    m_state.addAlias(RegConst(inst.args[1].reg(), inst.args[0].value()));
+                else if (isSpillSlot(inst.args[1]))
+                    m_state.addAlias(SlotConst(inst.args[1].stackSlot(), inst.args[0].value()));
+            } else if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+                if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+                    m_state.addAlias(RegConst(inst.args[1].reg(), *constant));
+                m_state.addAlias(
+                    RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::AllBits));
+            } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+                if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+                    m_state.addAlias(SlotConst(inst.args[1].stackSlot(), *constant));
+                m_state.addAlias(
+                    RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::AllBits));
+            }
+            break;
+
+        case Move32:
+            if (inst.args[0].isSomeImm()) {
+                if (inst.args[1].isReg())
+                    m_state.addAlias(RegConst(inst.args[1].reg(), static_cast<uint32_t>(inst.args[0].value())));
+                else if (isSpillSlot(inst.args[1]))
+                    m_state.addAlias(SlotConst(inst.args[1].stackSlot(), static_cast<uint32_t>(inst.args[0].value())));
+            } else if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+                if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+                    m_state.addAlias(RegConst(inst.args[1].reg(), static_cast<uint32_t>(*constant)));
+                m_state.addAlias(
+                    RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::ZExt32));
+            } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+                if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+                    m_state.addAlias(SlotConst(inst.args[1].stackSlot(), static_cast<uint32_t>(*constant)));
+                m_state.addAlias(
+                    RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::Match32));
+            }
+            break;
+
+        case MoveFloat:
+            if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+                m_state.addAlias(
+                    RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::Match32));
+            } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+                m_state.addAlias(
+                    RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::Match32));
+            }
+            break;
+
+        case MoveDouble:
+            if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+                m_state.addAlias(
+                    RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::AllBits));
+            } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+                m_state.addAlias(
+                    RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::AllBits));
+            }
+            break;
+
+        default:
+            break;
+        }
+    }
+
+    void fixInst()
+    {
+        Inst& inst = m_block->at(m_instIndex);
+
+        if (verbose)
+            dataLog("Fixing inst ", inst, ": ", m_state, "\n");
+        
+        // First handle some special instructions.
+        switch (inst.kind.opcode) {
+        case Move: {
+            if (inst.args[0].isBigImm() && inst.args[1].isReg()
+                && isValidForm(Add64, Arg::Imm, Arg::Tmp, Arg::Tmp)) {
+                // BigImm materializations are super expensive on both x86 and ARM. Let's try to
+                // materialize this bad boy using math instead. Note that we use unsigned math here
+                // since it's more deterministic.
+                uint64_t myValue = inst.args[0].value();
+                Reg myDest = inst.args[1].reg();
+                for (const RegConst& regConst : m_state.regConst) {
+                    uint64_t otherValue = regConst.constant;
+                    
+                    // Let's try add. That's the only thing that works on all platforms, since it's
+                    // the only cheap arithmetic op that x86 does in three operands. Long term, we
+                    // should add fancier materializations here for ARM if the BigImm is yuge.
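+                    //
+                    // For example (illustrative constants): if some register is known
+                    // to hold 0x123456789c and we need 0x12345678a0, an Add64 of 4 from
+                    // that register is much cheaper than a full 64-bit materialization.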
+                    uint64_t delta = myValue - otherValue;
+                    
+                    if (Arg::isValidImmForm(delta)) {
+                        inst.kind = Add64;
+                        inst.args.resize(3);
+                        inst.args[0] = Arg::imm(delta);
+                        inst.args[1] = Tmp(regConst.reg);
+                        inst.args[2] = Tmp(myDest);
+                        return;
+                    }
+                }
+                return;
+            }
+            break;
+        }
+            
+        default:
+            break;
+        }
+
+        // Create a copy in case we invalidate the instruction. That doesn't happen often.
+        Inst instCopy = inst;
+
+        // The goal is to replace references to stack slots. We only care about early uses. We can't
+        // handle UseDefs. We could teach this to handle UseDefs if we inserted a store instruction
+        // after and we proved that the register aliased to the stack slot dies here. We can get that
+        // information from the liveness analysis. We also can't handle late uses, because we don't
+        // look at late clobbers when doing this.
+        bool didThings = false;
+        auto handleArg = [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width width) {
+            if (!isSpillSlot(arg))
+                return;
+            if (!Arg::isEarlyUse(role))
+                return;
+            if (Arg::isAnyDef(role))
+                return;
+            
+            // Try to get a register if at all possible.
+            if (const RegSlot* alias = m_state.getRegSlot(arg.stackSlot())) {
+                switch (width) {
+                case Arg::Width64:
+                    if (alias->mode != RegSlot::AllBits)
+                        return;
+                    if (verbose)
+                        dataLog("    Replacing ", arg, " with ", alias->reg, "\n");
+                    arg = Tmp(alias->reg);
+                    didThings = true;
+                    return;
+                case Arg::Width32:
+                    if (verbose)
+                        dataLog("    Replacing ", arg, " with ", alias->reg, " (subwidth case)\n");
+                    arg = Tmp(alias->reg);
+                    didThings = true;
+                    return;
+                default:
+                    return;
+                }
+            }
+
+            // Fall back to a constant if replacing with a register didn't work.
+            if (const SlotConst* alias = m_state.getSlotConst(arg.stackSlot())) {
+                if (verbose)
+                    dataLog("    Replacing ", arg, " with constant ", alias->constant, "\n");
+                if (Arg::isValidImmForm(alias->constant))
+                    arg = Arg::imm(alias->constant);
+                else
+                    arg = Arg::bigImm(alias->constant);
+                didThings = true;
+                return;
+            }
+        };
+        
+        inst.forEachArg(handleArg);
+        if (!didThings || inst.isValidForm())
+            return;
+        
+        // We introduced something invalid along the way. Back up and carefully handle each argument.
+        inst = instCopy;
+        ASSERT(inst.isValidForm());
+        inst.forEachArg(
+            [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+                Arg argCopy = arg;
+                handleArg(arg, role, type, width);
+                if (!inst.isValidForm())
+                    arg = argCopy;
+            });
+    }
+    
+    static bool isSpillSlot(const Arg& arg)
+    {
+        return arg.isStack() && arg.stackSlot()->isSpill();
+    }
+    
+    struct RegConst {
+        RegConst()
+        {
+        }
+        
+        RegConst(Reg reg, int64_t constant)
+            : reg(reg)
+            , constant(constant)
+        {
+        }
+
+        explicit operator bool() const
+        {
+            return !!reg;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(reg, "->", constant);
+        }
+        
+        Reg reg;
+        int64_t constant { 0 };
+    };
+
+    struct RegSlot {
+        enum Mode : int8_t {
+            AllBits, // Register and stack slot contain the same full-width value.
+            ZExt32, // Register contains zero-extended contents of stack slot.
+            Match32 // Low 32 bits of register match low 32 bits of stack slot.
+        };
+        
+        RegSlot()
+        {
+        }
+
+        RegSlot(Reg reg, StackSlot* slot, Mode mode)
+            : slot(slot)
+            , reg(reg)
+            , mode(mode)
+        {
+        }
+
+        explicit operator bool() const
+        {
+            return slot && reg;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(pointerDump(slot), "->", reg);
+            switch (mode) {
+            case AllBits:
+                out.print("(AllBits)");
+                break;
+            case ZExt32:
+                out.print("(ZExt32)");
+                break;
+            case Match32:
+                out.print("(Match32)");
+                break;
+            }
+        }
+        
+        StackSlot* slot { nullptr };
+        Reg reg;
+        Mode mode { AllBits };
+    };
+
+    struct SlotConst {
+        SlotConst()
+        {
+        }
+
+        SlotConst(StackSlot* slot, int64_t constant)
+            : slot(slot)
+            , constant(constant)
+        {
+        }
+
+        explicit operator bool() const
+        {
+            return slot;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(pointerDump(slot), "->", constant);
+        }
+        
+        StackSlot* slot { nullptr };
+        int64_t constant { 0 };
+    };
+
+    struct State {
+        void addAlias(const RegConst& newAlias)
+        {
+            regConst.append(newAlias);
+        }
+        void addAlias(const RegSlot& newAlias)
+        {
+            regSlot.append(newAlias);
+        }
+        void addAlias(const SlotConst& newAlias)
+        {
+            slotConst.append(newAlias);
+        }
+
+        const RegConst* getRegConst(Reg reg) const
+        {
+            for (const RegConst& alias : regConst) {
+                if (alias.reg == reg)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        const RegSlot* getRegSlot(Reg reg) const
+        {
+            for (const RegSlot& alias : regSlot) {
+                if (alias.reg == reg)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        const RegSlot* getRegSlot(StackSlot* slot) const
+        {
+            for (const RegSlot& alias : regSlot) {
+                if (alias.slot == slot)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        const RegSlot* getRegSlot(Reg reg, StackSlot* slot) const
+        {
+            for (const RegSlot& alias : regSlot) {
+                if (alias.reg == reg && alias.slot == slot)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        const SlotConst* getSlotConst(StackSlot* slot) const
+        {
+            for (const SlotConst& alias : slotConst) {
+                if (alias.slot == slot)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        std::optional<int64_t> constantFor(const Arg& arg)
+        {
+            if (arg.isReg()) {
+                if (const RegConst* alias = getRegConst(arg.reg()))
+                    return alias->constant;
+                return std::nullopt;
+            }
+            if (arg.isStack()) {
+                if (const SlotConst* alias = getSlotConst(arg.stackSlot()))
+                    return alias->constant;
+                return std::nullopt;
+            }
+            return std::nullopt;
+        }
+
+        void clobber(const Arg& arg)
+        {
+            if (arg.isReg()) {
+                regConst.removeAllMatching(
+                    [&] (const RegConst& alias) -> bool {
+                        return alias.reg == arg.reg();
+                    });
+                regSlot.removeAllMatching(
+                    [&] (const RegSlot& alias) -> bool {
+                        return alias.reg == arg.reg();
+                    });
+                return;
+            }
+            if (arg.isStack()) {
+                slotConst.removeAllMatching(
+                    [&] (const SlotConst& alias) -> bool {
+                        return alias.slot == arg.stackSlot();
+                    });
+                regSlot.removeAllMatching(
+                    [&] (const RegSlot& alias) -> bool {
+                        return alias.slot == arg.stackSlot();
+                    });
+            }
+        }
+
+        bool merge(const State& other)
+        {
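+            // Keep only the aliases that hold in both states. A RegSlot alias present in
+            // both but with differing modes is weakened to Match32, the weakest mode.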
+            bool changed = false;
+            
+            changed |= !!regConst.removeAllMatching(
+                [&] (RegConst& alias) -> bool {
+                    const RegConst* otherAlias = other.getRegConst(alias.reg);
+                    if (!otherAlias)
+                        return true;
+                    if (alias.constant != otherAlias->constant)
+                        return true;
+                    return false;
+                });
+
+            changed |= !!slotConst.removeAllMatching(
+                [&] (SlotConst& alias) -> bool {
+                    const SlotConst* otherAlias = other.getSlotConst(alias.slot);
+                    if (!otherAlias)
+                        return true;
+                    if (alias.constant != otherAlias->constant)
+                        return true;
+                    return false;
+                });
+
+            changed |= !!regSlot.removeAllMatching(
+                [&] (RegSlot& alias) -> bool {
+                    const RegSlot* otherAlias = other.getRegSlot(alias.reg, alias.slot);
+                    if (!otherAlias)
+                        return true;
+                    if (alias.mode != RegSlot::Match32 && alias.mode != otherAlias->mode) {
+                        alias.mode = RegSlot::Match32;
+                        changed = true;
+                    }
+                    return false;
+                });
+
+            return changed;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(
+                "{regConst = [", listDump(regConst), "], slotConst = [", listDump(slotConst),
+                "], regSlot = [", listDump(regSlot), "], wasVisited = ", wasVisited, "}");
+        }
+
+        Vector<RegConst> regConst;
+        Vector<SlotConst> slotConst;
+        Vector<RegSlot> regSlot;
+        bool wasVisited { false };
+    };
+
+    Code& m_code;
+    IndexMap<BasicBlock, State> m_atHead;
+    State m_state;
+    BasicBlock* m_block { nullptr };
+    unsigned m_instIndex { 0 };
+};
+
+} // anonymous namespace
+
+void fixObviousSpills(Code& code)
+{
+    PhaseScope phaseScope(code, "fixObviousSpills");
+
+    FixObviousSpills fixObviousSpills(code);
+    fixObviousSpills.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirFixObviousSpills.h b/Source/JavaScriptCore/b3/air/AirFixObviousSpills.h
new file mode 100644
index 000000000..fb8e41fe2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFixObviousSpills.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a forward flow phase that tracks equivalence between spill slots and registers. It
+// removes loads from spill slots in cases where the contents of the spill slot can be found in (or
+// computed from) a register.
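+//
+// For example (a sketch with made-up operands): given
+//
+//     Move %rax, (spill0)
+//     ... nothing clobbers %rax or (spill0) ...
+//     Add64 (spill0), %rcx
+//
+// the load from (spill0) can be rewritten to read %rax directly:
+//
+//     Add64 %rax, %rcx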
+void fixObviousSpills(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.cpp b/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.cpp
new file mode 100644
index 000000000..b3d5d0b71
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirFixPartialRegisterStalls.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInst.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "MacroAssembler.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool hasPartialXmmRegUpdate(const Inst& inst)
+{
+    switch (inst.kind.opcode) {
+    case ConvertDoubleToFloat:
+    case ConvertFloatToDouble:
+    case ConvertInt32ToDouble:
+    case ConvertInt64ToDouble:
+    case ConvertInt32ToFloat:
+    case ConvertInt64ToFloat:
+    case SqrtDouble:
+    case SqrtFloat:
+    case CeilDouble:
+    case CeilFloat:
+    case FloorDouble:
+    case FloorFloat:
+        return true;
+    default:
+        break;
+    }
+    return false;
+}
+
+bool isDependencyBreaking(const Inst& inst)
+{
+    // "xorps reg, reg" is used by the frontend to remove the dependency on its argument.
+    return inst.kind.opcode == MoveZeroToDouble;
+}
+
+// FIXME: find a good distance per architecture experimentally.
+// LLVM uses a distance of 16 but that comes from Nehalem.
+unsigned char minimumSafeDistance = 16;
+
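+// For each FP register, tracks the distance (in instructions) back to its most recent
+// definition; 255 is the saturating "far away" default, comfortably above
+// minimumSafeDistance.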
+struct FPDefDistance {
+    FPDefDistance()
+    {
+        for (unsigned i = 0; i < MacroAssembler::numberOfFPRegisters(); ++i)
+            distance[i] = 255;
+    }
+
+    void reset(FPRReg reg)
+    {
+        unsigned index = MacroAssembler::fpRegisterIndex(reg);
+        distance[index] = 255;
+    }
+
+    void add(FPRReg reg, unsigned registerDistance)
+    {
+        unsigned index = MacroAssembler::fpRegisterIndex(reg);
+        if (registerDistance < distance[index])
+            distance[index] = static_cast<unsigned char>(registerDistance);
+    }
+
+    bool updateFromPredecessor(FPDefDistance& predecessorDistance, unsigned constantOffset = 0)
+    {
+        bool changed = false;
+        for (unsigned i = 0; i < MacroAssembler::numberOfFPRegisters(); ++i) {
+            unsigned regDistance = predecessorDistance.distance[i] + constantOffset;
+            if (regDistance < minimumSafeDistance && regDistance < distance[i]) {
+                distance[i] = regDistance;
+                changed = true;
+            }
+        }
+        return changed;
+    }
+
+    unsigned char distance[MacroAssembler::numberOfFPRegisters()];
+};
+
+void updateDistances(Inst& inst, FPDefDistance& localDistance, unsigned& distanceToBlockEnd)
+{
+    --distanceToBlockEnd;
+
+    if (isDependencyBreaking(inst)) {
+        localDistance.reset(inst.args[0].tmp().fpr());
+        return;
+    }
+
+    inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+        ASSERT_WITH_MESSAGE(tmp.isReg(), "This phase must be run after register allocation.");
+
+        if (tmp.isFPR() && Arg::isAnyDef(role))
+            localDistance.add(tmp.fpr(), distanceToBlockEnd);
+    });
+}
+
+}
+
+void fixPartialRegisterStalls(Code& code)
+{
+    if (!isX86())
+        return;
+
+    PhaseScope phaseScope(code, "fixPartialRegisterStalls");
+
+    Vector<BasicBlock*> candidates;
+
+    for (BasicBlock* block : code) {
+        for (const Inst& inst : *block) {
+            if (hasPartialXmmRegUpdate(inst)) {
+                candidates.append(block);
+                break;
+            }
+        }
+    }
+
+    // Fortunately, instructions that cause partial stalls are rare. Return early if no block
+    // contains any.
+    if (candidates.isEmpty())
+        return;
+
+    // For each block, this provides the distance to the last instruction setting each register
+    // on block *entry*.
+    IndexMap<BasicBlock, FPDefDistance> lastDefDistance(code.size());
+
+    // Blocks with dirty distance at head.
+    IndexSet<BasicBlock> dirty;
+
+    // First, we compute the local distance for each block and push it to the successors.
+    for (BasicBlock* block : code) {
+        FPDefDistance localDistance;
+
+        unsigned distanceToBlockEnd = block->size();
+        for (Inst& inst : *block)
+            updateDistances(inst, localDistance, distanceToBlockEnd);
+
+        for (BasicBlock* successor : block->successorBlocks()) {
+            if (lastDefDistance[successor].updateFromPredecessor(localDistance))
+                dirty.add(successor);
+        }
+    }
+
+    // Now we propagate the minimums across blocks.
+    bool changed;
+    do {
+        changed = false;
+
+        for (BasicBlock* block : code) {
+            if (!dirty.remove(block))
+                continue;
+
+            // Little shortcut: if the block is big enough, propagating it won't add any information.
+            if (block->size() >= minimumSafeDistance)
+                continue;
+
+            unsigned blockSize = block->size();
+            FPDefDistance& blockDistance = lastDefDistance[block];
+            for (BasicBlock* successor : block->successorBlocks()) {
+                if (lastDefDistance[successor].updateFromPredecessor(blockDistance, blockSize)) {
+                    dirty.add(successor);
+                    changed = true;
+                }
+            }
+        }
+    } while (changed);
+
+    // Finally, update each block as needed.
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : candidates) {
+        unsigned distanceToBlockEnd = block->size();
+        FPDefDistance& localDistance = lastDefDistance[block];
+
+        for (unsigned i = 0; i < block->size(); ++i) {
+            Inst& inst = block->at(i);
+
+            if (hasPartialXmmRegUpdate(inst)) {
+                RegisterSet defs;
+                RegisterSet uses;
+                inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+                    if (tmp.isFPR()) {
+                        if (Arg::isAnyDef(role))
+                            defs.set(tmp.fpr());
+                        if (Arg::isAnyUse(role))
+                            uses.set(tmp.fpr());
+                    }
+                });
+                // We only care about values we define but not use. Otherwise we have to wait
+                // for the value to be resolved anyway.
+                defs.exclude(uses);
+
+                defs.forEach([&] (Reg reg) {
+                    if (localDistance.distance[MacroAssembler::fpRegisterIndex(reg.fpr())] < minimumSafeDistance)
+                        insertionSet.insert(i, MoveZeroToDouble, inst.origin, Tmp(reg));
+                });
+            }
+
+            updateDistances(inst, localDistance, distanceToBlockEnd);
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.h b/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.h
new file mode 100644
index 000000000..009327948
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// x86 has a pipelining hazard caused by false dependencies between instructions.
+//
+// Some instructions update only part of a register; they can only be scheduled after
+// the previous definition of that register has been computed. The compiler can avoid
+// this problem by explicitly resetting the entire register before executing the
+// instruction with the partial update.
+//
+// See "Partial XMM Register Stalls" and "Dependency Breaking Idioms" in the manual.
+void fixPartialRegisterStalls(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirFrequentedBlock.h b/Source/JavaScriptCore/b3/air/AirFrequentedBlock.h
new file mode 100644
index 000000000..37cd28736
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFrequentedBlock.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3GenericFrequentedBlock.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+
+typedef GenericFrequentedBlock<BasicBlock> FrequentedBlock;
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirGenerate.cpp b/Source/JavaScriptCore/b3/air/AirGenerate.cpp
new file mode 100644
index 000000000..a99f0501c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirGenerate.cpp
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirGenerate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirAllocateStack.h"
+#include "AirCode.h"
+#include "AirDumpAsJS.h"
+#include "AirEliminateDeadCode.h"
+#include "AirFixObviousSpills.h"
+#include "AirFixPartialRegisterStalls.h"
+#include "AirGenerationContext.h"
+#include "AirHandleCalleeSaves.h"
+#include "AirIteratedRegisterCoalescing.h"
+#include "AirLogRegisterPressure.h"
+#include "AirLowerAfterRegAlloc.h"
+#include "AirLowerEntrySwitch.h"
+#include "AirLowerMacros.h"
+#include "AirOpcodeUtils.h"
+#include "AirOptimizeBlockOrder.h"
+#include "AirReportUsedRegisters.h"
+#include "AirSimplifyCFG.h"
+#include "AirSpillEverything.h"
+#include "AirValidate.h"
+#include "B3Common.h"
+#include "B3Procedure.h"
+#include "B3TimingScope.h"
+#include "B3ValueInlines.h"
+#include "CCallHelpers.h"
+#include "DisallowMacroScratchRegisterUsage.h"
+#include "LinkBuffer.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void prepareForGeneration(Code& code)
+{
+    TimingScope timingScope("Air::prepareForGeneration");
+    
+    // We don't expect the incoming code to have predecessors computed.
+    code.resetReachability();
+    
+    if (shouldValidateIR())
+        validate(code);
+
+    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
+    if (shouldDumpIR(AirMode) && !shouldDumpIRAtEachPhase(AirMode)) {
+        dataLog("Initial air:\n");
+        dataLog(code);
+    }
+
+    lowerMacros(code);
+
+    // This is where we run our optimizations and transformations.
+    // FIXME: Add Air optimizations.
+    // https://bugs.webkit.org/show_bug.cgi?id=150456
+    
+    eliminateDeadCode(code);
+
+    // Register allocation for all the Tmps that do not have a corresponding machine register.
+    // After this phase, every Tmp has a reg.
+    //
+    // For debugging, you can use spillEverything() to put everything to the stack between each Inst.
+    if (Options::airSpillsEverything())
+        spillEverything(code);
+    else
+        iteratedRegisterCoalescing(code);
+
+    if (Options::logAirRegisterPressure()) {
+        dataLog("Register pressure after register allocation:\n");
+        logRegisterPressure(code);
+    }
+
+    // This replaces uses of spill slots with registers or constants if possible. It does this by
+    // minimizing the amount that we perturb the already-chosen register allocation. It may extend
+    // the live ranges of registers though.
+    fixObviousSpills(code);
+
+    lowerAfterRegAlloc(code);
+
+    // Prior to this point the prologue and epilogue is implicit. This makes it explicit. It also
+    // does things like identify which callee-saves we're using and saves them.
+    handleCalleeSaves(code);
+    
+    if (Options::dumpAirAsJSBeforeAllocateStack()) {
+        dataLog("Dumping Air as JS before allocateStack:\n");
+        dumpAsJS(code, WTF::dataFile());
+        dataLog("Air hash: ", code.jsHash(), "\n");
+    }
+
+    // This turns all Stack and CallArg Args into Addr args that use the frame pointer. It does
+    // this by first-fit allocating stack slots. It should be pretty darn close to optimal, so we
+    // shouldn't have to worry about this very much.
+    allocateStack(code);
+    
+    if (Options::dumpAirAfterAllocateStack()) {
+        dataLog("Dumping Air after allocateStack:\n");
+        dataLog(code);
+        dataLog("Air hash: ", code.jsHash(), "\n");
+    }
+
+    // If we coalesced moves then we can unbreak critical edges. This is the main reason for this
+    // phase.
+    simplifyCFG(code);
+
+    // This is needed to satisfy a requirement of B3::StackmapValue.
+    reportUsedRegisters(code);
+
+    // Attempt to remove false dependencies between instructions created by partial register changes.
+    // This must be executed as late as possible as it depends on the instructions order and register
+    // use. We _must_ run this after reportUsedRegisters(), since that kills variable assignments
+    // that seem dead. Luckily, this phase does not change register liveness, so that's OK.
+    fixPartialRegisterStalls(code);
+    
+    // Actually create entrypoints.
+    lowerEntrySwitch(code);
+    
+    // The control flow graph can be simplified further after we have lowered EntrySwitch.
+    simplifyCFG(code);
+
+    // This sorts the basic blocks in Code to achieve an ordering that maximizes the likelihood that a high
+    // frequency successor is also the fall-through target.
+    optimizeBlockOrder(code);
+
+    if (shouldValidateIR())
+        validate(code);
+
+    // Do a final dump of Air. Note that we have to do this even if we are doing per-phase dumping,
+    // since the final generation is not a phase.
+    if (shouldDumpIR(AirMode)) {
+        dataLog("Air after ", code.lastPhaseName(), ", before generation:\n");
+        dataLog(code);
+    }
+}
+
+void generate(Code& code, CCallHelpers& jit)
+{
+    TimingScope timingScope("Air::generate");
+
+    DisallowMacroScratchRegisterUsage disallowScratch(jit);
+
+    auto argFor = [&] (const RegisterAtOffset& entry) -> CCallHelpers::Address {
+        return CCallHelpers::Address(GPRInfo::callFrameRegister, entry.offset());
+    };
+    
+    // And now, we generate code.
+    GenerationContext context;
+    context.code = &code;
+    context.blockLabels.resize(code.size());
+    for (BasicBlock* block : code) {
+        if (block)
+            context.blockLabels[block] = Box<CCallHelpers::Label>::create();
+    }
+    IndexMap<BasicBlock, Vector<CCallHelpers::Jump>> blockJumps(code.size());
+
+    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
+        if (context.blockLabels[target]->isSet()) {
+            jump.linkTo(*context.blockLabels[target], &jit);
+            return;
+        }
+
+        blockJumps[target].append(jump);
+    };
+
+    PCToOriginMap& pcToOriginMap = code.proc().pcToOriginMap();
+    auto addItem = [&] (Inst& inst) {
+        if (!inst.origin) {
+            pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), Origin());
+            return;
+        }
+        pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), inst.origin->origin());
+    };
+
+    for (BasicBlock* block : code) {
+        context.currentBlock = block;
+        context.indexInBlock = UINT_MAX;
+        blockJumps[block].link(&jit);
+        CCallHelpers::Label label = jit.label();
+        *context.blockLabels[block] = label;
+
+        if (code.isEntrypoint(block)) {
+            jit.emitFunctionPrologue();
+            if (code.frameSize())
+                jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);
+            
+            for (const RegisterAtOffset& entry : code.calleeSaveRegisters()) {
+                if (entry.reg().isGPR())
+                    jit.storePtr(entry.reg().gpr(), argFor(entry));
+                else
+                    jit.storeDouble(entry.reg().fpr(), argFor(entry));
+            }
+        }
+        
+        ASSERT(block->size() >= 1);
+        for (unsigned i = 0; i < block->size() - 1; ++i) {
+            context.indexInBlock = i;
+            Inst& inst = block->at(i);
+            addItem(inst);
+            CCallHelpers::Jump jump = inst.generate(jit, context);
+            ASSERT_UNUSED(jump, !jump.isSet());
+        }
+
+        context.indexInBlock = block->size() - 1;
+        
+        if (block->last().kind.opcode == Jump
+            && block->successorBlock(0) == code.findNextBlock(block))
+            continue;
+
+        addItem(block->last());
+
+        if (isReturn(block->last().kind.opcode)) {
+            // We currently don't represent the full prologue/epilogue in Air, so we need to
+            // have this override.
+            if (code.frameSize()) {
+                for (const RegisterAtOffset& entry : code.calleeSaveRegisters()) {
+                    if (entry.reg().isGPR())
+                        jit.loadPtr(argFor(entry), entry.reg().gpr());
+                    else
+                        jit.loadDouble(argFor(entry), entry.reg().fpr());
+                }
+                jit.emitFunctionEpilogue();
+            } else
+                jit.emitFunctionEpilogueWithEmptyFrame();
+            jit.ret();
+            addItem(block->last());
+            continue;
+        }
+
+        CCallHelpers::Jump jump = block->last().generate(jit, context);
+        // The jump won't be set for patchpoints. It won't be set for Oops because then it won't have
+        // any successors.
+        if (jump.isSet()) {
+            switch (block->numSuccessors()) {
+            case 1:
+                link(jump, block->successorBlock(0));
+                break;
+            case 2:
+                link(jump, block->successorBlock(0));
+                if (block->successorBlock(1) != code.findNextBlock(block))
+                    link(jit.jump(), block->successorBlock(1));
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+        }
+        addItem(block->last());
+    }
+    
+    context.currentBlock = nullptr;
+    context.indexInBlock = UINT_MAX;
+    
+    Vector<CCallHelpers::Label> entrypointLabels(code.numEntrypoints());
+    for (unsigned i = code.numEntrypoints(); i--;)
+        entrypointLabels[i] = *context.blockLabels[code.entrypoint(i).block()];
+    code.setEntrypointLabels(WTFMove(entrypointLabels));
+
+    pcToOriginMap.appendItem(jit.label(), Origin());
+    // FIXME: Make late paths have Origins: https://bugs.webkit.org/show_bug.cgi?id=153689
+    for (auto& latePath : context.latePaths)
+        latePath->run(jit, context);
+    pcToOriginMap.appendItem(jit.label(), Origin());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirGenerate.h b/Source/JavaScriptCore/b3/air/AirGenerate.h
new file mode 100644
index 000000000..60839bea5
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirGenerate.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC {
+
+class CCallHelpers;
+
+namespace B3 { namespace Air {
+
+class Code;
+
+// This takes an Air::Code that hasn't had any stack allocation and optionally hasn't had any
+// register allocation and does both of those things.
+JS_EXPORT_PRIVATE void prepareForGeneration(Code&);
+
+// This generates the code using the given CCallHelpers instance. Note that this may call callbacks
+// in the supplied code as it is generating.
+JS_EXPORT_PRIVATE void generate(Code&, CCallHelpers&);
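+
+// A typical client drives the two phases back to back; roughly (construction of the
+// assembler depends on the client and is elided):
+//
+//     prepareForGeneration(code);
+//     CCallHelpers jit(...);
+//     generate(code, jit);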
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirGenerated.cpp b/Source/JavaScriptCore/b3/air/AirGenerated.cpp
new file mode 100644
index 000000000..6dd2304a9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirGenerated.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#if ENABLE(B3_JIT)
+
+// This is generated by opcode_generator.rb.
+#include "AirOpcodeGenerated.h"
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirGenerationContext.h b/Source/JavaScriptCore/b3/air/AirGenerationContext.h
new file mode 100644
index 000000000..f48b5bb8a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirGenerationContext.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "CCallHelpers.h"
+#include <wtf/Box.h>
+#include <wtf/IndexMap.h>
+#include <wtf/SharedTask.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+struct GenerationContext {
+    WTF_MAKE_NONCOPYABLE(GenerationContext);
+public:
+
+    GenerationContext() = default;
+
+    typedef void LatePathFunction(CCallHelpers&, GenerationContext&);
+    typedef SharedTask<LatePathFunction> LatePath;
+
+    Vector<RefPtr<LatePath>> latePaths;
+    IndexMap<BasicBlock, Box<CCallHelpers::Label>> blockLabels;
+    BasicBlock* currentBlock { nullptr };
+    unsigned indexInBlock { UINT_MAX };
+    Code* code { nullptr };
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp b/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp
new file mode 100644
index 000000000..97cdfa1c9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirHandleCalleeSaves.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void handleCalleeSaves(Code& code)
+{
+    PhaseScope phaseScope(code, "handleCalleeSaves");
+
+    RegisterSet usedCalleeSaves;
+
+    for (BasicBlock* block : code) {
+        for (Inst& inst : *block) {
+            inst.forEachTmpFast(
+                [&] (Tmp& tmp) {
+                    // At first we just record all used regs.
+                    usedCalleeSaves.set(tmp.reg());
+                });
+
+            if (inst.kind.opcode == Patch)
+                usedCalleeSaves.merge(inst.extraClobberedRegs());
+        }
+    }
+
+    // Now we filter to really get the callee saves.
+    usedCalleeSaves.filter(RegisterSet::calleeSaveRegisters());
+    usedCalleeSaves.filter(code.mutableRegs());
+    usedCalleeSaves.exclude(RegisterSet::stackRegisters()); // We don't need to save FP here.
+
+    if (!usedCalleeSaves.numberOfSetRegisters())
+        return;
+
+    code.calleeSaveRegisters() = RegisterAtOffsetList(usedCalleeSaves);
+
+    size_t byteSize = 0;
+    for (const RegisterAtOffset& entry : code.calleeSaveRegisters())
+        byteSize = std::max(static_cast<size_t>(-entry.offset()), byteSize);
+
+    StackSlot* savesArea = code.addStackSlot(byteSize, StackSlotKind::Locked);
+    // This is a bit weird since we could have already pinned a different stack slot to this
+    // area. Also, our runtime does not require us to pin the saves area. Maybe we shouldn't pin it?
+    savesArea->setOffsetFromFP(-byteSize);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h b/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h
new file mode 100644
index 000000000..b4b78a3b7
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This phase identifies callee-save registers and adds code to save/restore them in the
+// prologue/epilogue to the code. It's a mandatory phase.
+
+// FIXME: It would be cool to make this more interactive with the Air client and also more
+// powerful.
+// We should have shrink wrapping: https://bugs.webkit.org/show_bug.cgi?id=150458
+// We should make this interact with the client: https://bugs.webkit.org/show_bug.cgi?id=150459
+
+void handleCalleeSaves(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirInsertionSet.cpp b/Source/JavaScriptCore/b3/air/AirInsertionSet.cpp
new file mode 100644
index 000000000..452d4888f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInsertionSet.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirInsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void InsertionSet::insertInsts(size_t index, Vector<Inst>&& insts)
+{
+    for (Inst& inst : insts)
+        insertInst(index, WTFMove(inst));
+}
+
+void InsertionSet::execute(BasicBlock* block)
+{
+    bubbleSort(m_insertions.begin(), m_insertions.end());
+    executeInsertions(block->m_insts, m_insertions);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirInsertionSet.h b/Source/JavaScriptCore/b3/air/AirInsertionSet.h
new file mode 100644
index 000000000..84a791d40
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInsertionSet.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+class Code;
+
+typedef WTF::Insertion<Inst> Insertion;
+
+class InsertionSet {
+public:
+    InsertionSet(Code& code)
+        : m_code(code)
+    {
+    }
+
+    Code& code() { return m_code; }
+
+    template<typename T>
+    void appendInsertion(T&& insertion)
+    {
+        m_insertions.append(std::forward<T>(insertion));
+    }
+
+    template<typename Inst>
+    void insertInst(size_t index, Inst&& inst)
+    {
+        appendInsertion(Insertion(index, std::forward<Inst>(inst)));
+    }
+
+    template<typename InstVector>
+    void insertInsts(size_t index, const InstVector& insts)
+    {
+        for (const Inst& inst : insts)
+            insertInst(index, inst);
+    }
+    void insertInsts(size_t index, Vector<Inst>&&);
+    
+    template<typename... Arguments>
+    void insert(size_t index, Arguments&&... arguments)
+    {
+        insertInst(index, Inst(std::forward<Arguments>(arguments)...));
+    }
+
+    void execute(BasicBlock*);
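+
+    // Typical usage buffers insertions while walking a block, then applies them all in one
+    // pass. Sketch (needsFixupBefore, src, and dst are placeholders):
+    //
+    //     InsertionSet insertionSet(code);
+    //     for (unsigned i = 0; i < block->size(); ++i) {
+    //         if (needsFixupBefore(block->at(i)))
+    //             insertionSet.insert(i, Move, block->at(i).origin, src, dst);
+    //     }
+    //     insertionSet.execute(block);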
+
+private:
+    Code& m_code;
+    Vector<Insertion, 8> m_insertions;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirInst.cpp b/Source/JavaScriptCore/b3/air/AirInst.cpp
new file mode 100644
index 000000000..defb344b0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInst.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirInst.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirInstInlines.h"
+#include "B3Value.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool Inst::hasArgEffects()
+{
+    bool result = false;
+    forEachArg(
+        [&] (Arg&, Arg::Role role, Arg::Type, Arg::Width) {
+            if (Arg::isAnyDef(role))
+                result = true;
+        });
+    return result;
+}
+
+unsigned Inst::jsHash() const
+{
+    // FIXME: This should do something for flags.
+    // https://bugs.webkit.org/show_bug.cgi?id=162751
+    unsigned result = static_cast<unsigned>(kind.opcode);
+    
+    for (const Arg& arg : args)
+        result += arg.jsHash();
+    
+    return result;
+}
+
+void Inst::dump(PrintStream& out) const
+{
+    out.print(kind, " ", listDump(args));
+    if (origin) {
+        if (args.size())
+            out.print(", ");
+        out.print(*origin);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirInst.h b/Source/JavaScriptCore/b3/air/AirInst.h
new file mode 100644
index 000000000..f38c21df8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInst.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirKind.h"
+#include "CCallHelpers.h"
+
+namespace JSC {
+
+class CCallHelpers;
+class RegisterSet;
+
+namespace B3 {
+
+class Value;
+
+namespace Air {
+
+struct GenerationContext;
+
+struct Inst {
+public:
+    typedef Vector<Arg, 3> ArgList;
+
+    Inst()
+        : origin(nullptr)
+    {
+    }
+    
+    Inst(Kind kind, Value* origin)
+        : origin(origin)
+        , kind(kind)
+    {
+    }
+    
+    template<typename... Arguments>
+    Inst(Kind kind, Value* origin, Arg arg, Arguments... arguments)
+        : args{ arg, arguments... }
+        , origin(origin)
+        , kind(kind)
+    {
+    }
+
+    Inst(Kind kind, Value* origin, const ArgList& arguments)
+        : args(arguments)
+        , origin(origin)
+        , kind(kind)
+    {
+    }
+
+    Inst(Kind kind, Value* origin, ArgList&& arguments)
+        : args(WTFMove(arguments))
+        , origin(origin)
+        , kind(kind)
+    {
+    }
+
+    explicit operator bool() const { return origin || kind || args.size(); }
+
+    void append() { }
+    
+    template<typename... Arguments>
+    void append(Arg arg, Arguments... arguments)
+    {
+        args.append(arg);
+        append(arguments...);
+    }
+
+    // Note that these functors all avoid using "const" because we want to use them for things that
+    // edit IR. IR is meant to be edited; if you're carrying around a "const Inst&" then you're
+    // probably doing it wrong.
+
+    // This only walks those Tmps that are explicitly mentioned, and it doesn't tell you their role
+    // or type.
+    template<typename Functor>
+    void forEachTmpFast(const Functor& functor)
+    {
+        for (Arg& arg : args)
+            arg.forEachTmpFast(functor);
+    }
+
+    typedef void EachArgCallback(Arg&, Arg::Role, Arg::Type, Arg::Width);
+    
+    // Calls the functor with (arg, role, type, width). This function is auto-generated by
+    // opcode_generator.rb.
+    template<typename Functor>
+    void forEachArg(const Functor&);
+
+    // Calls the functor with (tmp, role, type, width).
+    template<typename Functor>
+    void forEachTmp(const Functor& functor)
+    {
+        forEachArg(
+            [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+                arg.forEachTmp(role, type, width, functor);
+            });
+    }
+
+    // Thing can be either Arg, Tmp, or StackSlot*.
+    template<typename Thing, typename Functor>
+    void forEach(const Functor&);
+
+    // Reports any additional registers clobbered by this operation. Note that for efficiency,
+    // extraClobberedRegs() only works for the Patch opcode.
+    RegisterSet extraClobberedRegs();
+    RegisterSet extraEarlyClobberedRegs();
+
+    // Iterate over all Def's that happen at the end of an instruction. You supply a pair of
+    // instructions. The instructions must appear next to each other, in that order, in some basic
+    // block. You can pass null for the first instruction when analyzing what happens at the top of
+    // a basic block. You can pass null for the second instruction when analyzing what happens at the
+    // bottom of a basic block.
+    template<typename Thing, typename Functor>
+    static void forEachDef(Inst* prevInst, Inst* nextInst, const Functor&);
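+    //
+    // For example, to log the Tmps defined between two neighboring instructions (sketch):
+    //
+    //     Inst::forEachDef<Tmp>(&prevInst, &nextInst,
+    //         [] (Tmp& tmp, Arg::Role, Arg::Type, Arg::Width) {
+    //             dataLog("defines: ", tmp, "\n");
+    //         });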
+
+    // Iterate over all Def's that happen at the end of this instruction, including extra clobbered
+    // registers. Note that Thing can only be Arg or Tmp when you use this functor.
+    template<typename Thing, typename Functor>
+    static void forEachDefWithExtraClobberedRegs(Inst* prevInst, Inst* nextInst, const Functor&);
+
+    // Use this to report which registers are live. This should be done just before codegen. Note
+    // that for efficiency, reportUsedRegisters() only works for the Patch opcode.
+    void reportUsedRegisters(const RegisterSet&);
+
+    // Is this instruction in one of the valid forms right now? This function is auto-generated by
+    // opcode_generator.rb.
+    bool isValidForm();
+
+    // Assuming this instruction is in a valid form right now, will it still be in one of the valid
+    // forms if we put an Addr referencing the stack (or a StackSlot or CallArg, of course) in the
+    // given index? Spilling uses this: it walks the args by index to find Tmps that need spilling;
+    // if it finds one, it calls this to see if it can replace the Arg::Tmp with an Arg::Addr. If it
+    // finds a non-Tmp Arg, then it calls that Arg's forEachTmp to do a replacement that way.
+    //
+    // This function is auto-generated by opcode_generator.rb.
+    bool admitsStack(unsigned argIndex);
+    bool admitsStack(Arg&);
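+    //
+    // For example, a spiller could do the following once it has picked a slot (someSlot is a
+    // placeholder):
+    //
+    //     if (inst.admitsStack(arg))
+    //         arg = Arg::stack(someSlot);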
+    
+    // Defined by opcode_generator.rb.
+    bool isTerminal();
+
+    // Returns true if this instruction can have any effects other than control flow or arguments.
+    bool hasNonArgNonControlEffects();
+
+    // Returns true if this instruction can have any effects other than what is implied by arguments.
+    // For example, "Move $42, (%rax)" will return false because the effect of storing to (%rax) is
+    // implied by the second argument.
+    bool hasNonArgEffects();
+
+    // Tells you if this operation has arg effects.
+    bool hasArgEffects();
+    
+    // Tells you if this operation has non-control effects.
+    bool hasNonControlEffects() { return hasNonArgNonControlEffects() || hasArgEffects(); }
+
+    // Generate some code for this instruction. This is, like, literally our backend. If this is the
+    // terminal, it returns the jump that needs to be linked for the "then" case, with the "else"
+    // case being fall-through. This function is auto-generated by opcode_generator.rb.
+    CCallHelpers::Jump generate(CCallHelpers&, GenerationContext&);
+
+    // If the source arguments benefit from being aliased to a destination argument,
+    // this returns the index of the destination argument.
+    // The sources are assumed to be at (index - 1) and (index - 2).
+    // For example,
+    //     Add Tmp1, Tmp2, Tmp3
+    // returns 2 if arguments 0 and 1 benefit from aliasing to Tmp3.
+    std::optional<unsigned> shouldTryAliasingDef();
+    
+    // This computes a hash for comparing this to JSAir's Inst.
+    unsigned jsHash() const;
+
+    void dump(PrintStream&) const;
+
+    ArgList args;
+    Value* origin; // The B3::Value that this originated from.
+    Kind kind;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirInstInlines.h b/Source/JavaScriptCore/b3/air/AirInstInlines.h
new file mode 100644
index 000000000..2d3da626f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInstInlines.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include "AirOpcodeUtils.h"
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<typename Thing, typename Functor>
+void Inst::forEach(const Functor& functor)
+{
+    forEachArg(
+        [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+            arg.forEach(role, type, width, functor);
+        });
+}
+
+inline RegisterSet Inst::extraClobberedRegs()
+{
+    ASSERT(kind.opcode == Patch);
+    return args[0].special()->extraClobberedRegs(*this);
+}
+
+inline RegisterSet Inst::extraEarlyClobberedRegs()
+{
+    ASSERT(kind.opcode == Patch);
+    return args[0].special()->extraEarlyClobberedRegs(*this);
+}
+
+template<typename Thing, typename Functor>
+inline void Inst::forEachDef(Inst* prevInst, Inst* nextInst, const Functor& functor)
+{
+    if (prevInst) {
+        prevInst->forEach(
+            [&] (Thing& thing, Arg::Role role, Arg::Type argType, Arg::Width argWidth) {
+                if (Arg::isLateDef(role))
+                    functor(thing, role, argType, argWidth);
+            });
+    }
+
+    if (nextInst) {
+        nextInst->forEach(
+            [&] (Thing& thing, Arg::Role role, Arg::Type argType, Arg::Width argWidth) {
+                if (Arg::isEarlyDef(role))
+                    functor(thing, role, argType, argWidth);
+            });
+    }
+}
+
+template<typename Thing, typename Functor>
+inline void Inst::forEachDefWithExtraClobberedRegs(
+    Inst* prevInst, Inst* nextInst, const Functor& functor)
+{
+    forEachDef(prevInst, nextInst, functor);
+
+    Arg::Role regDefRole;
+    
+    auto reportReg = [&] (Reg reg) {
+        Arg::Type type = reg.isGPR() ? Arg::GP : Arg::FP;
+        functor(Thing(reg), regDefRole, type, Arg::conservativeWidth(type));
+    };
+
+    if (prevInst && prevInst->kind.opcode == Patch) {
+        regDefRole = Arg::Def;
+        prevInst->extraClobberedRegs().forEach(reportReg);
+    }
+
+    if (nextInst && nextInst->kind.opcode == Patch) {
+        regDefRole = Arg::EarlyDef;
+        nextInst->extraEarlyClobberedRegs().forEach(reportReg);
+    }
+}
+
+inline void Inst::reportUsedRegisters(const RegisterSet& usedRegisters)
+{
+    ASSERT(kind.opcode == Patch);
+    args[0].special()->reportUsedRegisters(*this, usedRegisters);
+}
+
+inline bool Inst::admitsStack(Arg& arg)
+{
+    return admitsStack(&arg - &args[0]);
+}
+
+inline std::optional<unsigned> Inst::shouldTryAliasingDef()
+{
+    if (!isX86())
+        return std::nullopt;
+
+    switch (kind.opcode) {
+    case Add32:
+    case Add64:
+    case And32:
+    case And64:
+    case Mul32:
+    case Mul64:
+    case Or32:
+    case Or64:
+    case Xor32:
+    case Xor64:
+    case AndFloat:
+    case AndDouble:
+    case OrFloat:
+    case OrDouble:
+    case XorDouble:
+    case XorFloat:
+        if (args.size() == 3)
+            return 2;
+        break;
+    case AddDouble:
+    case AddFloat:
+    case MulDouble:
+    case MulFloat:
+#if CPU(X86) || CPU(X86_64)
+        if (MacroAssembler::supportsAVX())
+            return std::nullopt;
+#endif
+        if (args.size() == 3)
+            return 2;
+        break;
+    case BranchAdd32:
+    case BranchAdd64:
+        if (args.size() == 4)
+            return 3;
+        break;
+    case MoveConditionally32:
+    case MoveConditionally64:
+    case MoveConditionallyTest32:
+    case MoveConditionallyTest64:
+    case MoveConditionallyDouble:
+    case MoveConditionallyFloat:
+    case MoveDoubleConditionally32:
+    case MoveDoubleConditionally64:
+    case MoveDoubleConditionallyTest32:
+    case MoveDoubleConditionallyTest64:
+    case MoveDoubleConditionallyDouble:
+    case MoveDoubleConditionallyFloat:
+        if (args.size() == 6)
+            return 5;
+        break;
+    case Patch:
+        return PatchCustom::shouldTryAliasingDef(*this);
+    default:
+        break;
+    }
+    return std::nullopt;
+}
+
+inline bool isShiftValid(const Inst& inst)
+{
+#if CPU(X86) || CPU(X86_64)
+    return inst.args[0] == Tmp(X86Registers::ecx);
+#else
+    UNUSED_PARAM(inst);
+    return true;
+#endif
+}
+
+inline bool isLshift32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isLshift64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRshift32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRshift64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isUrshift32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isUrshift64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRotateRight32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRotateLeft32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRotateRight64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRotateLeft64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isX86DivHelperValid(const Inst& inst)
+{
+#if CPU(X86) || CPU(X86_64)
+    return inst.args[0] == Tmp(X86Registers::eax)
+        && inst.args[1] == Tmp(X86Registers::edx);
+#else
+    UNUSED_PARAM(inst);
+    return false;
+#endif
+}
+
+inline bool isX86ConvertToDoubleWord32Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86ConvertToQuadWord64Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86Div32Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86UDiv32Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86Div64Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86UDiv64Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp b/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp
new file mode 100644
index 000000000..7e81b5e01
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp
@@ -0,0 +1,1656 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirIteratedRegisterCoalescing.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPadInterference.h"
+#include "AirPhaseScope.h"
+#include "AirTmpInlines.h"
+#include "AirTmpWidth.h"
+#include "AirUseCounts.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool debug = false;
+bool traceDebug = false;
+bool reportStats = false;
+
+// The AbstractColoringAllocator defines all the code that is independent
+// of the type of register and can be shared when allocating registers.
+template<typename IndexType>
+class AbstractColoringAllocator {
+public:
+    AbstractColoringAllocator(const Vector<Reg>& regsInPriorityOrder, IndexType lastPrecoloredRegisterIndex, unsigned tmpArraySize, const HashSet<unsigned>& unspillableTmp)
+        : m_regsInPriorityOrder(regsInPriorityOrder)
+        , m_lastPrecoloredRegisterIndex(lastPrecoloredRegisterIndex)
+        , m_unspillableTmps(unspillableTmp)
+    {
+        for (Reg reg : m_regsInPriorityOrder)
+            m_mutableRegs.set(reg);
+        
+        initializeDegrees(tmpArraySize);
+        
+        m_adjacencyList.resize(tmpArraySize);
+        m_moveList.resize(tmpArraySize);
+        m_coalescedTmps.fill(0, tmpArraySize);
+        m_isOnSelectStack.ensureSize(tmpArraySize);
+    }
+
+protected:
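+    // Aliases form chains as moves get coalesced: if Tmp index 5 was coalesced into 3, and 3
+    // later into 1, then getAlias(5) walks 5 -> 3 -> 1 and returns 1.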
+    IndexType getAlias(IndexType tmpIndex) const
+    {
+        IndexType alias = tmpIndex;
+        while (IndexType nextAlias = m_coalescedTmps[alias])
+            alias = nextAlias;
+        return alias;
+    }
+
+    void addEdge(IndexType a, IndexType b)
+    {
+        if (a == b)
+            return;
+        addEdgeDistinct(a, b);
+    }
+
+    void makeWorkList()
+    {
+        IndexType firstNonRegIndex = m_lastPrecoloredRegisterIndex + 1;
+        for (IndexType i = firstNonRegIndex; i < m_degrees.size(); ++i) {
+            unsigned degree = m_degrees[i];
+            if (degree >= m_regsInPriorityOrder.size())
+                addToSpill(i);
+            else if (!m_moveList[i].isEmpty())
+                m_freezeWorklist.add(i);
+            else
+                m_simplifyWorklist.append(i);
+        }
+    }
+
+    void addToSpill(unsigned toSpill)
+    {
+        if (m_unspillableTmps.contains(toSpill))
+            return;
+
+        m_spillWorklist.add(toSpill);
+    }
+
+    // A low-degree vertex can always be colored: just pick any color not taken by its
+    // adjacent vertices.
+    // The "Simplify" phase takes a low-degree vertex out of the interference graph to simplify it.
+    void simplify()
+    {
+        IndexType lastIndex = m_simplifyWorklist.takeLast();
+
+        ASSERT(!m_selectStack.contains(lastIndex));
+        ASSERT(!m_isOnSelectStack.get(lastIndex));
+        m_selectStack.append(lastIndex);
+        m_isOnSelectStack.quickSet(lastIndex);
+
+        forEachAdjacent(lastIndex, [this](IndexType adjacentTmpIndex) {
+            decrementDegree(adjacentTmpIndex);
+        });
+    }
+
+    void freeze()
+    {
+        IndexType victimIndex = m_freezeWorklist.takeAny();
+        ASSERT_WITH_MESSAGE(getAlias(victimIndex) == victimIndex, "coalesce() should not leave aliased Tmp in the worklist.");
+        m_simplifyWorklist.append(victimIndex);
+        freezeMoves(victimIndex);
+    }
+
+    void freezeMoves(IndexType tmpIndex)
+    {
+        forEachNodeMoves(tmpIndex, [this, tmpIndex] (IndexType moveIndex) {
+            if (!m_activeMoves.quickClear(moveIndex))
+                m_worklistMoves.takeMove(moveIndex);
+
+            const MoveOperands& moveOperands = m_coalescingCandidates[moveIndex];
+            IndexType srcTmpIndex = moveOperands.srcIndex;
+            IndexType dstTmpIndex = moveOperands.dstIndex;
+
+            IndexType originalOtherTmp = srcTmpIndex != tmpIndex ? srcTmpIndex : dstTmpIndex;
+            IndexType otherTmpIndex = getAlias(originalOtherTmp);
+            if (m_degrees[otherTmpIndex] < m_regsInPriorityOrder.size() && !isMoveRelated(otherTmpIndex)) {
+                if (m_freezeWorklist.remove(otherTmpIndex))
+                    m_simplifyWorklist.append(otherTmpIndex);
+            }
+        });
+    }
+
+    void coalesce()
+    {
+        unsigned moveIndex = m_worklistMoves.takeLastMove();
+        const MoveOperands& moveOperands = m_coalescingCandidates[moveIndex];
+        IndexType u = getAlias(moveOperands.srcIndex);
+        IndexType v = getAlias(moveOperands.dstIndex);
+
+        if (isPrecolored(v))
+            std::swap(u, v);
+
+        if (traceDebug)
+            dataLog("Coalescing move at index ", moveIndex, " u = ", u, " v = ", v, "\n");
+
+        if (u == v) {
+            addWorkList(u);
+
+            if (traceDebug)
+                dataLog("    Coalesced\n");
+        } else if (isPrecolored(v)
+            || m_interferenceEdges.contains(InterferenceEdge(u, v))
+            || (u == m_framePointerIndex && m_interferesWithFramePointer.quickGet(v))) {
+            addWorkList(u);
+            addWorkList(v);
+
+            if (traceDebug)
+                dataLog("    Constrained\n");
+        } else if (canBeSafelyCoalesced(u, v)) {
+            combine(u, v);
+            addWorkList(u);
+            m_hasCoalescedNonTrivialMove = true;
+
+            if (traceDebug)
+                dataLog("    Safe Coalescing\n");
+        } else {
+            m_activeMoves.quickSet(moveIndex);
+
+            if (traceDebug)
+                dataLog("    Failed coalescing, added to active moves.\n");
+        }
+    }
+
+    void assignColors()
+    {
+        ASSERT(m_simplifyWorklist.isEmpty());
+        ASSERT(m_worklistMoves.isEmpty());
+        ASSERT(m_freezeWorklist.isEmpty());
+        ASSERT(m_spillWorklist.isEmpty());
+
+        // Reclaim as much memory as possible.
+        m_interferenceEdges.clear();
+        m_degrees.clear();
+        m_moveList.clear();
+        m_worklistMoves.clear();
+        m_simplifyWorklist.clear();
+        m_spillWorklist.clear();
+        m_freezeWorklist.clear();
+
+        // Try to color the Tmp on the stack.
+        m_coloredTmp.resize(m_adjacencyList.size());
+
+        while (!m_selectStack.isEmpty()) {
+            unsigned tmpIndex = m_selectStack.takeLast();
+            ASSERT(!isPrecolored(tmpIndex));
+            ASSERT(!m_coloredTmp[tmpIndex]);
+
+            RegisterSet coloredRegisters;
+            for (IndexType adjacentTmpIndex : m_adjacencyList[tmpIndex]) {
+                IndexType aliasTmpIndex = getAlias(adjacentTmpIndex);
+                Reg reg = m_coloredTmp[aliasTmpIndex];
+
+                ASSERT(!isPrecolored(aliasTmpIndex) || (isPrecolored(aliasTmpIndex) && reg));
+
+                if (reg)
+                    coloredRegisters.set(reg);
+            }
+
+            bool colorAssigned = false;
+            for (Reg reg : m_regsInPriorityOrder) {
+                if (!coloredRegisters.get(reg)) {
+                    m_coloredTmp[tmpIndex] = reg;
+                    colorAssigned = true;
+                    break;
+                }
+            }
+
+            if (!colorAssigned)
+                m_spilledTmps.append(tmpIndex);
+        }
+        m_selectStack.clear();
+
+        if (m_spilledTmps.isEmpty())
+            m_coalescedTmpsAtSpill.clear();
+        else
+            m_coloredTmp.clear();
+    }
+
+private:
+    void initializeDegrees(unsigned tmpArraySize)
+    {
+        m_degrees.resize(tmpArraySize);
+
+        // All precolored registers have an "infinite" degree.
+        unsigned firstNonRegIndex = m_lastPrecoloredRegisterIndex + 1;
+        for (unsigned i = 0; i < firstNonRegIndex; ++i)
+            m_degrees[i] = std::numeric_limits<unsigned>::max();
+
+        memset(m_degrees.data() + firstNonRegIndex, 0, (tmpArraySize - firstNonRegIndex) * sizeof(unsigned));
+    }
+
+    void addEdgeDistinct(IndexType a, IndexType b)
+    {
+        ASSERT(a != b);
+        if (m_interferenceEdges.add(InterferenceEdge(a, b)).isNewEntry) {
+            if (!isPrecolored(a)) {
+                ASSERT(!m_adjacencyList[a].contains(b));
+                m_adjacencyList[a].append(b);
+                m_degrees[a]++;
+            }
+
+            if (!isPrecolored(b)) {
+                ASSERT(!m_adjacencyList[b].contains(a));
+                m_adjacencyList[b].append(a);
+                m_degrees[b]++;
+            }
+        }
+    }
+
+    void decrementDegree(IndexType tmpIndex)
+    {
+        ASSERT(m_degrees[tmpIndex]);
+
+        unsigned oldDegree = m_degrees[tmpIndex]--;
+        if (oldDegree == m_regsInPriorityOrder.size()) {
+            enableMovesOnValueAndAdjacents(tmpIndex);
+            m_spillWorklist.remove(tmpIndex);
+            if (isMoveRelated(tmpIndex))
+                m_freezeWorklist.add(tmpIndex);
+            else
+                m_simplifyWorklist.append(tmpIndex);
+        }
+    }
+
+
+    bool addEdgeDistinctWithoutDegreeChange(IndexType a, IndexType b)
+    {
+        ASSERT(a != b);
+        if (m_interferenceEdges.add(InterferenceEdge(a, b)).isNewEntry) {
+            if (!isPrecolored(a)) {
+                ASSERT(!m_adjacencyList[a].contains(b));
+                m_adjacencyList[a].append(b);
+            }
+
+            if (!isPrecolored(b)) {
+                ASSERT(!m_adjacencyList[b].contains(a));
+                m_adjacencyList[b].append(a);
+            }
+            return true;
+        }
+        return false;
+    }
+
+    bool isMoveRelated(IndexType tmpIndex)
+    {
+        for (unsigned moveIndex : m_moveList[tmpIndex]) {
+            if (m_activeMoves.quickGet(moveIndex) || m_worklistMoves.contains(moveIndex))
+                return true;
+        }
+        return false;
+    }
+
+    template<typename Function>
+    void forEachAdjacent(IndexType tmpIndex, Function function)
+    {
+        for (IndexType adjacentTmpIndex : m_adjacencyList[tmpIndex]) {
+            if (!hasBeenSimplified(adjacentTmpIndex))
+                function(adjacentTmpIndex);
+        }
+    }
+
+    bool hasBeenSimplified(IndexType tmpIndex)
+    {
+        return m_isOnSelectStack.quickGet(tmpIndex) || !!m_coalescedTmps[tmpIndex];
+    }
+
+    template<typename Function>
+    void forEachNodeMoves(IndexType tmpIndex, Function function)
+    {
+        for (unsigned moveIndex : m_moveList[tmpIndex]) {
+            if (m_activeMoves.quickGet(moveIndex) || m_worklistMoves.contains(moveIndex))
+                function(moveIndex);
+        }
+    }
+
+    void enableMovesOnValue(IndexType tmpIndex)
+    {
+        for (unsigned moveIndex : m_moveList[tmpIndex]) {
+            if (m_activeMoves.quickClear(moveIndex))
+                m_worklistMoves.returnMove(moveIndex);
+        }
+    }
+
+    void enableMovesOnValueAndAdjacents(IndexType tmpIndex)
+    {
+        enableMovesOnValue(tmpIndex);
+
+        forEachAdjacent(tmpIndex, [this] (IndexType adjacentTmpIndex) {
+            enableMovesOnValue(adjacentTmpIndex);
+        });
+    }
+
+    bool isPrecolored(IndexType tmpIndex)
+    {
+        return tmpIndex <= m_lastPrecoloredRegisterIndex;
+    }
+
+    void addWorkList(IndexType tmpIndex)
+    {
+        if (!isPrecolored(tmpIndex) && m_degrees[tmpIndex] < m_regsInPriorityOrder.size() && !isMoveRelated(tmpIndex)) {
+            m_freezeWorklist.remove(tmpIndex);
+            m_simplifyWorklist.append(tmpIndex);
+        }
+    }
+
+    void combine(IndexType u, IndexType v)
+    {
+        if (!m_freezeWorklist.remove(v))
+            m_spillWorklist.remove(v);
+
+        ASSERT(!m_coalescedTmps[v]);
+        m_coalescedTmps[v] = u;
+
+        auto& vMoves = m_moveList[v];
+        m_moveList[u].add(vMoves.begin(), vMoves.end());
+
+        forEachAdjacent(v, [this, u] (IndexType adjacentTmpIndex) {
+            if (addEdgeDistinctWithoutDegreeChange(adjacentTmpIndex, u)) {
+                // If we added a new edge between the adjacentTmp and u, it replaces the edge
+                // that existed with v.
+                // The degree of adjacentTmp remains the same since the edge just changed from v to u.
+                // All we need to do is update the degree of u.
+                if (!isPrecolored(u))
+                    m_degrees[u]++;
+            } else {
+                // If we already had an edge between the adjacentTmp and u, the degree of u
+                // is already correct. The degree of the adjacentTmp decreases since the edge
+                // with v is no longer relevant (we can think of it as merged with the edge with u).
+                decrementDegree(adjacentTmpIndex);
+            }
+        });
+
+        if (m_framePointerIndex && m_interferesWithFramePointer.quickGet(v))
+            m_interferesWithFramePointer.quickSet(u);
+
+        if (m_degrees[u] >= m_regsInPriorityOrder.size() && m_freezeWorklist.remove(u))
+            addToSpill(u);
+    }
+
+    bool canBeSafelyCoalesced(IndexType u, IndexType v)
+    {
+        ASSERT(!isPrecolored(v));
+        if (isPrecolored(u))
+            return precoloredCoalescingHeuristic(u, v);
+        return conservativeHeuristic(u, v);
+    }
+
+    bool conservativeHeuristic(IndexType u, IndexType v)
+    {
+        // This is using Briggs' conservative coalescing rule:
+        // If the number of combined adjacent nodes with a degree >= K is less than K,
+        // it is safe to combine the two nodes. The reason is that we know that if the graph
+        // is colorable, we have fewer than K adjacents of high degree and there is a color
+        // left for the current node.
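+        //
+        // For example, with K = 4: if u and v combined have only three distinct neighbors of
+        // degree >= 4, all the other neighbors can be simplified away first, so the merged
+        // node is still guaranteed a free color.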
+        ASSERT(u != v);
+        ASSERT(!isPrecolored(u));
+        ASSERT(!isPrecolored(v));
+
+        const auto& adjacentsOfU = m_adjacencyList[u];
+        const auto& adjacentsOfV = m_adjacencyList[v];
+
+        if (adjacentsOfU.size() + adjacentsOfV.size() < m_regsInPriorityOrder.size()) {
+            // Shortcut: if the total number of adjacents is less than the number of registers, the condition is always met.
+            return true;
+        }
+
+        HashSet highOrderAdjacents;
+
+        for (IndexType adjacentTmpIndex : adjacentsOfU) {
+            ASSERT(adjacentTmpIndex != v);
+            ASSERT(adjacentTmpIndex != u);
+            if (!hasBeenSimplified(adjacentTmpIndex) && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()) {
+                auto addResult = highOrderAdjacents.add(adjacentTmpIndex);
+                if (addResult.isNewEntry && highOrderAdjacents.size() >= m_regsInPriorityOrder.size())
+                    return false;
+            }
+        }
+        for (IndexType adjacentTmpIndex : adjacentsOfV) {
+            ASSERT(adjacentTmpIndex != u);
+            ASSERT(adjacentTmpIndex != v);
+            if (!hasBeenSimplified(adjacentTmpIndex) && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()) {
+                auto addResult = highOrderAdjacents.add(adjacentTmpIndex);
+                if (addResult.isNewEntry && highOrderAdjacents.size() >= m_regsInPriorityOrder.size())
+                    return false;
+            }
+        }
+
+        ASSERT(highOrderAdjacents.size() < m_regsInPriorityOrder.size());
+        return true;
+    }
+
+    bool precoloredCoalescingHeuristic(IndexType u, IndexType v)
+    {
+        if (traceDebug)
+            dataLog("    Checking precoloredCoalescingHeuristic\n");
+        ASSERT(isPrecolored(u));
+        ASSERT(!isPrecolored(v));
+        
+        // If u is a pinned register then it's always safe to coalesce. Note that when we call this,
+        // we have already proved that there is no interference between u and v.
+        if (!m_mutableRegs.get(m_coloredTmp[u]))
+            return true;
+
+        // If any adjacent of the non-colored node is not an adjacent of the colored node AND has a degree >= K
+        // there is a risk that this node needs to have the same color as our precolored node. If we coalesce such
+        // move, we may create an uncolorable graph.
+        const auto& adjacentsOfV = m_adjacencyList[v];
+        for (unsigned adjacentTmpIndex : adjacentsOfV) {
+            if (!isPrecolored(adjacentTmpIndex)
+                && !hasBeenSimplified(adjacentTmpIndex)
+                && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()
+                && !m_interferenceEdges.contains(InterferenceEdge(u, adjacentTmpIndex)))
+                return false;
+        }
+        return true;
+    }
+
+protected:
+#if PLATFORM(COCOA)
+#pragma mark -
+#endif
+
+    // Interference edges are not directed. An edge between any two Tmps is represented
+    // by the concatenated values of the smallest Tmp followed by the bigger Tmp.
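+    // For example, InterferenceEdge(7, 3) and InterferenceEdge(3, 7) both pack to
+    // (3 << 32) | 7, so first() returns 3 and second() returns 7 in either case.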
+    class InterferenceEdge {
+    public:
+        InterferenceEdge()
+        {
+        }
+
+        InterferenceEdge(IndexType a, IndexType b)
+        {
+            ASSERT(a);
+            ASSERT(b);
+            ASSERT_WITH_MESSAGE(a != b, "A Tmp can never interfere with itself. Doing so would force it to be the superposition of two registers.");
+
+            if (b < a)
+                std::swap(a, b);
+            m_value = static_cast<uint64_t>(a) << 32 | b;
+        }
+
+        InterferenceEdge(WTF::HashTableDeletedValueType)
+            : m_value(std::numeric_limits<uint64_t>::max())
+        {
+        }
+
+        IndexType first() const
+        {
+            return m_value >> 32 & 0xffffffff;
+        }
+
+        IndexType second() const
+        {
+            return m_value & 0xffffffff;
+        }
+
+        bool operator==(const InterferenceEdge other) const
+        {
+            return m_value == other.m_value;
+        }
+
+        bool isHashTableDeletedValue() const
+        {
+            return *this == InterferenceEdge(WTF::HashTableDeletedValue);
+        }
+
+        unsigned hash() const
+        {
+            return WTF::IntHash<uint64_t>::hash(m_value);
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(first(), "<=>", second());
+        }
+
+    private:
+        uint64_t m_value { 0 };
+    };
+
+    struct InterferenceEdgeHash {
+        static unsigned hash(const InterferenceEdge& key) { return key.hash(); }
+        static bool equal(const InterferenceEdge& a, const InterferenceEdge& b) { return a == b; }
+        static const bool safeToCompareToEmptyOrDeleted = true;
+    };
+    typedef SimpleClassHashTraits<InterferenceEdge> InterferenceEdgeHashTraits;
+
+    const Vector<Reg>& m_regsInPriorityOrder;
+    RegisterSet m_mutableRegs;
+    IndexType m_lastPrecoloredRegisterIndex { 0 };
+
+    // The interference graph.
+    HashSet<InterferenceEdge, InterferenceEdgeHash, InterferenceEdgeHashTraits> m_interferenceEdges;
+    Vector<Vector<IndexType>, 0, UnsafeVectorOverflow> m_adjacencyList;
+    Vector<IndexType> m_degrees;
+
+    // Instead of keeping track of the move instructions, we just keep their operands around and use the index
+    // in the vector as the "identifier" for the move.
+    struct MoveOperands {
+        IndexType srcIndex;
+        IndexType dstIndex;
+    };
+    Vector<MoveOperands> m_coalescingCandidates;
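+    // For example, the candidate with identifier i keeps its source and destination
+    // Tmp indices in m_coalescingCandidates[i]; that same i is the value stored in
+    // m_moveList and in the move work lists below.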
+
+    // List of every move instruction associated with a Tmp.
+    Vector<HashSet<unsigned, DefaultHash<unsigned>::Hash, WTF::UnsignedWithZeroKeyHashTraits<unsigned>>> m_moveList;
+
+    // Colors.
+    Vector<Reg> m_coloredTmp;
+    Vector<IndexType> m_spilledTmps;
+
+    // Values that have been coalesced with another value.
+    Vector<IndexType> m_coalescedTmps;
+
+    // The stack of Tmp removed from the graph and ready for coloring.
+    BitVector m_isOnSelectStack;
+    Vector<IndexType> m_selectStack;
+
+    IndexType m_framePointerIndex { 0 };
+    BitVector m_interferesWithFramePointer;
+
+    struct OrderedMoveSet {
+        unsigned addMove()
+        {
+            ASSERT(m_lowPriorityMoveList.isEmpty());
+            ASSERT(!m_firstLowPriorityMoveIndex);
+
+            unsigned nextIndex = m_positionInMoveList.size();
+            unsigned position = m_moveList.size();
+            m_moveList.append(nextIndex);
+            m_positionInMoveList.append(position);
+            return nextIndex;
+        }
+
+        void startAddingLowPriorityMoves()
+        {
+            ASSERT(m_lowPriorityMoveList.isEmpty());
+            m_firstLowPriorityMoveIndex = m_moveList.size();
+        }
+
+        unsigned addLowPriorityMove()
+        {
+            ASSERT(m_firstLowPriorityMoveIndex == m_moveList.size());
+
+            unsigned nextIndex = m_positionInMoveList.size();
+            unsigned position = m_lowPriorityMoveList.size();
+            m_lowPriorityMoveList.append(nextIndex);
+            m_positionInMoveList.append(position);
+
+            ASSERT(nextIndex >= m_firstLowPriorityMoveIndex);
+
+            return nextIndex;
+        }
+
+        bool isEmpty() const
+        {
+            return m_moveList.isEmpty() && m_lowPriorityMoveList.isEmpty();
+        }
+
+        bool contains(unsigned index)
+        {
+            return m_positionInMoveList[index] != std::numeric_limits<unsigned>::max();
+        }
+
+        void takeMove(unsigned moveIndex)
+        {
+            unsigned positionInMoveList = m_positionInMoveList[moveIndex];
+            if (positionInMoveList == std::numeric_limits<unsigned>::max())
+                return;
+
+            if (moveIndex < m_firstLowPriorityMoveIndex) {
+                ASSERT(m_moveList[positionInMoveList] == moveIndex);
+                unsigned lastIndex = m_moveList.last();
+                m_positionInMoveList[lastIndex] = positionInMoveList;
+                m_moveList[positionInMoveList] = lastIndex;
+                m_moveList.removeLast();
+            } else {
+                ASSERT(m_lowPriorityMoveList[positionInMoveList] == moveIndex);
+                unsigned lastIndex = m_lowPriorityMoveList.last();
+                m_positionInMoveList[lastIndex] = positionInMoveList;
+                m_lowPriorityMoveList[positionInMoveList] = lastIndex;
+                m_lowPriorityMoveList.removeLast();
+            }
+
+            m_positionInMoveList[moveIndex] = std::numeric_limits<unsigned>::max();
+
+            ASSERT(!contains(moveIndex));
+        }
+
+        unsigned takeLastMove()
+        {
+            ASSERT(!isEmpty());
+
+            unsigned lastIndex;
+            if (!m_moveList.isEmpty()) {
+                lastIndex = m_moveList.takeLast();
+                ASSERT(m_positionInMoveList[lastIndex] == m_moveList.size());
+            } else {
+                lastIndex = m_lowPriorityMoveList.takeLast();
+                ASSERT(m_positionInMoveList[lastIndex] == m_lowPriorityMoveList.size());
+            }
+            m_positionInMoveList[lastIndex] = std::numeric_limits<unsigned>::max();
+
+            ASSERT(!contains(lastIndex));
+            return lastIndex;
+        }
+
+        void returnMove(unsigned index)
+        {
+            // This assertion is a bit strict but that is how the move list should be used. The only kind of moves that can
+            // return to the list are the ones that we previously failed to coalesce with the conservative heuristics.
+            // Values should not be added back if they were never taken out when attempting coalescing.
+            ASSERT(!contains(index));
+
+            if (index < m_firstLowPriorityMoveIndex) {
+                unsigned position = m_moveList.size();
+                m_moveList.append(index);
+                m_positionInMoveList[index] = position;
+            } else {
+                unsigned position = m_lowPriorityMoveList.size();
+                m_lowPriorityMoveList.append(index);
+                m_positionInMoveList[index] = position;
+            }
+
+            ASSERT(contains(index));
+        }
+
+        void clear()
+        {
+            m_positionInMoveList.clear();
+            m_moveList.clear();
+            m_lowPriorityMoveList.clear();
+        }
+
+    private:
+        Vector<unsigned> m_positionInMoveList;
+        Vector<unsigned> m_moveList;
+        Vector<unsigned> m_lowPriorityMoveList;
+        unsigned m_firstLowPriorityMoveIndex { 0 };
+    };
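+
+    // A short trace of OrderedMoveSet's constant-time removal (assuming three
+    // addMove() calls followed by startAddingLowPriorityMoves()): m_moveList is
+    // [0, 1, 2] and m_positionInMoveList is [0, 1, 2]. takeMove(1) then swaps the
+    // last identifier into the vacated slot, leaving m_moveList == [0, 2] and
+    // m_positionInMoveList == [0, UINT_MAX, 1], so contains(1) becomes false while
+    // the surviving moves keep valid positions.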
+
+    // Work lists.
+    // Set of "move" enabled for possible coalescing.
+    OrderedMoveSet m_worklistMoves;
+    // Set of "move" not yet ready for coalescing.
+    BitVector m_activeMoves;
+    // Low-degree, non-Move related.
+    Vector<IndexType> m_simplifyWorklist;
+    // High-degree Tmp.
+    HashSet<IndexType> m_spillWorklist;
+    // Low-degree, Move related.
+    HashSet<IndexType> m_freezeWorklist;
+
+    bool m_hasSelectedSpill { false };
+    bool m_hasCoalescedNonTrivialMove { false };
+
+    // The mapping of Tmp to their alias for Moves that are always coalescing regardless of spilling.
+    Vector<IndexType> m_coalescedTmpsAtSpill;
+    
+    const HashSet<unsigned>& m_unspillableTmps;
+};
+
+// This performs all the tasks that are specific to a certain register type.
+template<Arg::Type type>
+class ColoringAllocator : public AbstractColoringAllocator<unsigned> {
+public:
+    ColoringAllocator(Code& code, TmpWidth& tmpWidth, const UseCounts<Tmp>& useCounts, const HashSet<unsigned>& unspillableTmp)
+        : AbstractColoringAllocator<unsigned>(code.regsInPriorityOrder(type), AbsoluteTmpMapper<type>::lastMachineRegisterIndex(), tmpArraySize(code), unspillableTmp)
+        , m_code(code)
+        , m_tmpWidth(tmpWidth)
+        , m_useCounts(useCounts)
+    {
+        if (type == Arg::GP) {
+            m_framePointerIndex = AbsoluteTmpMapper<type>::absoluteIndex(Tmp(MacroAssembler::framePointerRegister));
+            m_interferesWithFramePointer.ensureSize(tmpArraySize(code));
+        }
+
+        initializePrecoloredTmp();
+        build();
+        allocate();
+    }
+
+    Tmp getAlias(Tmp tmp) const
+    {
+        return AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(getAlias(AbsoluteTmpMapper<type>::absoluteIndex(tmp)));
+    }
+
+    // This tells you if a Move will be coalescable if the src and dst end up matching. This method
+    // relies on an analysis that is invalidated by register allocation, so it's only meaningful to
+    // call this *before* replacing the Tmp's in this Inst with registers or spill slots.
+    bool mayBeCoalescable(const Inst& inst) const
+    {
+        return mayBeCoalescableImpl(inst, &m_tmpWidth);
+    }
+
+    bool isUselessMove(const Inst& inst) const
+    {
+        return mayBeCoalescableImpl(inst, nullptr) && inst.args[0].tmp() == inst.args[1].tmp();
+    }
+
+    Tmp getAliasWhenSpilling(Tmp tmp) const
+    {
+        ASSERT_WITH_MESSAGE(!m_spilledTmps.isEmpty(), "This function is only valid for coalescing during spilling.");
+
+        if (m_coalescedTmpsAtSpill.isEmpty())
+            return tmp;
+
+        unsigned aliasIndex = AbsoluteTmpMapper<type>::absoluteIndex(tmp);
+        while (unsigned nextAliasIndex = m_coalescedTmpsAtSpill[aliasIndex])
+            aliasIndex = nextAliasIndex;
+
+        Tmp alias = AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(aliasIndex);
+
+        ASSERT_WITH_MESSAGE(!m_spilledTmps.contains(aliasIndex) || alias == tmp, "The aliases at spill should always be colorable. Something went horribly wrong.");
+
+        return alias;
+    }
+
+    template<typename IndexIterator>
+    class IndexToTmpIteratorAdaptor {
+    public:
+        IndexToTmpIteratorAdaptor(IndexIterator&& indexIterator)
+            : m_indexIterator(WTFMove(indexIterator))
+        {
+        }
+
+        Tmp operator*() const { return AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(*m_indexIterator); }
+        IndexToTmpIteratorAdaptor& operator++() { ++m_indexIterator; return *this; }
+
+        bool operator==(const IndexToTmpIteratorAdaptor& other) const
+        {
+            return m_indexIterator == other.m_indexIterator;
+        }
+
+        bool operator!=(const IndexToTmpIteratorAdaptor& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        IndexIterator m_indexIterator;
+    };
+
+    template<typename Collection>
+    class IndexToTmpIterableAdaptor {
+    public:
+        IndexToTmpIterableAdaptor(const Collection& collection)
+            : m_collection(collection)
+        {
+        }
+
+        IndexToTmpIteratorAdaptor<typename Collection::const_iterator> begin() const
+        {
+            return m_collection.begin();
+        }
+
+        IndexToTmpIteratorAdaptor<typename Collection::const_iterator> end() const
+        {
+            return m_collection.end();
+        }
+
+    private:
+        const Collection& m_collection;
+    };
+
+    IndexToTmpIterableAdaptor<Vector<unsigned>> spilledTmps() const { return m_spilledTmps; }
+
+    bool requiresSpilling() const { return !m_spilledTmps.isEmpty(); }
+
+    Reg allocatedReg(Tmp tmp) const
+    {
+        ASSERT(!tmp.isReg());
+        ASSERT(m_coloredTmp.size());
+        ASSERT(tmp.isGP() == (type == Arg::GP));
+
+        Reg reg = m_coloredTmp[AbsoluteTmpMapper<type>::absoluteIndex(tmp)];
+        if (!reg) {
+            dataLog("FATAL: No color for ", tmp, "\n");
+            dataLog("Code:\n");
+            dataLog(m_code);
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+        return reg;
+    }
+
+private:
+    static unsigned tmpArraySize(Code& code)
+    {
+        unsigned numTmps = code.numTmps(type);
+        return AbsoluteTmpMapper<type>::absoluteIndex(numTmps);
+    }
+
+    void initializePrecoloredTmp()
+    {
+        m_coloredTmp.resize(m_lastPrecoloredRegisterIndex + 1);
+        for (unsigned i = 1; i <= m_lastPrecoloredRegisterIndex; ++i) {
+            Tmp tmp = AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(i);
+            ASSERT(tmp.isReg());
+            m_coloredTmp[i] = tmp.reg();
+        }
+    }
+
+    bool mayBeCoalesced(Arg left, Arg right)
+    {
+        if (!left.isTmp() || !right.isTmp())
+            return false;
+
+        Tmp leftTmp = left.tmp();
+        Tmp rightTmp = right.tmp();
+
+        if (leftTmp == rightTmp)
+            return false;
+
+        if (leftTmp.isGP() != (type == Arg::GP) || rightTmp.isGP() != (type == Arg::GP))
+            return false;
+
+        unsigned leftIndex = AbsoluteTmpMapper<type>::absoluteIndex(leftTmp);
+        unsigned rightIndex = AbsoluteTmpMapper<type>::absoluteIndex(rightTmp);
+
+        return !m_interferenceEdges.contains(InterferenceEdge(leftIndex, rightIndex));
+    }
+
+    void addToLowPriorityCoalescingCandidates(Arg left, Arg right)
+    {
+        ASSERT(mayBeCoalesced(left, right));
+        Tmp leftTmp = left.tmp();
+        Tmp rightTmp = right.tmp();
+
+        unsigned leftIndex = AbsoluteTmpMapper<type>::absoluteIndex(leftTmp);
+        unsigned rightIndex = AbsoluteTmpMapper<type>::absoluteIndex(rightTmp);
+
+        unsigned nextMoveIndex = m_coalescingCandidates.size();
+        m_coalescingCandidates.append({ leftIndex, rightIndex });
+
+        unsigned newIndexInWorklist = m_worklistMoves.addLowPriorityMove();
+        ASSERT_UNUSED(newIndexInWorklist, newIndexInWorklist == nextMoveIndex);
+
+        ASSERT(nextMoveIndex <= m_activeMoves.size());
+        m_activeMoves.ensureSize(nextMoveIndex + 1);
+
+        m_moveList[leftIndex].add(nextMoveIndex);
+        m_moveList[rightIndex].add(nextMoveIndex);
+    }
+
+    void build()
+    {
+        TmpLiveness<type> liveness(m_code);
+        for (BasicBlock* block : m_code) {
+            typename TmpLiveness<type>::LocalCalc localCalc(liveness, block);
+            for (unsigned instIndex = block->size(); instIndex--;) {
+                Inst& inst = block->at(instIndex);
+                Inst* nextInst = block->get(instIndex + 1);
+                build(&inst, nextInst, localCalc);
+                localCalc.execute(instIndex);
+            }
+            build(nullptr, &block->at(0), localCalc);
+        }
+        buildLowPriorityMoveList();
+    }
+
+    void build(Inst* prevInst, Inst* nextInst, const typename TmpLiveness<type>::LocalCalc& localCalc)
+    {
+        if (traceDebug)
+            dataLog("Building between ", pointerDump(prevInst), " and ", pointerDump(nextInst), ":\n");
+        Inst::forEachDefWithExtraClobberedRegs<Tmp>(
+            prevInst, nextInst,
+            [&] (const Tmp& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+                if (argType != type)
+                    return;
+                
+                // All the Def()s interfere with each other and with all the extra clobbered Tmps.
+                // We should not use forEachDefWithExtraClobberedRegs() here since colored Tmps
+                // do not need interference edges in our implementation.
+                Inst::forEachDef<Tmp>(
+                    prevInst, nextInst,
+                    [&] (Tmp& otherArg, Arg::Role, Arg::Type argType, Arg::Width) {
+                        if (argType != type)
+                            return;
+                        
+                        if (traceDebug)
+                            dataLog("    Adding def-def edge: ", arg, ", ", otherArg, "\n");
+                        this->addEdge(arg, otherArg);
+                    });
+            });
+
+        if (prevInst && mayBeCoalescable(*prevInst)) {
+            // We do not want the Use() of this move to interfere with the Def(), even if it is live
+            // after the Move. If we were to add the interference edge, it would be impossible to
+            // coalesce the Move even if the two Tmp never interfere anywhere.
+            Tmp defTmp;
+            Tmp useTmp;
+            prevInst->forEachTmp([&defTmp, &useTmp] (Tmp& argTmp, Arg::Role role, Arg::Type, Arg::Width) {
+                if (Arg::isLateDef(role))
+                    defTmp = argTmp;
+                else {
+                    ASSERT(Arg::isEarlyUse(role));
+                    useTmp = argTmp;
+                }
+            });
+            ASSERT(defTmp);
+            ASSERT(useTmp);
+
+            unsigned nextMoveIndex = m_coalescingCandidates.size();
+            m_coalescingCandidates.append({ AbsoluteTmpMapper<type>::absoluteIndex(useTmp), AbsoluteTmpMapper<type>::absoluteIndex(defTmp) });
+
+            unsigned newIndexInWorklist = m_worklistMoves.addMove();
+            ASSERT_UNUSED(newIndexInWorklist, newIndexInWorklist == nextMoveIndex);
+
+            ASSERT(nextMoveIndex <= m_activeMoves.size());
+            m_activeMoves.ensureSize(nextMoveIndex + 1);
+
+            for (const Arg& arg : prevInst->args) {
+                auto& list = m_moveList[AbsoluteTmpMapper<type>::absoluteIndex(arg.tmp())];
+                list.add(nextMoveIndex);
+            }
+
+            for (const Tmp& liveTmp : localCalc.live()) {
+                if (liveTmp != useTmp) {
+                    if (traceDebug)
+                        dataLog("    Adding def-live for coalescable: ", defTmp, ", ", liveTmp, "\n");
+                    addEdge(defTmp, liveTmp);
+                }
+            }
+
+            // The next instruction could have early clobbers or early def's. We need to consider
+            // those now.
+            addEdges(nullptr, nextInst, localCalc.live());
+        } else
+            addEdges(prevInst, nextInst, localCalc.live());
+    }
+
+    void buildLowPriorityMoveList()
+    {
+        if (!isX86())
+            return;
+
+        m_worklistMoves.startAddingLowPriorityMoves();
+        for (BasicBlock* block : m_code) {
+            for (Inst& inst : *block) {
+                if (std::optional<unsigned> defArgIndex = inst.shouldTryAliasingDef()) {
+                    Arg op1 = inst.args[*defArgIndex - 2];
+                    Arg op2 = inst.args[*defArgIndex - 1];
+                    Arg dest = inst.args[*defArgIndex];
+
+                    if (op1 == dest || op2 == dest)
+                        continue;
+
+                    if (mayBeCoalesced(op1, dest))
+                        addToLowPriorityCoalescingCandidates(op1, dest);
+                    if (op1 != op2 && mayBeCoalesced(op2, dest))
+                        addToLowPriorityCoalescingCandidates(op2, dest);
+                }
+            }
+        }
+    }
+
+    void addEdges(Inst* prevInst, Inst* nextInst, typename TmpLiveness<type>::LocalCalc::Iterable liveTmps)
+    {
+        // All the Def()s interfere with everything live.
+        Inst::forEachDefWithExtraClobberedRegs<Tmp>(
+            prevInst, nextInst,
+            [&] (const Tmp& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+                if (argType != type)
+                    return;
+                
+                for (const Tmp& liveTmp : liveTmps) {
+                    ASSERT(liveTmp.isGP() == (type == Arg::GP));
+                    
+                    if (traceDebug)
+                        dataLog("    Adding def-live edge: ", arg, ", ", liveTmp, "\n");
+                    
+                    addEdge(arg, liveTmp);
+                }
+
+                if (type == Arg::GP && !arg.isGPR())
+                    m_interferesWithFramePointer.quickSet(AbsoluteTmpMapper<type>::absoluteIndex(arg));
+            });
+    }
+
+    void addEdge(Tmp a, Tmp b)
+    {
+        ASSERT_WITH_MESSAGE(a.isGP() == b.isGP(), "An interference between registers of different types does not make sense, it can lead to non-colorable graphs.");
+
+        addEdge(AbsoluteTmpMapper<type>::absoluteIndex(a), AbsoluteTmpMapper<type>::absoluteIndex(b));
+    }
+
+    // Calling this without a tmpWidth will perform a more conservative coalescing analysis that assumes
+    // that Move32's are not coalescable.
+    static bool mayBeCoalescableImpl(const Inst& inst, TmpWidth* tmpWidth)
+    {
+        switch (type) {
+        case Arg::GP:
+            switch (inst.kind.opcode) {
+            case Move:
+            case Move32:
+                break;
+            default:
+                return false;
+            }
+            break;
+        case Arg::FP:
+            switch (inst.kind.opcode) {
+            case MoveFloat:
+            case MoveDouble:
+                break;
+            default:
+                return false;
+            }
+            break;
+        }
+
+        ASSERT_WITH_MESSAGE(inst.args.size() == 2, "We assume coalescable moves only have two arguments in a few places.");
+
+        if (!inst.args[0].isTmp() || !inst.args[1].isTmp())
+            return false;
+
+        ASSERT(inst.args[0].type() == type);
+        ASSERT(inst.args[1].type() == type);
+
+        // We can coalesce a Move32 so long as either of the following holds:
+        // - The input is already zero-filled.
+        // - The output only cares about the low 32 bits.
+        //
+        // Note that the input property requires an analysis over ZDef's, so it's only valid so long
+        // as the input gets a register. We don't know if the input gets a register, but we do know
+        // that if it doesn't get a register then we will still emit this Move32.
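+        //
+        // Concretely, for "Move32 %src, %dst": a def width of %src that is <= 32 bits
+        // means the upper bits are already zero, and a use width of %dst that is
+        // <= 32 bits means nobody observes the upper bits; in either case the Move32
+        // behaves like a full Move, so coalescing it is sound.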
+        if (inst.kind.opcode == Move32) {
+            if (!tmpWidth)
+                return false;
+
+            if (tmpWidth->defWidth(inst.args[0].tmp()) > Arg::Width32
+                && tmpWidth->useWidth(inst.args[1].tmp()) > Arg::Width32)
+                return false;
+        }
+        
+        return true;
+    }
+
+    void selectSpill()
+    {
+        if (!m_hasSelectedSpill) {
+            m_hasSelectedSpill = true;
+
+            if (m_hasCoalescedNonTrivialMove)
+                m_coalescedTmpsAtSpill = m_coalescedTmps;
+        }
+
+        auto iterator = m_spillWorklist.begin();
+
+        RELEASE_ASSERT_WITH_MESSAGE(iterator != m_spillWorklist.end(), "selectSpill() called when there was no spill.");
+        RELEASE_ASSERT_WITH_MESSAGE(!m_unspillableTmps.contains(*iterator), "trying to spill unspillable tmp");
+
+        // Higher score means more desirable to spill. Lower scores maximize the likelihood that a tmp
+        // gets a register.
+        auto score = [&] (Tmp tmp) -> double {
+            // Air exposes the concept of "fast tmps", and we interpret that to mean that the tmp
+            // should always be in a register.
+            if (m_code.isFastTmp(tmp))
+                return 0;
+            
+            // All else being equal, the score should be directly related to the degree.
+            double degree = static_cast<double>(m_degrees[AbsoluteTmpMapper<type>::absoluteIndex(tmp)]);
+
+            // All else being equal, the score should be inversely related to the number of warm uses and
+            // defs.
+            const UseCounts<Tmp>::Counts* counts = m_useCounts[tmp];
+            if (!counts)
+                return std::numeric_limits<double>::infinity();
+            
+            double uses = counts->numWarmUses + counts->numDefs;
+
+            // If it's a constant, then it's not as bad to spill. We can rematerialize it in many
+            // cases.
+            if (counts->numConstDefs == 1 && counts->numDefs == 1)
+                uses /= 2;
+
+            return degree / uses;
+        };
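+        // For example, a Tmp with degree 10, 4 warm uses and 1 def scores 10 / 5 = 2.
+        // If that single def is a constant, uses is halved and the score doubles to 4,
+        // making the cheaply rematerializable Tmp the preferred victim.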
+
+        auto victimIterator = iterator;
+        double maxScore = score(AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(*iterator));
+
+        ++iterator;
+        for (; iterator != m_spillWorklist.end(); ++iterator) {
+            double tmpScore = score(AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(*iterator));
+            if (tmpScore > maxScore) {
+                ASSERT(!m_unspillableTmps.contains(*iterator));
+                victimIterator = iterator;
+                maxScore = tmpScore;
+            }
+        }
+
+        unsigned victimIndex = *victimIterator;
+        m_spillWorklist.remove(victimIterator);
+        m_simplifyWorklist.append(victimIndex);
+
+        freezeMoves(victimIndex);
+    }
+
+    void allocate()
+    {
+        ASSERT_WITH_MESSAGE(m_activeMoves.size() >= m_coalescingCandidates.size(), "The activeMove set should be big enough for the quick operations of BitVector.");
+
+        makeWorkList();
+
+        if (debug) {
+            dataLog("Interference: ", listDump(m_interferenceEdges), "\n");
+            dumpInterferenceGraphInDot(WTF::dataFile());
+            dataLog("Coalescing candidates:\n");
+            for (MoveOperands& moveOp : m_coalescingCandidates) {
+                dataLog("    ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(moveOp.srcIndex),
+                    " -> ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(moveOp.dstIndex), "\n");
+            }
+            dataLog("Initial work list\n");
+            dumpWorkLists(WTF::dataFile());
+        }
+
+        do {
+            if (traceDebug) {
+                dataLog("Before Graph simplification iteration\n");
+                dumpWorkLists(WTF::dataFile());
+            }
+
+            if (!m_simplifyWorklist.isEmpty())
+                simplify();
+            else if (!m_worklistMoves.isEmpty())
+                coalesce();
+            else if (!m_freezeWorklist.isEmpty())
+                freeze();
+            else if (!m_spillWorklist.isEmpty())
+                selectSpill();
+
+            if (traceDebug) {
+                dataLog("After Graph simplification iteration\n");
+                dumpWorkLists(WTF::dataFile());
+            }
+        } while (!m_simplifyWorklist.isEmpty() || !m_worklistMoves.isEmpty() || !m_freezeWorklist.isEmpty() || !m_spillWorklist.isEmpty());
+
+        assignColors();
+    }
+
+#if PLATFORM(COCOA)
+#pragma mark - Debugging helpers.
+#endif
+
+    void dumpInterferenceGraphInDot(PrintStream& out)
+    {
+        out.print("graph InterferenceGraph { \n");
+
+        HashSet<Tmp> tmpsWithInterferences;
+        for (const auto& edge : m_interferenceEdges) {
+            tmpsWithInterferences.add(AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(edge.first()));
+            tmpsWithInterferences.add(AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(edge.second()));
+        }
+
+        for (const auto& tmp : tmpsWithInterferences) {
+            unsigned tmpIndex = AbsoluteTmpMapper<type>::absoluteIndex(tmp);
+            if (tmpIndex < m_degrees.size())
+                out.print("    ", tmp.internalValue(), " [label=\"", tmp, " (", m_degrees[tmpIndex], ")\"];\n");
+            else
+                out.print("    ", tmp.internalValue(), " [label=\"", tmp, "\"];\n");
+        }
+
+        for (const auto& edge : m_interferenceEdges)
+            out.print("    ", edge.first(), " -- ", edge.second(), ";\n");
+        out.print("}\n");
+    }
+
+    void dumpWorkLists(PrintStream& out)
+    {
+        out.print("Simplify work list:\n");
+        for (unsigned tmpIndex : m_simplifyWorklist)
+            out.print("    ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(tmpIndex), "\n");
+        out.printf("Moves work list is empty? %d\n", m_worklistMoves.isEmpty());
+        out.print("Freeze work list:\n");
+        for (unsigned tmpIndex : m_freezeWorklist)
+            out.print("    ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(tmpIndex), "\n");
+        out.print("Spill work list:\n");
+        for (unsigned tmpIndex : m_spillWorklist)
+            out.print("    ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(tmpIndex), "\n");
+    }
+
+    using AbstractColoringAllocator<unsigned>::addEdge;
+    using AbstractColoringAllocator<unsigned>::getAlias;
+
+    Code& m_code;
+    TmpWidth& m_tmpWidth;
+    // FIXME: spilling should not be type specific. It is only a side effect of using UseCounts.
+    const UseCounts<Tmp>& m_useCounts;
+};
+
+class IteratedRegisterCoalescing {
+public:
+    IteratedRegisterCoalescing(Code& code)
+        : m_code(code)
+        , m_useCounts(code)
+    {
+    }
+
+    void run()
+    {
+        padInterference(m_code);
+        
+        iteratedRegisterCoalescingOnType<Arg::GP>();
+        iteratedRegisterCoalescingOnType<Arg::FP>();
+
+        fixSpillsAfterTerminals();
+
+        if (reportStats)
+            dataLog("Num iterations = ", m_numIterations, "\n");
+    }
+
+private:
+    template<Arg::Type type>
+    void iteratedRegisterCoalescingOnType()
+    {
+        HashSet<unsigned> unspillableTmps = computeUnspillableTmps<type>();
+
+        // FIXME: If a Tmp is used only from a Scratch role and that argument is !admitsStack, then
+        // we should add the Tmp to unspillableTmps. That will help avoid relooping only to turn the
+        // Tmp into an unspillable Tmp.
+        // https://bugs.webkit.org/show_bug.cgi?id=152699
+        
+        while (true) {
+            ++m_numIterations;
+
+            if (traceDebug)
+                dataLog("Code at iteration ", m_numIterations, ":\n", m_code);
+
+            // FIXME: One way to optimize this code is to remove the recomputation inside the fixpoint.
+            // We need to recompute because spilling adds tmps, but we could just update tmpWidth when we
+            // add those tmps. Note that one easy way to remove the recomputation is to make any newly
+            // added Tmps get the same use/def widths that the original Tmp got. But, this may hurt the
+            // spill code we emit. Since we currently recompute TmpWidth after spilling, the newly
+            // created Tmps may get narrower use/def widths. On the other hand, the spiller already
+            // selects which move instruction to use based on the original Tmp's widths, so it may not
+            // matter that a subsequent iteration sees a conservative width for the new Tmps. Also, the
+            // recomputation may not actually be a performance problem; it's likely that a better way to
+            // improve performance of TmpWidth is to replace its HashMap with something else. It's
+            // possible that most of the TmpWidth overhead is from queries of TmpWidth rather than the
+            // recomputation, in which case speeding up the lookup would be a bigger win.
+            // https://bugs.webkit.org/show_bug.cgi?id=152478
+            m_tmpWidth.recompute(m_code);
+            
+            ColoringAllocator<type> allocator(m_code, m_tmpWidth, m_useCounts, unspillableTmps);
+            if (!allocator.requiresSpilling()) {
+                assignRegistersToTmp(allocator);
+                if (traceDebug)
+                    dataLog("Successfull allocation at iteration ", m_numIterations, ":\n", m_code);
+
+                return;
+            }
+            addSpillAndFill(allocator, unspillableTmps);
+        }
+    }
+
+    template<Arg::Type type>
+    HashSet<unsigned> computeUnspillableTmps()
+    {
+        HashSet<unsigned> unspillableTmps;
+
+        struct Range {
+            unsigned first { std::numeric_limits<unsigned>::max() };
+            unsigned last { 0 };
+            unsigned count { 0 };
+            unsigned admitStackCount { 0 };
+        };
+
+        unsigned numTmps = m_code.numTmps(type);
+        unsigned arraySize = AbsoluteTmpMapper<type>::absoluteIndex(numTmps);
+
+        Vector<Range> ranges;
+        ranges.fill(Range(), arraySize);
+
+        unsigned globalIndex = 0;
+        for (BasicBlock* block : m_code) {
+            for (Inst& inst : *block) {
+                inst.forEachArg([&] (Arg& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+                    if (arg.isTmp() && inst.admitsStack(arg)) {
+                        if (argType != type)
+                            return;
+
+                        Tmp tmp = arg.tmp();
+                        Range& range = ranges[AbsoluteTmpMapper<type>::absoluteIndex(tmp)];
+                        range.count++;
+                        range.admitStackCount++;
+                        if (globalIndex < range.first) {
+                            range.first = globalIndex;
+                            range.last = globalIndex;
+                        } else
+                            range.last = globalIndex;
+
+                        return;
+                    }
+
+                    arg.forEachTmpFast([&] (Tmp& tmp) {
+                        if (tmp.isGP() != (type == Arg::GP))
+                            return;
+
+                        Range& range = ranges[AbsoluteTmpMapper<type>::absoluteIndex(tmp)];
+                        range.count++;
+                        if (globalIndex < range.first) {
+                            range.first = globalIndex;
+                            range.last = globalIndex;
+                        } else
+                            range.last = globalIndex;
+                    });
+                });
+
+                ++globalIndex;
+            }
+            ++globalIndex;
+        }
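+
+        // A Tmp is unspillable when its whole live range spans at most one
+        // instruction boundary and it has at least one appearance that does not
+        // admit a stack address (count > admitStackCount): spilling such a Tmp
+        // would only force a reload in the very same spot.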
+        for (unsigned i = AbsoluteTmpMapper<type>::lastMachineRegisterIndex() + 1; i < ranges.size(); ++i) {
+            Range& range = ranges[i];
+            if (range.last - range.first <= 1 && range.count > range.admitStackCount)
+                unspillableTmps.add(i);
+        }
+
+        return unspillableTmps;
+    }
+
+    template<Arg::Type type>
+    void assignRegistersToTmp(const ColoringAllocator<type>& allocator)
+    {
+        for (BasicBlock* block : m_code) {
+            // Give Tmp a valid register.
+            for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+                Inst& inst = block->at(instIndex);
+
+                // The mayBeCoalescable() method will change its mind for some operations after we
+                // complete register allocation. So, we record this before starting.
+                bool mayBeCoalescable = allocator.mayBeCoalescable(inst);
+
+                // Move32 is cheaper if we know that it's equivalent to a Move. It's
+                // equivalent if the destination's high bits are not observable or if the source's high
+                // bits are all zero. Note that we don't have the opposite optimization for other
+                // architectures, which may prefer Move over Move32, because Move is canonical already.
+                if (type == Arg::GP && inst.kind.opcode == Move
+                    && inst.args[0].isTmp() && inst.args[1].isTmp()) {
+                    if (m_tmpWidth.useWidth(inst.args[1].tmp()) <= Arg::Width32
+                        || m_tmpWidth.defWidth(inst.args[0].tmp()) <= Arg::Width32)
+                        inst.kind.opcode = Move32;
+                }
+
+                inst.forEachTmpFast([&] (Tmp& tmp) {
+                    if (tmp.isReg() || tmp.isGP() == (type != Arg::GP))
+                        return;
+
+                    Tmp aliasTmp = allocator.getAlias(tmp);
+                    Tmp assignedTmp;
+                    if (aliasTmp.isReg())
+                        assignedTmp = Tmp(aliasTmp.reg());
+                    else {
+                        auto reg = allocator.allocatedReg(aliasTmp);
+                        ASSERT(reg);
+                        assignedTmp = Tmp(reg);
+                    }
+                    ASSERT(assignedTmp.isReg());
+                    tmp = assignedTmp;
+                });
+
+                if (mayBeCoalescable && inst.args[0].isTmp() && inst.args[1].isTmp()
+                    && inst.args[0].tmp() == inst.args[1].tmp())
+                    inst = Inst();
+            }
+
+            // Remove all the useless moves we created in this block.
+            block->insts().removeAllMatching([&] (const Inst& inst) {
+                return !inst;
+            });
+        }
+    }
+
+    static unsigned stackSlotMinimumWidth(Arg::Width width)
+    {
+        return width <= Arg::Width32 ? 4 : 8;
+    }
+
+    template<Arg::Type type>
+    void addSpillAndFill(const ColoringAllocator<type>& allocator, HashSet<unsigned>& unspillableTmps)
+    {
+        HashMap<Tmp, StackSlot*> stackSlots;
+        for (Tmp tmp : allocator.spilledTmps()) {
+            // All the spilled values become unspillable.
+            unspillableTmps.add(AbsoluteTmpMapper<type>::absoluteIndex(tmp));
+
+            // Allocate stack slot for each spilled value.
+            StackSlot* stackSlot = m_code.addStackSlot(
+                stackSlotMinimumWidth(m_tmpWidth.requiredWidth(tmp)), StackSlotKind::Spill);
+            bool isNewTmp = stackSlots.add(tmp, stackSlot).isNewEntry;
+            ASSERT_UNUSED(isNewTmp, isNewTmp);
+        }
+
+        // Rewrite the program to get rid of the spilled Tmp.
+        InsertionSet insertionSet(m_code);
+        for (BasicBlock* block : m_code) {
+            bool hasAliasedTmps = false;
+
+            for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+                Inst& inst = block->at(instIndex);
+
+                // The TmpWidth analysis will say that a Move only stores 32 bits into the destination,
+                // if the source only had 32 bits worth of non-zero bits. Same for the source: it will
+                // only claim to read 32 bits from the source if only 32 bits of the destination are
+                // read. Note that we only apply this logic if this turns into a load or store, since
+                // Move is the canonical way to move data between GPRs.
+                bool canUseMove32IfDidSpill = false;
+                bool didSpill = false;
+                if (type == Arg::GP && inst.kind.opcode == Move) {
+                    if ((inst.args[0].isTmp() && m_tmpWidth.width(inst.args[0].tmp()) <= Arg::Width32)
+                        || (inst.args[1].isTmp() && m_tmpWidth.width(inst.args[1].tmp()) <= Arg::Width32))
+                        canUseMove32IfDidSpill = true;
+                }
+
+                // Try to replace the register use by memory use when possible.
+                inst.forEachArg(
+                    [&] (Arg& arg, Arg::Role role, Arg::Type argType, Arg::Width width) {
+                        if (!arg.isTmp())
+                            return;
+                        if (argType != type)
+                            return;
+                        if (arg.isReg())
+                            return;
+                        
+                        auto stackSlotEntry = stackSlots.find(arg.tmp());
+                        if (stackSlotEntry == stackSlots.end())
+                            return;
+                        if (!inst.admitsStack(arg))
+                            return;
+                        
+                        // If the Tmp holds a constant then we want to rematerialize its
+                        // value rather than loading it from the stack. In order for that
+                        // optimization to kick in, we need to avoid placing the Tmp's stack
+                        // address into the instruction.
+                        if (!Arg::isColdUse(role)) {
+                            const UseCounts<Tmp>::Counts* counts = m_useCounts[arg.tmp()];
+                            if (counts && counts->numConstDefs == 1 && counts->numDefs == 1)
+                                return;
+                        }
+                        
+                        Arg::Width spillWidth = m_tmpWidth.requiredWidth(arg.tmp());
+                        if (Arg::isAnyDef(role) && width < spillWidth)
+                            return;
+                        ASSERT(inst.kind.opcode == Move || !(Arg::isAnyUse(role) && width > spillWidth));
+                        
+                        if (spillWidth != Arg::Width32)
+                            canUseMove32IfDidSpill = false;
+                        
+                        stackSlotEntry->value->ensureSize(
+                            canUseMove32IfDidSpill ? 4 : Arg::bytes(width));
+                        arg = Arg::stack(stackSlotEntry->value);
+                        didSpill = true;
+                    });
+
+                if (didSpill && canUseMove32IfDidSpill)
+                    inst.kind.opcode = Move32;
+
+                // For every other case, add Load/Store as needed.
+                inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type argType, Arg::Width) {
+                    if (tmp.isReg() || argType != type)
+                        return;
+
+                    auto stackSlotEntry = stackSlots.find(tmp);
+                    if (stackSlotEntry == stackSlots.end()) {
+                        Tmp alias = allocator.getAliasWhenSpilling(tmp);
+                        if (alias != tmp) {
+                            tmp = alias;
+                            hasAliasedTmps = true;
+                        }
+                        return;
+                    }
+
+                    Arg::Width spillWidth = m_tmpWidth.requiredWidth(tmp);
+                    Opcode move = Oops;
+                    switch (stackSlotMinimumWidth(spillWidth)) {
+                    case 4:
+                        move = type == Arg::GP ? Move32 : MoveFloat;
+                        break;
+                    case 8:
+                        move = type == Arg::GP ? Move : MoveDouble;
+                        break;
+                    default:
+                        RELEASE_ASSERT_NOT_REACHED();
+                        break;
+                    }
+
+                    tmp = m_code.newTmp(type);
+                    unspillableTmps.add(AbsoluteTmpMapper<type>::absoluteIndex(tmp));
+
+                    Arg arg = Arg::stack(stackSlotEntry->value);
+                    if (Arg::isAnyUse(role) && role != Arg::Scratch)
+                        insertionSet.insert(instIndex, move, inst.origin, arg, tmp);
+                    if (Arg::isAnyDef(role))
+                        insertionSet.insert(instIndex + 1, move, inst.origin, tmp, arg);
+                });
+            }
+            insertionSet.execute(block);
+
+            if (hasAliasedTmps) {
+                block->insts().removeAllMatching([&] (const Inst& inst) {
+                    return allocator.isUselessMove(inst);
+                });
+            }
+        }
+    }
+
+    void fixSpillsAfterTerminals()
+    {
+        // Because there may be terminals that produce values, IRC may
+        // want to spill those terminals. When that happens, the spill code lands after
+        // the terminal. If we left the graph in this state, it'd be invalid
+        // because a terminal must be the last instruction in a block.
+        // We fix that here.
+
+        InsertionSet insertionSet(m_code);
+
+        bool addedBlocks = false;
+
+        for (BasicBlock* block : m_code) {
+            unsigned terminalIndex = block->size();
+            bool foundTerminal = false;
+            while (terminalIndex--) {
+                if (block->at(terminalIndex).isTerminal()) {
+                    foundTerminal = true;
+                    break;
+                }
+            }
+            ASSERT_UNUSED(foundTerminal, foundTerminal);
+
+            if (terminalIndex == block->size() - 1)
+                continue;
+
+            // There must be instructions after the terminal because it's not the last instruction.
+            ASSERT(terminalIndex < block->size() - 1);
+            Vector<Inst> instsToMove;
+            for (unsigned i = terminalIndex + 1; i < block->size(); i++)
+                instsToMove.append(block->at(i));
+            RELEASE_ASSERT(instsToMove.size());
+
+            for (FrequentedBlock& frequentedSuccessor : block->successors()) {
+                BasicBlock* successor = frequentedSuccessor.block();
+                // If successor's only predecessor is block, we can plant the spill inside
+                // the successor. Otherwise, we must split the critical edge and create
+                // a new block for the spill.
+                if (successor->numPredecessors() == 1) {
+                    insertionSet.insertInsts(0, instsToMove);
+                    insertionSet.execute(successor);
+                } else {
+                    addedBlocks = true;
+                    // FIXME: We probably want better block ordering here.
+                    BasicBlock* newBlock = m_code.addBlock();
+                    for (const Inst& inst : instsToMove)
+                        newBlock->appendInst(inst);
+                    newBlock->appendInst(Inst(Jump, instsToMove.last().origin));
+                    newBlock->successors().append(successor);
+                    frequentedSuccessor.block() = newBlock;
+                }
+            }
+
+            block->resize(terminalIndex + 1);
+        }
+
+        if (addedBlocks)
+            m_code.resetReachability();
+    }
+
+    Code& m_code;
+    TmpWidth m_tmpWidth;
+    UseCounts<Tmp> m_useCounts;
+    unsigned m_numIterations { 0 };
+};
+
+} // anonymous namespace
+
+void iteratedRegisterCoalescing(Code& code)
+{
+    PhaseScope phaseScope(code, "iteratedRegisterCoalescing");
+    
+    IteratedRegisterCoalescing iteratedRegisterCoalescing(code);
+    iteratedRegisterCoalescing.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h b/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h
new file mode 100644
index 000000000..ab689b35c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a register allocation phase based on Lal George and Andrew Appel's Iterated Register Coalescing:
+// http://www.cs.cmu.edu/afs/cs/academic/class/15745-s07/www/papers/george.pdf
+void iteratedRegisterCoalescing(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirKind.cpp b/Source/JavaScriptCore/b3/air/AirKind.cpp
new file mode 100644
index 000000000..9fe252538
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirKind.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirKind.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void Kind::dump(PrintStream& out) const
+{
+    out.print(opcode);
+    
+    CommaPrinter comma(", ", "<");
+    if (traps)
+        out.print(comma, "Traps");
+    if (comma.didPrint())
+        out.print(">");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirKind.h b/Source/JavaScriptCore/b3/air/AirKind.h
new file mode 100644
index 000000000..e723d4683
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirKind.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef AirKind_h
+#define AirKind_h
+
+#if ENABLE(B3_JIT)
+
+#include "AirOpcode.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+// Air opcodes are always carried around with some flags. These flags are understood as having no
+// meaning if they are set for an opcode to which they do not apply. This makes sense, since Air
+// is a complex instruction set and most of these flags can apply to basically any opcode. In
+// fact, it's recommended to only represent something as a flag if you believe that it is largely
+// opcode-agnostic.
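+//
+// For example (illustrative only), an instruction expected to trap would carry:
+//
+//     Kind kind(Move);
+//     kind.traps = true;
+//
+// and compares unequal to plain Kind(Move), since operator== below considers the
+// opcode together with all flag bits.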
+
+struct Kind {
+    Kind(Opcode opcode)
+        : opcode(opcode)
+        , traps(false)
+    {
+    }
+    
+    Kind()
+        : Kind(Nop)
+    {
+    }
+    
+    bool operator==(const Kind& other) const
+    {
+        return opcode == other.opcode
+            && traps == other.traps;
+    }
+    
+    bool operator!=(const Kind& other) const
+    {
+        return !(*this == other);
+    }
+    
+    unsigned hash() const
+    {
+        return static_cast<unsigned>(opcode) + (static_cast<unsigned>(traps) << 16);
+    }
+    
+    explicit operator bool() const
+    {
+        return *this != Kind();
+    }
+    
+    void dump(PrintStream&) const;
+    
+    Opcode opcode;
+    
+    // This is an opcode-agnostic flag that indicates that we expect that this instruction will
+    // trap. This causes the compiler to assume that this side-exits and therefore has non-control
+    // non-arg effects. This also causes the compiler to tell you about all of these instructions.
+    // Note that this is just one of several ways of supporting trapping in Air, and it's the less
+    // precise variant because it's origin-based. This means that if an instruction was fused out
+    // of B3 values that had different origins, then the origin at which you'll appear to trap
+    // will be somewhat random. The upside of this approach is that it imposes by far the least
+    // overhead on the compiler.
+    // FIXME: Make this completely work.
+    // https://bugs.webkit.org/show_bug.cgi?id=162689
+    bool traps : 1;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+#endif // AirKind_h
+
diff --git a/Source/JavaScriptCore/b3/air/AirLiveness.h b/Source/JavaScriptCore/b3/air/AirLiveness.h
new file mode 100644
index 000000000..e727c36c9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLiveness.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirStackSlot.h"
+#include "AirTmpInlines.h"
+#include <wtf/BitVector.h>
+#include <wtf/HashSet.h>
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSparseSet.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<Arg::Type adapterType>
+struct TmpLivenessAdapter {
+    typedef Tmp Thing;
+    typedef HashSet<unsigned> IndexSet;
+
+    TmpLivenessAdapter(Code&) { }
+
+    static unsigned numIndices(Code& code)
+    {
+        unsigned numTmps = code.numTmps(adapterType);
+        return AbsoluteTmpMapper<adapterType>::absoluteIndex(numTmps);
+    }
+    static bool acceptsType(Arg::Type type) { return type == adapterType; }
+    static unsigned valueToIndex(Tmp tmp) { return AbsoluteTmpMapper<adapterType>::absoluteIndex(tmp); }
+    static Tmp indexToValue(unsigned index) { return AbsoluteTmpMapper<adapterType>::tmpFromAbsoluteIndex(index); }
+};
+
+struct StackSlotLivenessAdapter {
+    typedef StackSlot* Thing;
+    typedef HashSet<unsigned, DefaultHash<unsigned>::Hash, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> IndexSet;
+
+    StackSlotLivenessAdapter(Code& code)
+        : m_code(code)
+    {
+    }
+
+    static unsigned numIndices(Code& code)
+    {
+        return code.stackSlots().size();
+    }
+    static bool acceptsType(Arg::Type) { return true; }
+    static unsigned valueToIndex(StackSlot* stackSlot) { return stackSlot->index(); }
+    StackSlot* indexToValue(unsigned index) { return m_code.stackSlots()[index]; }
+
+private:
+    Code& m_code;
+};
+
+struct RegLivenessAdapter {
+    typedef Reg Thing;
+    typedef BitVector IndexSet;
+
+    RegLivenessAdapter(Code&) { }
+
+    static unsigned numIndices(Code&)
+    {
+        return Reg::maxIndex() + 1;
+    }
+
+    static bool acceptsType(Arg::Type) { return true; }
+    static unsigned valueToIndex(Reg reg) { return reg.index(); }
+    Reg indexToValue(unsigned index) { return Reg::fromIndex(index); }
+};
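+
+// The adapters above plug into a single generic fixpoint, AbstractLiveness. For
+// instance, AbstractLiveness<TmpLivenessAdapter<Arg::GP>> tracks liveness of
+// general-purpose Tmps, while AbstractLiveness<StackSlotLivenessAdapter> tracks
+// spill slots; only the value-to-index mapping and the set representation differ.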
+
+template<typename Adapter>
+class AbstractLiveness : public Adapter {
+    struct Workset;
+public:
+    typedef typename Adapter::Thing Thing;
+    
+    AbstractLiveness(Code& code)
+        : Adapter(code)
+        , m_workset(Adapter::numIndices(code))
+        , m_liveAtHead(code.size())
+        , m_liveAtTail(code.size())
+    {
+        // The liveAtTail of each block automatically contains the LateUse's of the terminal.
+        for (BasicBlock* block : code) {
+            typename Adapter::IndexSet& liveAtTail = m_liveAtTail[block];
+
+            block->last().forEach(
+                [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                    if (Arg::isLateUse(role) && Adapter::acceptsType(type))
+                        liveAtTail.add(Adapter::valueToIndex(thing));
+                });
+        }
+
+        // Blocks with new live values at tail.
+        BitVector dirtyBlocks;
+        for (size_t blockIndex = 0; blockIndex < code.size(); ++blockIndex)
+            dirtyBlocks.set(blockIndex);
+
+        bool changed;
+        do {
+            changed = false;
+
+            for (size_t blockIndex = code.size(); blockIndex--;) {
+                BasicBlock* block = code.at(blockIndex);
+                if (!block)
+                    continue;
+
+                if (!dirtyBlocks.quickClear(blockIndex))
+                    continue;
+
+                LocalCalc localCalc(*this, block);
+                for (size_t instIndex = block->size(); instIndex--;)
+                    localCalc.execute(instIndex);
+
+                // Handle the early def's of the first instruction.
+                block->at(0).forEach(
+                    [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                        if (Arg::isEarlyDef(role) && Adapter::acceptsType(type))
+                            m_workset.remove(Adapter::valueToIndex(thing));
+                    });
+
+                Vector<unsigned>& liveAtHead = m_liveAtHead[block];
+
+                // We only care about Tmps that were discovered in this iteration. It is impossible
+                // to remove a live value from the head.
+                // We remove all the values we already knew about so that we only have to deal with
+                // what is new in LiveAtHead.
+                if (m_workset.size() == liveAtHead.size())
+                    m_workset.clear();
+                else {
+                    for (unsigned liveIndexAtHead : liveAtHead)
+                        m_workset.remove(liveIndexAtHead);
+                }
+
+                if (m_workset.isEmpty())
+                    continue;
+
+                liveAtHead.reserveCapacity(liveAtHead.size() + m_workset.size());
+                for (unsigned newValue : m_workset)
+                    liveAtHead.uncheckedAppend(newValue);
+
+                for (BasicBlock* predecessor : block->predecessors()) {
+                    typename Adapter::IndexSet& liveAtTail = m_liveAtTail[predecessor];
+                    for (unsigned newValue : m_workset) {
+                        if (liveAtTail.add(newValue)) {
+                            if (!dirtyBlocks.quickSet(predecessor->index()))
+                                changed = true;
+                        }
+                    }
+                }
+            }
+        } while (changed);
+    }
+
+    // This calculator has to be run in reverse.
+    class LocalCalc {
+    public:
+        LocalCalc(AbstractLiveness& liveness, BasicBlock* block)
+            : m_liveness(liveness)
+            , m_block(block)
+        {
+            auto& workset = liveness.m_workset;
+            workset.clear();
+            typename Adapter::IndexSet& liveAtTail = liveness.m_liveAtTail[block];
+            for (unsigned index : liveAtTail)
+                workset.add(index);
+        }
+
+        struct Iterator {
+            Iterator(Adapter& adapter, IndexSparseSet<UnsafeVectorOverflow>::const_iterator sparseSetIterator)
+                : m_adapter(adapter)
+                , m_sparseSetIterator(sparseSetIterator)
+            {
+            }
+
+            Iterator& operator++()
+            {
+                ++m_sparseSetIterator;
+                return *this;
+            }
+
+            typename Adapter::Thing operator*() const
+            {
+                return m_adapter.indexToValue(*m_sparseSetIterator);
+            }
+
+            bool operator==(const Iterator& other) { return m_sparseSetIterator == other.m_sparseSetIterator; }
+            bool operator!=(const Iterator& other) { return m_sparseSetIterator != other.m_sparseSetIterator; }
+
+        private:
+            Adapter& m_adapter;
+            IndexSparseSet<UnsafeVectorOverflow>::const_iterator m_sparseSetIterator;
+        };
+
+        struct Iterable {
+            Iterable(AbstractLiveness& liveness)
+                : m_liveness(liveness)
+            {
+            }
+
+            Iterator begin() const { return Iterator(m_liveness, m_liveness.m_workset.begin()); }
+            Iterator end() const { return Iterator(m_liveness, m_liveness.m_workset.end()); }
+            
+            bool contains(const typename Adapter::Thing& thing) const
+            {
+                return m_liveness.m_workset.contains(Adapter::valueToIndex(thing));
+            }
+
+        private:
+            AbstractLiveness& m_liveness;
+        };
+
+        Iterable live() const
+        {
+            return Iterable(m_liveness);
+        }
+
+        bool isLive(const typename Adapter::Thing& thing) const
+        {
+            return live().contains(thing);
+        }
+
+        void execute(unsigned instIndex)
+        {
+            Inst& inst = m_block->at(instIndex);
+            auto& workset = m_liveness.m_workset;
+
+            // First handle the early def's of the next instruction.
+            if (instIndex + 1 < m_block->size()) {
+                Inst& nextInst = m_block->at(instIndex + 1);
+                nextInst.forEach(
+                    [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                        if (Arg::isEarlyDef(role) && Adapter::acceptsType(type))
+                            workset.remove(Adapter::valueToIndex(thing));
+                    });
+            }
+            
+            // Then handle def's.
+            inst.forEach(
+                [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                    if (Arg::isLateDef(role) && Adapter::acceptsType(type))
+                        workset.remove(Adapter::valueToIndex(thing));
+                });
+
+            // Then handle use's.
+            inst.forEach(
+                [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                    if (Arg::isEarlyUse(role) && Adapter::acceptsType(type))
+                        workset.add(Adapter::valueToIndex(thing));
+                });
+
+            // And finally, handle the late use's of the previous instruction.
+            if (instIndex) {
+                Inst& prevInst = m_block->at(instIndex - 1);
+                prevInst.forEach(
+                    [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                        if (Arg::isLateUse(role) && Adapter::acceptsType(type))
+                            workset.add(Adapter::valueToIndex(thing));
+                    });
+            }
+        }
+
+    private:
+        AbstractLiveness& m_liveness;
+        BasicBlock* m_block;
+    };
+
+    const Vector<unsigned>& rawLiveAtHead(BasicBlock* block)
+    {
+        return m_liveAtHead[block];
+    }
+
+    template<typename UnderlyingIterable>
+    class Iterable {
+    public:
+        Iterable(AbstractLiveness& liveness, const UnderlyingIterable& iterable)
+            : m_liveness(liveness)
+            , m_iterable(iterable)
+        {
+        }
+
+        class iterator {
+        public:
+            iterator()
+                : m_liveness(nullptr)
+                , m_iter()
+            {
+            }
+            
+            iterator(AbstractLiveness& liveness, typename UnderlyingIterable::const_iterator iter)
+                : m_liveness(&liveness)
+                , m_iter(iter)
+            {
+            }
+
+            typename Adapter::Thing operator*()
+            {
+                return m_liveness->indexToValue(*m_iter);
+            }
+
+            iterator& operator++()
+            {
+                ++m_iter;
+                return *this;
+            }
+
+            bool operator==(const iterator& other) const
+            {
+                ASSERT(m_liveness == other.m_liveness);
+                return m_iter == other.m_iter;
+            }
+
+            bool operator!=(const iterator& other) const
+            {
+                return !(*this == other);
+            }
+
+        private:
+            AbstractLiveness* m_liveness;
+            typename UnderlyingIterable::const_iterator m_iter;
+        };
+
+        iterator begin() const { return iterator(m_liveness, m_iterable.begin()); }
+        iterator end() const { return iterator(m_liveness, m_iterable.end()); }
+
+        bool contains(const typename Adapter::Thing& thing) const
+        {
+            return m_liveness.m_workset.contains(Adapter::valueToIndex(thing));
+        }
+
+    private:
+        AbstractLiveness& m_liveness;
+        const UnderlyingIterable& m_iterable;
+    };
+
+    Iterable<Vector<unsigned>> liveAtHead(BasicBlock* block)
+    {
+        return Iterable<Vector<unsigned>>(*this, m_liveAtHead[block]);
+    }
+
+    Iterable<typename Adapter::IndexSet> liveAtTail(BasicBlock* block)
+    {
+        return Iterable<typename Adapter::IndexSet>(*this, m_liveAtTail[block]);
+    }
+
+    IndexSparseSet<UnsafeVectorOverflow>& workset() { return m_workset; }
+
+private:
+    friend class LocalCalc;
+    friend struct LocalCalc::Iterable;
+
+    IndexSparseSet<UnsafeVectorOverflow> m_workset;
+    IndexMap<BasicBlock, Vector<unsigned>> m_liveAtHead;
+    IndexMap<BasicBlock, typename Adapter::IndexSet> m_liveAtTail;
+};
+
+template<Arg::Type type>
+using TmpLiveness = AbstractLiveness<TmpLivenessAdapter<type>>;
+
+typedef AbstractLiveness<TmpLivenessAdapter<Arg::GP>> GPLiveness;
+typedef AbstractLiveness<TmpLivenessAdapter<Arg::FP>> FPLiveness;
+typedef AbstractLiveness<StackSlotLivenessAdapter> StackSlotLiveness;
+typedef AbstractLiveness<RegLivenessAdapter> RegLiveness;
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
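The fixpoint in AbstractLiveness's constructor is the classic backward liveness dataflow, specialized through the adapters and a dirty-block bit vector. As a reading aid, here is a minimal standalone sketch of the same scheme on a toy CFG; ToyBlock and its fields are illustrative stand-ins, not Air types.

    #include <set>
    #include <vector>

    struct ToyBlock {
        std::vector<unsigned> uses;           // values the block reads
        std::vector<unsigned> defs;           // values the block writes
        std::vector<ToyBlock*> predecessors;
        std::set<unsigned> liveAtHead;
        std::set<unsigned> liveAtTail;
    };

    // Backward dataflow to a fixpoint: liveAtHead = uses + (liveAtTail - defs),
    // and anything live at a head becomes live at each predecessor's tail.
    void computeLiveness(std::vector<ToyBlock*>& blocks)
    {
        bool changed;
        do {
            changed = false;
            for (ToyBlock* block : blocks) {
                std::set<unsigned> live = block->liveAtTail;
                for (unsigned def : block->defs)
                    live.erase(def);
                for (unsigned use : block->uses)
                    live.insert(use);
                if (live == block->liveAtHead)
                    continue;
                block->liveAtHead = live;
                for (ToyBlock* pred : block->predecessors) {
                    for (unsigned value : live) {
                        if (pred->liveAtTail.insert(value).second)
                            changed = true; // a predecessor's tail grew; iterate again
                    }
                }
            }
        } while (changed);
    }

The real implementation additionally processes instructions one at a time (LocalCalc), distinguishes early/late uses and defs, and only revisits blocks whose live-at-tail actually changed.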
diff --git a/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.cpp b/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.cpp
new file mode 100644
index 000000000..dbbb257c1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirLogRegisterPressure.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void logRegisterPressure(Code& code)
+{
+    const unsigned totalColumns = 200;
+    const unsigned registerColumns = 100;
+    
+    RegLiveness liveness(code);
+
+    for (BasicBlock* block : code) {
+        RegLiveness::LocalCalc localCalc(liveness, block);
+
+        block->dumpHeader(WTF::dataFile());
+
+        Vector<CString> instDumps;
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            Inst& inst = block->at(instIndex);
+            Inst* prevInst = block->get(instIndex - 1);
+
+            localCalc.execute(instIndex);
+
+            RegisterSet set;
+            set.setAll(localCalc.live());
+            Inst::forEachDefWithExtraClobberedRegs<Reg>(
+                prevInst, &inst,
+                [&] (Reg reg, Arg::Role, Arg::Type, Arg::Width) {
+                    set.set(reg);
+                });
+
+            StringPrintStream instOut;
+            StringPrintStream lineOut;
+            lineOut.print("   ");
+            if (set.numberOfSetRegisters()) {
+                set.forEach(
+                    [&] (Reg reg) {
+                        CString text = toCString(" ", reg);
+                        if (text.length() + lineOut.length() > totalColumns) {
+                            instOut.print(lineOut.toCString(), "\n");
+                            lineOut.reset();
+                            lineOut.print("       ");
+                        }
+                        lineOut.print(text);
+                    });
+                lineOut.print(":");
+            }
+            if (lineOut.length() > registerColumns) {
+                instOut.print(lineOut.toCString(), "\n");
+                lineOut.reset();
+            }
+            while (lineOut.length() < registerColumns)
+                lineOut.print(" ");
+            lineOut.print(" ");
+            lineOut.print(inst);
+            instOut.print(lineOut.toCString(), "\n");
+            instDumps.append(instOut.toCString());
+        }
+
+        for (unsigned i = instDumps.size(); i--;)
+            dataLog(instDumps[i]);
+        
+        block->dumpFooter(WTF::dataFile());
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.h b/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.h
new file mode 100644
index 000000000..3f7c3e24c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Dumps the registers that are used at each instruction.
+void logRegisterPressure(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp
new file mode 100644
index 000000000..e0018734b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirLowerAfterRegAlloc.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCCallingConvention.h"
+#include "AirCode.h"
+#include "AirEmitShuffle.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+#include "RegisterSet.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+    
+} // anonymous namespace
+
+void lowerAfterRegAlloc(Code& code)
+{
+    PhaseScope phaseScope(code, "lowerAfterRegAlloc");
+
+    if (verbose)
+        dataLog("Code before lowerAfterRegAlloc:\n", code);
+
+    HashMap<Inst*, RegisterSet> usedRegisters;
+
+    RegLiveness liveness(code);
+    for (BasicBlock* block : code) {
+        RegLiveness::LocalCalc localCalc(liveness, block);
+
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            Inst& inst = block->at(instIndex);
+            
+            RegisterSet set;
+
+            bool isRelevant = inst.kind.opcode == Shuffle || inst.kind.opcode == ColdCCall;
+            
+            if (isRelevant) {
+                for (Reg reg : localCalc.live())
+                    set.set(reg);
+            }
+            
+            localCalc.execute(instIndex);
+
+            if (isRelevant)
+                usedRegisters.add(&inst, set);
+        }
+    }
+
+    auto getScratches = [&] (RegisterSet set, Arg::Type type) -> std::array<Arg, 2> {
+        std::array<Arg, 2> result;
+        for (unsigned i = 0; i < 2; ++i) {
+            bool found = false;
+            for (Reg reg : code.regsInPriorityOrder(type)) {
+                if (!set.get(reg)) {
+                    result[i] = Tmp(reg);
+                    set.set(reg);
+                    found = true;
+                    break;
+                }
+            }
+            if (!found) {
+                result[i] = Arg::stack(
+                    code.addStackSlot(
+                        Arg::bytes(Arg::conservativeWidth(type)),
+                        StackSlotKind::Spill));
+            }
+        }
+        return result;
+    };
+
+    // Now transform the code.
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            Inst& inst = block->at(instIndex);
+
+            switch (inst.kind.opcode) {
+            case Shuffle: {
+                RegisterSet set = usedRegisters.get(&inst);
+                Vector<ShufflePair> pairs;
+                for (unsigned i = 0; i < inst.args.size(); i += 3) {
+                    Arg src = inst.args[i + 0];
+                    Arg dst = inst.args[i + 1];
+                    Arg::Width width = inst.args[i + 2].width();
+
+                    // The used register set contains things live after the shuffle. But
+                    // emitShuffle() wants a scratch register that is not just dead but also does not
+                    // interfere with either sources or destinations.
+                    auto excludeRegisters = [&] (Tmp tmp) {
+                        if (tmp.isReg())
+                            set.set(tmp.reg());
+                    };
+                    src.forEachTmpFast(excludeRegisters);
+                    dst.forEachTmpFast(excludeRegisters);
+                    
+                    pairs.append(ShufflePair(src, dst, width));
+                }
+                std::array<Arg, 2> gpScratch = getScratches(set, Arg::GP);
+                std::array<Arg, 2> fpScratch = getScratches(set, Arg::FP);
+                insertionSet.insertInsts(
+                    instIndex, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+                inst = Inst();
+                break;
+            }
+
+            case ColdCCall: {
+                CCallValue* value = inst.origin->as<CCallValue>();
+                Kind oldKind = inst.kind;
+
+                RegisterSet liveRegs = usedRegisters.get(&inst);
+                RegisterSet regsToSave = liveRegs;
+                regsToSave.exclude(RegisterSet::calleeSaveRegisters());
+                regsToSave.exclude(RegisterSet::stackRegisters());
+                regsToSave.exclude(RegisterSet::reservedHardwareRegisters());
+
+                RegisterSet preUsed = regsToSave;
+                Vector<Arg> destinations = computeCCallingConvention(code, value);
+                Tmp result = cCallResult(value->type());
+                Arg originalResult = result ? inst.args[1] : Arg();
+                
+                Vector<ShufflePair> pairs;
+                for (unsigned i = 0; i < destinations.size(); ++i) {
+                    Value* child = value->child(i);
+                    Arg src = inst.args[result ? (i >= 1 ? i + 1 : i) : i];
+                    Arg dst = destinations[i];
+                    Arg::Width width = Arg::widthForB3Type(child->type());
+                    pairs.append(ShufflePair(src, dst, width));
+
+                    auto excludeRegisters = [&] (Tmp tmp) {
+                        if (tmp.isReg())
+                            preUsed.set(tmp.reg());
+                    };
+                    src.forEachTmpFast(excludeRegisters);
+                    dst.forEachTmpFast(excludeRegisters);
+                }
+
+                std::array<Arg, 2> gpScratch = getScratches(preUsed, Arg::GP);
+                std::array<Arg, 2> fpScratch = getScratches(preUsed, Arg::FP);
+                
+                // Also need to save all live registers. Don't need to worry about the result
+                // register.
+                if (originalResult.isReg())
+                    regsToSave.clear(originalResult.reg());
+                Vector<StackSlot*> stackSlots;
+                regsToSave.forEach(
+                    [&] (Reg reg) {
+                        Tmp tmp(reg);
+                        Arg arg(tmp);
+                        Arg::Width width = Arg::conservativeWidth(arg.type());
+                        StackSlot* stackSlot =
+                            code.addStackSlot(Arg::bytes(width), StackSlotKind::Spill);
+                        pairs.append(ShufflePair(arg, Arg::stack(stackSlot), width));
+                        stackSlots.append(stackSlot);
+                    });
+
+                if (verbose)
+                    dataLog("Pre-call pairs for ", inst, ": ", listDump(pairs), "\n");
+                
+                insertionSet.insertInsts(
+                    instIndex, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+
+                inst = buildCCall(code, inst.origin, destinations);
+                if (oldKind.traps)
+                    inst.kind.traps = true;
+
+                // Now we need to emit code to restore registers.
+                pairs.resize(0);
+                unsigned stackSlotIndex = 0;
+                regsToSave.forEach(
+                    [&] (Reg reg) {
+                        Tmp tmp(reg);
+                        Arg arg(tmp);
+                        Arg::Width width = Arg::conservativeWidth(arg.type());
+                        StackSlot* stackSlot = stackSlots[stackSlotIndex++];
+                        pairs.append(ShufflePair(Arg::stack(stackSlot), arg, width));
+                    });
+                if (result) {
+                    ShufflePair pair(result, originalResult, Arg::widthForB3Type(value->type()));
+                    pairs.append(pair);
+                }
+
+                // For finding scratch registers, we need to account for the possibility that
+                // the result is dead.
+                if (originalResult.isReg())
+                    liveRegs.set(originalResult.reg());
+
+                gpScratch = getScratches(liveRegs, Arg::GP);
+                fpScratch = getScratches(liveRegs, Arg::FP);
+                
+                insertionSet.insertInsts(
+                    instIndex + 1, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+                break;
+            }
+
+            default:
+                break;
+            }
+        }
+
+        insertionSet.execute(block);
+
+        block->insts().removeAllMatching(
+            [&] (Inst& inst) -> bool {
+                return !inst;
+            });
+    }
+
+    if (verbose)
+        dataLog("Code after lowerAfterRegAlloc:\n", code);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h
new file mode 100644
index 000000000..d8234a7e6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This lowers Shuffle and ColdCCall instructions. This phase is designed to be run after register
+// allocation.
+
+void lowerAfterRegAlloc(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
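The scratch-selection policy in lowerAfterRegAlloc's getScratches() is simple: prefer a register that is not live across the instruction (and not already claimed), and only when every candidate is taken fall back to a fresh spill slot. The following is a minimal sketch of that policy; Location and pickScratch are illustrative names, not Air API.

    #include <set>
    #include <vector>

    enum class LocationKind { Register, SpillSlot };

    struct Location {
        LocationKind kind;
        unsigned index; // register number, or spill-slot number
    };

    // Claim a free register if one exists; marking it taken ensures a second
    // call returns a different scratch. If all candidates are live, use memory.
    Location pickScratch(std::set<unsigned>& taken,
        const std::vector<unsigned>& registersInPriorityOrder, unsigned& nextSpillSlot)
    {
        for (unsigned reg : registersInPriorityOrder) {
            if (taken.insert(reg).second)
                return { LocationKind::Register, reg };
        }
        return { LocationKind::SpillSlot, nextSpillSlot++ };
    }

This mirrors why the phase excludes the sources and destinations of a shuffle from the candidate set first: a scratch must not merely be dead, it must not interfere with any value the shuffle still has to move.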
diff --git a/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp b/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp
new file mode 100644
index 000000000..e14641da6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirLowerEntrySwitch.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void lowerEntrySwitch(Code& code)
+{
+    PhaseScope phaseScope(code, "lowerEntrySwitch");
+    
+    // Figure out the set of blocks that should be duplicated.
+    BlockWorklist worklist;
+    for (BasicBlock* block : code) {
+        if (block->last().kind.opcode == EntrySwitch)
+            worklist.push(block);
+    }
+    
+    // It's possible that we don't have any EntrySwitches. That's fine.
+    if (worklist.seen().isEmpty()) {
+        Vector<FrequentedBlock> entrypoints(code.proc().numEntrypoints(), FrequentedBlock(code[0]));
+        code.setEntrypoints(WTFMove(entrypoints));
+        return;
+    }
+    
+    while (BasicBlock* block = worklist.pop())
+        worklist.pushAll(block->predecessors());
+    
+    RELEASE_ASSERT(worklist.saw(code[0]));
+    
+    Vector<FrequencyClass> entrypointFrequencies(code.proc().numEntrypoints(), FrequencyClass::Rare);
+    for (BasicBlock* block : code) {
+        if (block->last().kind.opcode != EntrySwitch)
+            continue;
+        for (unsigned entrypointIndex = code.proc().numEntrypoints(); entrypointIndex--;) {
+            entrypointFrequencies[entrypointIndex] = maxFrequency(
+                entrypointFrequencies[entrypointIndex],
+                block->successor(entrypointIndex).frequency());
+        }
+    }
+    
+    auto fixEntrySwitch = [&] (BasicBlock* block, unsigned entrypointIndex) {
+        if (block->last().kind.opcode != EntrySwitch)
+            return;
+        FrequentedBlock target = block->successor(entrypointIndex);
+        block->last().kind.opcode = Jump;
+        block->successors().resize(1);
+        block->successor(0) = target;
+    };
+    
+    // Now duplicate them.
+    Vector<FrequentedBlock> entrypoints;
+    entrypoints.append(FrequentedBlock(code[0], entrypointFrequencies[0]));
+    IndexMap<BasicBlock, BasicBlock*> map(code.size());
+    for (unsigned entrypointIndex = 1; entrypointIndex < code.proc().numEntrypoints(); ++entrypointIndex) {
+        map.clear();
+        for (BasicBlock* block : worklist.seen().values(code))
+            map[block] = code.addBlock(block->frequency());
+        entrypoints.append(FrequentedBlock(map[code[0]], entrypointFrequencies[entrypointIndex]));
+        for (BasicBlock* block : worklist.seen().values(code)) {
+            BasicBlock* newBlock = map[block];
+            for (const Inst& inst : *block)
+                newBlock->appendInst(inst);
+            newBlock->successors() = block->successors();
+            for (BasicBlock*& successor : newBlock->successorBlocks()) {
+                if (BasicBlock* replacement = map[successor])
+                    successor = replacement;
+            }
+            fixEntrySwitch(newBlock, entrypointIndex);
+        }
+    }
+    for (BasicBlock* block : worklist.seen().values(code))
+        fixEntrySwitch(block, 0);
+    
+    code.setEntrypoints(WTFMove(entrypoints));
+    code.resetReachability();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h b/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h
new file mode 100644
index 000000000..ff3500727
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Converts code that seems to have one entrypoint and emulates multiple entrypoints with
+// EntrySwitch into code that really has multiple entrypoints. This is accomplished by duplicating
+// the backwards transitive closure from all EntrySwitches.
+void lowerEntrySwitch(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
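The "backwards transitive closure" that lowerEntrySwitch duplicates is computed with BlockWorklist: seed the worklist with every block whose terminal is an EntrySwitch, then repeatedly pop a block and push its predecessors, visiting each block at most once. A minimal sketch of that computation; Node is an illustrative stand-in for Air's BasicBlock.

    #include <deque>
    #include <set>
    #include <vector>

    struct Node {
        std::vector<Node*> predecessors;
        bool endsInEntrySwitch { false };
    };

    // Returns every block from which an EntrySwitch is reachable, i.e. the
    // set of blocks that must be cloned once per extra entrypoint.
    std::set<Node*> blocksToDuplicate(const std::vector<Node*>& cfg)
    {
        std::set<Node*> seen;
        std::deque<Node*> worklist;
        for (Node* node : cfg) {
            if (node->endsInEntrySwitch && seen.insert(node).second)
                worklist.push_back(node);
        }
        while (!worklist.empty()) {
            Node* node = worklist.front();
            worklist.pop_front();
            for (Node* pred : node->predecessors) {
                if (seen.insert(pred).second)
                    worklist.push_back(pred); // first visit only, like BlockWorklist
            }
        }
        return seen;
    }

Since the root must reach every block, the closure always contains the first block, which is why the phase asserts worklist.saw(code[0]) before cloning.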
diff --git a/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp b/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp
new file mode 100644
index 000000000..b086b7b08
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirLowerMacros.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallingConvention.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void lowerMacros(Code& code)
+{
+    PhaseScope phaseScope(code, "lowerMacros");
+
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            Inst& inst = block->at(instIndex);
+
+            switch (inst.kind.opcode) {
+            case CCall: {
+                CCallValue* value = inst.origin->as<CCallValue>();
+                Kind oldKind = inst.kind;
+
+                Vector<Arg> destinations = computeCCallingConvention(code, value);
+
+                Inst shuffleArguments(Shuffle, value);
+                unsigned offset = value->type() == Void ? 0 : 1;
+                for (unsigned i = 1; i < destinations.size(); ++i) {
+                    Value* child = value->child(i);
+                    shuffleArguments.args.append(inst.args[offset + i]);
+                    shuffleArguments.args.append(destinations[i]);
+                    shuffleArguments.args.append(Arg::widthArg(Arg::widthForB3Type(child->type())));
+                }
+                insertionSet.insertInst(instIndex, WTFMove(shuffleArguments));
+
+                // Indicate that we're using our original callee argument.
+                destinations[0] = inst.args[0];
+
+                // Save where the original instruction put its result.
+                Arg resultDst = value->type() == Void ? Arg() : inst.args[1];
+                
+                inst = buildCCall(code, inst.origin, destinations);
+                if (oldKind.traps)
+                    inst.kind.traps = true;
+
+                Tmp result = cCallResult(value->type());
+                switch (value->type()) {
+                case Void:
+                    break;
+                case Float:
+                    insertionSet.insert(instIndex + 1, MoveFloat, value, result, resultDst);
+                    break;
+                case Double:
+                    insertionSet.insert(instIndex + 1, MoveDouble, value, result, resultDst);
+                    break;
+                case Int32:
+                    insertionSet.insert(instIndex + 1, Move32, value, result, resultDst);
+                    break;
+                case Int64:
+                    insertionSet.insert(instIndex + 1, Move, value, result, resultDst);
+                    break;
+                }
+                break;
+            }
+
+            default:
+                break;
+            }
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirLowerMacros.h b/Source/JavaScriptCore/b3/air/AirLowerMacros.h
new file mode 100644
index 000000000..2dcd76dfe
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerMacros.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Air has some opcodes that are very high-level and are meant to reduce the amount of low-level
+// knowledge in the B3->Air lowering. The current example is CCall.
+
+void lowerMacros(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
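The CCall expansion in lowerMacros has a fixed shape: move each argument into its convention-assigned location, rewrite the instruction into its fixed-register form, then move the return register into the original result temporary. A toy sketch of that shape follows, under an assumed convention (the x0..x3 names and the at-most-four-argument limit are illustrative only); note the real phase emits one parallel Shuffle for the argument moves so that no source is clobbered before it is read, whereas this sketch uses sequential moves for brevity.

    #include <cstddef>
    #include <string>
    #include <vector>

    struct ToyInst {
        std::string opcode;
        std::vector<std::string> args;
    };

    // ccall.args = { callee, result, arg0, arg1, ... }; assumes <= 4 arguments.
    std::vector<ToyInst> lowerToyCCall(const ToyInst& ccall)
    {
        static const char* argRegs[] = { "x0", "x1", "x2", "x3" }; // assumed convention
        std::vector<ToyInst> out;
        for (size_t i = 2; i < ccall.args.size(); ++i)
            out.push_back({ "Move", { ccall.args[i], argRegs[i - 2] } }); // argument setup
        out.push_back({ "Call", { ccall.args[0] } });                     // fixed-register call
        out.push_back({ "Move", { "x0", ccall.args[1] } });               // capture the result
        return out;
    }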
diff --git a/Source/JavaScriptCore/b3/air/AirOpcode.opcodes b/Source/JavaScriptCore/b3/air/AirOpcode.opcodes
new file mode 100644
index 000000000..e82c9f5bf
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirOpcode.opcodes
@@ -0,0 +1,943 @@
+# Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# Syllabus:
+#
+# Examples of some roles, types, and widths:
+# U:G:32 => use of the low 32 bits of a general-purpose register or value
+# D:G:32 => def of the low 32 bits of a general-purpose register or value
+# UD:G:32 => use and def of the low 32 bits of a general-purpose register or value
+# U:G:64 => use of the low 64 bits of a general-purpose register or value
+# ZD:G:32 => def of all bits of a general-purpose register, where all but the low 32 bits are guaranteed to be zeroed.
+# UA:G:Ptr => UseAddr (see comment in Arg.h)
+# U:F:32 => use of a float register or value
+# U:F:64 => use of a double register or value
+# D:F:32 => def of a float register or value
+# UD:F:32 => use and def of a float register or value
+# S:F:32 => scratch float register.
+#
+# Argument kinds:
+# Tmp => temporary or register
+# Imm => 32-bit immediate int
+# BigImm => TrustedImm64
+# Addr => address as temporary/register+offset
+# Index => BaseIndex address
+# Abs => AbsoluteAddress
+#
+# The parser views these things as keywords, and understands that they fall into two distinct classes
+# of things. So, although this file uses a particular indentation style, none of the whitespace or
+# even newlines are meaningful to the parser. For example, you could write:
+#
+# Foo42 U:G:32, UD:F:32 Imm, Tmp Addr, Tmp
+#
+# And the parser would know that this is the same as:
+#
+# Foo42 U:G:32, UD:F:32
+#     Imm, Tmp
+#     Addr, Tmp
+#
+# I.e. a two-form instruction that uses a GPR or an int immediate and uses+defs a float register.
+#
+# Any opcode or opcode form can be preceded with an architecture list, which restricts the opcode to the
+# union of those architectures. For example, if this is the only overload of the opcode, then it makes the
+# opcode only available on x86_64:
+#
+# x86_64: Fuzz UD:G:64, D:G:64
+#     Tmp, Tmp
+#     Tmp, Addr
+#
+# But this only restricts the two-operand form, the other form is allowed on all architectures:
+#
+# x86_64: Fuzz UD:G:64, D:G:64
+#     Tmp, Tmp
+#     Tmp, Addr
+# Fuzz UD:G:Ptr, D:G:Ptr, U:F:Ptr
+#     Tmp, Tmp, Tmp
+#     Tmp, Addr, Tmp
+#
+# And you can also restrict individual forms:
+#
+# Thingy UD:G:32, D:G:32
+#     Tmp, Tmp
+#     arm64: Tmp, Addr
+#
+# Additionally, you can have an intersection between the architectures of the opcode overload and the
+# form. In this example, the version that takes an address is only available on armv7 while the other
+# versions are available on armv7 or x86_64:
+#
+# x86_64 armv7: Buzz U:G:32, UD:F:32
+#     Tmp, Tmp
+#     Imm, Tmp
+#     armv7: Addr, Tmp
+#
+# Finally, you can specify architectures using helpful architecture groups. Here are all of the
+# architecture keywords that we support:
+#
+# x86: means x86-32 or x86-64.
+# x86_32: means just x86-32.
+# x86_64: means just x86-64.
+# arm: means armv7 or arm64.
+# armv7: means just armv7.
+# arm64: means just arm64.
+# 32: means x86-32 or armv7.
+# 64: means x86-64 or arm64.
+
+# Note that the opcodes here have a leading capital (Add32) but must correspond to MacroAssembler
+# API that has a leading lower-case (add32).
+
+Nop
+
+Add32 U:G:32, U:G:32, ZD:G:32
+    Imm, Tmp, Tmp
+    Tmp, Tmp, Tmp
+
+Add32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Addr
+    x86: Imm, Index
+    Imm, Tmp
+    x86: Addr, Tmp
+    x86: Tmp, Addr
+    x86: Tmp, Index
+
+x86: Add8 U:G:8, UD:G:8
+    Imm, Addr
+    Imm, Index
+    Tmp, Addr
+    Tmp, Index
+
+x86: Add16 U:G:16, UD:G:16
+    Imm, Addr
+    Imm, Index
+    Tmp, Addr
+    Tmp, Index
+
+64: Add64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Imm, Addr
+    Imm, Tmp
+    x86: Addr, Tmp
+    x86: Tmp, Addr
+
+64: Add64 U:G:64, U:G:64, D:G:64
+    Imm, Tmp, Tmp
+    Tmp, Tmp, Tmp
+
+AddDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Index, Tmp, Tmp
+
+x86: AddDouble U:F:64, UD:F:64
+    Tmp, Tmp
+    Addr, Tmp
+
+AddFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Index, Tmp, Tmp
+
+x86: AddFloat U:F:32, UD:F:32
+    Tmp, Tmp
+    Addr, Tmp
+
+Sub32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Addr
+    Imm, Tmp
+    x86: Addr, Tmp
+    x86: Tmp, Addr
+
+arm64: Sub32 U:G:32, U:G:32, D:G:32
+    Tmp, Tmp, Tmp
+
+64: Sub64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Imm, Addr
+    Imm, Tmp
+    x86: Addr, Tmp
+    x86: Tmp, Addr
+
+arm64: Sub64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+SubDouble U:F:64, U:F:64, D:F:64
+    arm64: Tmp, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Tmp, Index, Tmp
+
+x86: SubDouble U:F:64, UD:F:64
+    Tmp, Tmp
+    Addr, Tmp
+
+SubFloat U:F:32, U:F:32, D:F:32
+    arm64: Tmp, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Tmp, Index, Tmp
+
+x86: SubFloat U:F:32, UD:F:32
+    Tmp, Tmp
+    Addr, Tmp
+
+Neg32 UZD:G:32
+    Tmp
+    x86: Addr
+
+64: Neg64 UD:G:64
+    Tmp
+
+arm64: NegateDouble U:F:64, D:F:64
+    Tmp, Tmp
+
+arm64: NegateFloat U:F:32, D:F:32
+    Tmp, Tmp
+
+Mul32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+Mul32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Imm, Tmp, Tmp
+
+64: Mul64 U:G:64, UD:G:64
+    Tmp, Tmp
+
+Mul64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+arm64: MultiplyAdd32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplyAdd64 U:G:64, U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplySub32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplySub64 U:G:64, U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplyNeg32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+
+arm64: MultiplyNeg64 U:G:64, U:G:64, ZD:G:64
+    Tmp, Tmp, Tmp
+
+arm64: Div32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+
+arm64: UDiv32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+
+arm64: Div64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+arm64: UDiv64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+MulDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Index, Tmp, Tmp
+
+x86: MulDouble U:F:64, UD:F:64
+    Tmp, Tmp
+    Addr, Tmp
+
+MulFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Index, Tmp, Tmp
+
+x86: MulFloat U:F:32, UD:F:32
+    Tmp, Tmp
+    Addr, Tmp
+
+arm64: DivDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+
+x86: DivDouble U:F:64, UD:F:64
+    Tmp, Tmp
+    Addr, Tmp
+
+arm64: DivFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+
+x86: DivFloat U:F:32, UD:F:32
+    Tmp, Tmp
+    Addr, Tmp
+
+x86: X86ConvertToDoubleWord32 U:G:32, ZD:G:32
+    Tmp*, Tmp*
+
+x86_64: X86ConvertToQuadWord64 U:G:64, D:G:64
+    Tmp*, Tmp*
+
+x86: X86Div32 UZD:G:32, UZD:G:32, U:G:32
+    Tmp*, Tmp*, Tmp
+
+x86: X86UDiv32 UZD:G:32, UZD:G:32, U:G:32
+    Tmp*, Tmp*, Tmp
+
+x86_64: X86Div64 UZD:G:64, UZD:G:64, U:G:64
+    Tmp*, Tmp*, Tmp
+
+x86_64: X86UDiv64 UZD:G:64, UZD:G:64, U:G:64
+    Tmp*, Tmp*, Tmp
+
+Lea32 UA:G:32, D:G:32
+    Addr, Tmp
+    x86: Index, Tmp as x86Lea32
+
+Lea64 UA:G:64, D:G:64
+    Addr, Tmp
+    x86: Index, Tmp as x86Lea64
+
+And32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    arm64: BitImm, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Addr, Tmp, Tmp
+
+And32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Tmp
+    x86: Tmp, Addr
+    x86: Addr, Tmp
+    x86: Imm, Addr
+
+64: And64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    arm64: BitImm64, Tmp, Tmp
+
+x86_64: And64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Imm, Tmp
+
+AndDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+
+x86: AndDouble U:F:64, UD:F:64
+    Tmp, Tmp
+
+AndFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+
+x86: AndFloat U:F:32, UD:F:32
+    Tmp, Tmp
+
+OrDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+
+x86: OrDouble U:F:64, UD:F:64
+    Tmp, Tmp
+
+OrFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+
+x86: OrFloat U:F:32, UD:F:32
+    Tmp, Tmp
+
+x86: XorDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+
+x86: XorDouble U:F:64, UD:F:64
+    Tmp, Tmp
+
+x86: XorFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+
+x86: XorFloat U:F:32, UD:F:32
+    Tmp, Tmp
+
+arm64: Lshift32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86: Lshift32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Lshift64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: Lshift64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Rshift32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86: Rshift32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Rshift64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: Rshift64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Urshift32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86: Urshift32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Urshift64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: Urshift64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+x86_64: RotateRight32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: RotateRight32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: RotateRight64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: RotateRight64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: RotateLeft32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+x86_64: RotateLeft64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+Or32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    arm64: BitImm, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Addr, Tmp, Tmp
+
+Or32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Tmp
+    x86: Tmp, Addr
+    x86: Addr, Tmp
+    x86: Imm, Addr
+
+64: Or64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    arm64: BitImm64, Tmp, Tmp
+
+64: Or64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Imm, Tmp
+
+Xor32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    arm64: BitImm, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Addr, Tmp, Tmp
+
+Xor32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Tmp
+    x86: Tmp, Addr
+    x86: Addr, Tmp
+    x86: Imm, Addr
+
+64: Xor64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    arm64: BitImm64, Tmp, Tmp
+
+64: Xor64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Tmp, Addr
+    x86: Imm, Tmp
+
+arm64: Not32 U:G:32, ZD:G:32
+    Tmp, Tmp
+
+x86: Not32 UZD:G:32
+    Tmp
+    Addr
+
+arm64: Not64 U:G:64, D:G:64
+    Tmp, Tmp
+
+x86: Not64 UD:G:64
+    Tmp
+    Addr
+
+arm64: AbsDouble U:F:64, D:F:64
+    Tmp, Tmp
+
+arm64: AbsFloat U:F:32, D:F:32
+    Tmp, Tmp
+
+CeilDouble U:F:64, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+CeilFloat U:F:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+FloorDouble U:F:64, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+FloorFloat U:F:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+SqrtDouble U:F:64, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+SqrtFloat U:F:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+ConvertInt32ToDouble U:G:32, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+64: ConvertInt64ToDouble U:G:64, D:F:64
+    Tmp, Tmp
+    x86_64: Addr, Tmp
+
+ConvertInt32ToFloat U:G:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+64: ConvertInt64ToFloat U:G:64, D:F:32
+    Tmp, Tmp
+    x86_64: Addr, Tmp
+
+CountLeadingZeros32 U:G:32, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+64: CountLeadingZeros64 U:G:64, D:G:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+ConvertDoubleToFloat U:F:64, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+ConvertFloatToDouble U:F:32, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+# Note that Move operates over the full register size, which is either 32-bit or 64-bit depending on
+# the platform. I'm not entirely sure that this is a good thing; it might be better to just have a
+# Move64 instruction. OTOH, our MacroAssemblers already have this notion of "move()" that basically
+# means movePtr.
+Move U:G:Ptr, D:G:Ptr
+    Tmp, Tmp
+    Imm, Tmp as signExtend32ToPtr
+    BigImm, Tmp
+    Addr, Tmp as loadPtr # This means that "Move Addr, Tmp" is code-generated as "load" not "move".
+    Index, Tmp as loadPtr
+    Tmp, Addr as storePtr
+    Tmp, Index as storePtr
+    x86: Imm, Addr as storePtr
+
+x86: Swap32 UD:G:32, UD:G:32
+    Tmp, Tmp
+    Tmp, Addr
+
+x86_64: Swap64 UD:G:64, UD:G:64
+    Tmp, Tmp
+    Tmp, Addr
+
+Move32 U:G:32, ZD:G:32
+    Tmp, Tmp as zeroExtend32ToPtr
+    Addr, Tmp as load32
+    Index, Tmp as load32
+    Tmp, Addr as store32
+    Tmp, Index as store32
+    x86: Imm, Tmp as zeroExtend32ToPtr
+    x86: Imm, Addr as store32
+    x86: Imm, Index as store32
+
+StoreZero32 U:G:32
+    Addr
+    Index
+
+SignExtend32ToPtr U:G:32, D:G:Ptr
+    Tmp, Tmp
+
+ZeroExtend8To32 U:G:8, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp as load8
+    x86: Index, Tmp as load8
+
+SignExtend8To32 U:G:8, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp as load8SignedExtendTo32
+    x86: Index, Tmp as load8SignedExtendTo32
+
+ZeroExtend16To32 U:G:16, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp as load16
+    x86: Index, Tmp as load16
+
+SignExtend16To32 U:G:16, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp as load16SignedExtendTo32
+    x86: Index, Tmp as load16SignedExtendTo32
+
+MoveFloat U:F:32, D:F:32
+    Tmp, Tmp as moveDouble
+    Addr, Tmp as loadFloat
+    Index, Tmp as loadFloat
+    Tmp, Addr as storeFloat
+    Tmp, Index as storeFloat
+
+MoveDouble U:F:64, D:F:64
+    Tmp, Tmp
+    Addr, Tmp as loadDouble
+    Index, Tmp as loadDouble
+    Tmp, Addr as storeDouble
+    Tmp, Index as storeDouble
+
+MoveZeroToDouble D:F:64
+    Tmp
+
+64: Move64ToDouble U:G:64, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp as loadDouble
+    Index, Tmp as loadDouble
+
+Move32ToFloat U:G:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp as loadFloat
+    Index, Tmp as loadFloat
+
+64: MoveDoubleTo64 U:F:64, D:G:64
+    Tmp, Tmp
+    Addr, Tmp as load64
+    Index, Tmp as load64
+
+MoveFloatTo32 U:F:32, D:G:32
+    Tmp, Tmp
+    Addr, Tmp as load32
+    Index, Tmp as load32
+
+Load8 U:G:8, ZD:G:32
+    Addr, Tmp
+    Index, Tmp
+
+Store8 U:G:8, D:G:8
+    Tmp, Index
+    Tmp, Addr
+    x86: Imm, Index
+    x86: Imm, Addr
+
+Load8SignedExtendTo32 U:G:8, ZD:G:32
+    Addr, Tmp
+    Index, Tmp
+
+Load16 U:G:16, ZD:G:32
+    Addr, Tmp
+    Index, Tmp
+
+Load16SignedExtendTo32 U:G:16, ZD:G:32
+    Addr, Tmp
+    Index, Tmp
+
+Store16 U:G:16, D:G:16
+    Tmp, Index
+    Tmp, Addr
+
+Compare32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    RelCond, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp
+
+64: Compare64 U:G:32, U:G:64, U:G:64, ZD:G:32
+    RelCond, Tmp, Tmp, Tmp
+    x86: RelCond, Tmp, Imm, Tmp
+
+Test32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    x86: ResCond, Addr, Imm, Tmp
+    ResCond, Tmp, Tmp, Tmp
+    ResCond, Tmp, BitImm, Tmp
+
+64: Test64 U:G:32, U:G:64, U:G:64, ZD:G:32
+    x86: ResCond, Tmp, Imm, Tmp
+    ResCond, Tmp, Tmp, Tmp
+
+CompareDouble U:G:32, U:F:64, U:F:64, ZD:G:32
+    DoubleCond, Tmp, Tmp, Tmp
+
+CompareFloat U:G:32, U:F:32, U:F:32, ZD:G:32
+    DoubleCond, Tmp, Tmp, Tmp
+
+# Note that branches have some logic in AirOptimizeBlockOrder.cpp. If you add new branches, please make sure
+# you opt them into the block order optimizations.
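+# (Concretely, that means adding the new opcode to the switch at the end of optimizeBlockOrder()
+# in AirOptimizeBlockOrder.cpp.)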
+
+Branch8 U:G:32, U:G:8, U:G:8 /branch
+    x86: RelCond, Addr, Imm
+    x86: RelCond, Index, Imm
+
+Branch32 U:G:32, U:G:32, U:G:32 /branch
+    x86: RelCond, Addr, Imm
+    RelCond, Tmp, Tmp
+    RelCond, Tmp, Imm
+    x86: RelCond, Tmp, Addr
+    x86: RelCond, Addr, Tmp
+    x86: RelCond, Index, Imm
+
+64: Branch64 U:G:32, U:G:64, U:G:64 /branch
+    RelCond, Tmp, Tmp
+    RelCond, Tmp, Imm
+    x86: RelCond, Tmp, Addr
+    x86: RelCond, Addr, Tmp
+    x86: RelCond, Addr, Imm
+    x86: RelCond, Index, Tmp
+
+BranchTest8 U:G:32, U:G:8, U:G:8 /branch
+    x86: ResCond, Addr, BitImm
+    x86: ResCond, Index, BitImm
+
+BranchTest32 U:G:32, U:G:32, U:G:32 /branch
+    ResCond, Tmp, Tmp
+    ResCond, Tmp, BitImm
+    x86: ResCond, Addr, BitImm
+    x86: ResCond, Index, BitImm
+
+# Warning: forms that take an immediate will sign-extend their immediate. You probably want
+# BranchTest32 in most cases where you use an immediate.
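+# (Illustrative: a hypothetical immediate 0x80000000 would be sign-extended to 0xffffffff80000000,
+# so the test would cover the high 32 bits as well.)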
+64: BranchTest64 U:G:32, U:G:64, U:G:64 /branch
+    ResCond, Tmp, Tmp
+    arm64: ResCond, Tmp, BitImm64
+    x86: ResCond, Tmp, BitImm
+    x86: ResCond, Addr, BitImm
+    x86: ResCond, Addr, Tmp
+    x86: ResCond, Index, BitImm
+
+BranchDouble U:G:32, U:F:64, U:F:64 /branch
+    DoubleCond, Tmp, Tmp
+
+BranchFloat U:G:32, U:F:32, U:F:32 /branch
+    DoubleCond, Tmp, Tmp
+
+BranchAdd32 U:G:32, U:G:32, U:G:32, ZD:G:32 /branch
+    ResCond, Tmp, Tmp, Tmp
+    x86: ResCond, Tmp, Addr, Tmp
+    x86: ResCond, Addr, Tmp, Tmp
+
+BranchAdd32 U:G:32, U:G:32, UZD:G:32 /branch
+    ResCond, Tmp, Tmp
+    ResCond, Imm, Tmp
+    x86: ResCond, Imm, Addr
+    x86: ResCond, Tmp, Addr
+    x86: ResCond, Addr, Tmp
+
+BranchAdd64 U:G:32, U:G:64, U:G:64, ZD:G:64 /branch
+    ResCond, Tmp, Tmp, Tmp
+    x86: ResCond, Tmp, Addr, Tmp
+    x86: ResCond, Addr, Tmp, Tmp
+
+64: BranchAdd64 U:G:32, U:G:64, UD:G:64 /branch
+    ResCond, Imm, Tmp
+    ResCond, Tmp, Tmp
+    x86: ResCond, Addr, Tmp
+
+x86: BranchMul32 U:G:32, U:G:32, UZD:G:32 /branch
+    ResCond, Tmp, Tmp
+    ResCond, Addr, Tmp
+
+x86: BranchMul32 U:G:32, U:G:32, U:G:32, ZD:G:32 /branch
+    ResCond, Tmp, Imm, Tmp
+
+arm64: BranchMul32 U:G:32, U:G:32, U:G:32, S:G:32, S:G:32, ZD:G:32 /branch
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+x86_64: BranchMul64 U:G:32, U:G:64, UZD:G:64 /branch
+    ResCond, Tmp, Tmp
+
+arm64: BranchMul64 U:G:32, U:G:64, U:G:64, S:G:64, S:G:64, ZD:G:64 /branch
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+BranchSub32 U:G:32, U:G:32, UZD:G:32 /branch
+    ResCond, Tmp, Tmp
+    ResCond, Imm, Tmp
+    x86: ResCond, Imm, Addr
+    x86: ResCond, Tmp, Addr
+    x86: ResCond, Addr, Tmp
+
+64: BranchSub64 U:G:32, U:G:64, UD:G:64 /branch
+    ResCond, Imm, Tmp
+    ResCond, Tmp, Tmp
+
+BranchNeg32 U:G:32, UZD:G:32 /branch
+    ResCond, Tmp
+
+64: BranchNeg64 U:G:32, UZD:G:64 /branch
+    ResCond, Tmp
+
+MoveConditionally32 U:G:32, U:G:32, U:G:32, U:G:Ptr, UD:G:Ptr
+    RelCond, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionally32 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+64: MoveConditionally64 U:G:32, U:G:64, U:G:64, U:G:Ptr, UD:G:Ptr
+    RelCond, Tmp, Tmp, Tmp, Tmp
+
+64: MoveConditionally64 U:G:32, U:G:64, U:G:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+MoveConditionallyTest32 U:G:32, U:G:32, U:G:32, U:G:Ptr, UD:G:Ptr
+    ResCond, Tmp, Tmp, Tmp, Tmp
+    x86: ResCond, Tmp, Imm, Tmp, Tmp
+
+MoveConditionallyTest32 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    ResCond, Tmp, BitImm, Tmp, Tmp, Tmp
+
+64: MoveConditionallyTest64 U:G:32, U:G:64, U:G:64, U:G:Ptr, UD:G:Ptr
+    ResCond, Tmp, Tmp, Tmp, Tmp
+    x86: ResCond, Tmp, Imm, Tmp, Tmp
+
+64: MoveConditionallyTest64 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    x86_64: ResCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+MoveConditionallyDouble U:G:32, U:F:64, U:F:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyDouble U:G:32, U:F:64, U:F:64, U:G:Ptr, UD:G:Ptr
+    DoubleCond, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyFloat U:G:32, U:F:32, U:F:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyFloat U:G:32, U:F:32, U:F:32, U:G:Ptr, UD:G:Ptr
+    DoubleCond, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionally32 U:G:32, U:G:32, U:G:32, U:F:64, U:F:64, D:F:64
+    RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+    x86: RelCond, Addr, Imm, Tmp, Tmp, Tmp
+    x86: RelCond, Tmp, Addr, Tmp, Tmp, Tmp
+    x86: RelCond, Addr, Tmp, Tmp, Tmp, Tmp
+    x86: RelCond, Index, Imm, Tmp, Tmp, Tmp
+
+64: MoveDoubleConditionally64 U:G:32, U:G:64, U:G:64, U:F:64, U:F:64, D:F:64
+    RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+    x86_64: RelCond, Tmp, Addr, Tmp, Tmp, Tmp
+    x86_64: RelCond, Addr, Tmp, Tmp, Tmp, Tmp
+    x86_64: RelCond, Addr, Imm, Tmp, Tmp, Tmp
+    x86_64: RelCond, Index, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyTest32 U:G:32, U:G:32, U:G:32, U:F:64, U:F:64, D:F:64
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    ResCond, Tmp, BitImm, Tmp, Tmp, Tmp
+    x86: ResCond, Addr, Imm, Tmp, Tmp, Tmp
+    x86: ResCond, Index, Imm, Tmp, Tmp, Tmp
+
+# Warning: forms that take an immediate will sign-extend their immediate. You probably want
+# MoveDoubleConditionallyTest32 in most cases where you use an immediate.
+64: MoveDoubleConditionallyTest64 U:G:32, U:G:64, U:G:64, U:F:64, U:F:64, D:F:64
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    x86_64: ResCond, Tmp, Imm, Tmp, Tmp, Tmp
+    x86_64: ResCond, Addr, Imm, Tmp, Tmp, Tmp
+    x86_64: ResCond, Addr, Tmp, Tmp, Tmp, Tmp
+    x86_64: ResCond, Index, Imm, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyDouble U:G:32, U:F:64, U:F:64, U:F:64, U:F:64, D:F:64
+    DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyFloat U:G:32, U:F:32, U:F:32, U:F:64, U:F:64, D:F:64
+    DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MemoryFence /effects
+StoreFence /effects
+LoadFence /effects
+
+Jump /branch
+
+RetVoid /return
+
+Ret32 U:G:32 /return
+    Tmp
+
+64: Ret64 U:G:64 /return
+    Tmp
+
+RetFloat U:F:32 /return
+    Tmp
+
+RetDouble U:F:64 /return
+    Tmp
+
+Oops /terminal
+
+# This is a terminal but we express it as a Custom because we don't want it to have a code
+# generator.
+custom EntrySwitch
+
+# A Shuffle is a multi-source, multi-destination move: it performs several moves at once. The
+# moves are specified as triplets of src, dst, and width. For example, you can request a swap
+# this way:
+#     Shuffle %tmp1, %tmp2, 64, %tmp2, %tmp1, 64
+custom Shuffle
+
+# Air allows for exotic behavior. A Patch's behavior is determined entirely by the Special operand,
+# which must be the first operand.
+custom Patch
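+# (For example, dumps render a Patch with its Special operand first, using the "&" prefix from
+# Special::dumpPrefix; something like "Patch &StackmapSpecial3, %x0, %x1", where the Special's
+# name here is hypothetical.)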
+
+# Instructions used for lowering C calls. These don't make it to Air generation. They get lowered to
+# something else first. The origin Value must be a CCallValue.
+custom CCall
+custom ColdCCall
+
+# This is a special wasm opcode that branches to a trap handler. It uses the generator located in
+# Air::Code to produce the side-exit code.
+custom WasmBoundsCheck
+
diff --git a/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.cpp b/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.cpp
new file mode 100644
index 000000000..11ca3f3d4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirOptimizeBlockOrder.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+class SortedSuccessors {
+public:
+    SortedSuccessors()
+    {
+    }
+
+    void append(BasicBlock* block)
+    {
+        m_successors.append(block);
+    }
+
+    void process(BlockWorklist& worklist)
+    {
+        // We prefer a stable sort, and we don't want it to go off the rails if we see NaN. Also, the number
+        // of successors is bounded. In fact, it currently cannot be more than 2. :-)
+        bubbleSort(
+            m_successors.begin(), m_successors.end(),
+            [] (BasicBlock* left, BasicBlock* right) {
+                return left->frequency() < right->frequency();
+            });
+
+        // Pushing the successors in ascending order of frequency ensures that the very next block we visit
+        // is our highest-frequency successor (unless that successor has already been visited).
+        for (unsigned i = 0; i < m_successors.size(); ++i)
+            worklist.push(m_successors[i]);
+        
+        m_successors.resize(0);
+    }
+
+private:
+    Vector<BasicBlock*> m_successors;
+};
+
+} // anonymous namespace
+
+Vector<BasicBlock*> blocksInOptimizedOrder(Code& code)
+{
+    Vector<BasicBlock*> blocksInOrder;
+
+    BlockWorklist fastWorklist;
+    SortedSuccessors sortedSuccessors;
+    SortedSuccessors sortedSlowSuccessors;
+    
+    // We expect entrypoint lowering to have already happened.
+    RELEASE_ASSERT(code.numEntrypoints());
+
+    auto appendSuccessor = [&] (const FrequentedBlock& block) {
+        if (block.isRare())
+            sortedSlowSuccessors.append(block.block());
+        else
+            sortedSuccessors.append(block.block());
+    };
+    
+    // For everything but the first entrypoint, we push them in order of frequency and frequency
+    // class.
+    for (unsigned i = 1; i < code.numEntrypoints(); ++i)
+        appendSuccessor(code.entrypoint(i));
+    
+    // Always push the primary entrypoint last so that it gets highest priority.
+    fastWorklist.push(code.entrypoint(0).block());
+    
+    while (BasicBlock* block = fastWorklist.pop()) {
+        blocksInOrder.append(block);
+        for (FrequentedBlock& successor : block->successors())
+            appendSuccessor(successor);
+        sortedSuccessors.process(fastWorklist);
+    }
+
+    BlockWorklist slowWorklist;
+    sortedSlowSuccessors.process(slowWorklist);
+
+    while (BasicBlock* block = slowWorklist.pop()) {
+        // We might have already processed this block.
+        if (fastWorklist.saw(block))
+            continue;
+        
+        blocksInOrder.append(block);
+        for (BasicBlock* successor : block->successorBlocks())
+            sortedSuccessors.append(successor);
+        sortedSuccessors.process(slowWorklist);
+    }
+
+    ASSERT(fastWorklist.isEmpty());
+    ASSERT(slowWorklist.isEmpty());
+
+    return blocksInOrder;
+}
+
+void optimizeBlockOrder(Code& code)
+{
+    PhaseScope phaseScope(code, "optimizeBlockOrder");
+
+    Vector<BasicBlock*> blocksInOrder = blocksInOptimizedOrder(code);
+    
+    // Place blocks into Code's block list according to the ordering in blocksInOrder. We do this by leaking
+    // all of the blocks and then readopting them.
+    for (auto& entry : code.blockList())
+        entry.release();
+
+    code.blockList().resize(0);
+
+    for (unsigned i = 0; i < blocksInOrder.size(); ++i) {
+        BasicBlock* block = blocksInOrder[i];
+        block->setIndex(i);
+        code.blockList().append(std::unique_ptr<BasicBlock>(block));
+    }
+
+    // Finally, flip any branches that we recognize. It's best if the taken successor does not point
+    // at the next block.
+    for (BasicBlock* block : code) {
+        Inst& branch = block->last();
+
+        // It's somewhat tempting to just say that if the block has two successors and the first arg is
+        // invertible, then we can do the optimization. But that's wagging the dog. The fact that an
+        // instruction happens to have an argument that is invertible doesn't mean it's a branch, even though
+        // it is true that currently only branches have invertible arguments. It's also tempting to say that
+        // the /branch flag in AirOpcode.opcodes tells us that something is a branch - except that there,
+        // /branch also means Jump. The approach taken here means that if you add new branch instructions and
+        // forget about this phase, then at worst your new instructions won't opt into the inversion
+        // optimization.  You'll probably realize that as soon as you look at the disassembly, and it
+        // certainly won't cause any correctness issues.
+        
+        switch (branch.kind.opcode) {
+        case Branch8:
+        case Branch32:
+        case Branch64:
+        case BranchTest8:
+        case BranchTest32:
+        case BranchTest64:
+        case BranchFloat:
+        case BranchDouble:
+        case BranchAdd32:
+        case BranchAdd64:
+        case BranchMul32:
+        case BranchMul64:
+        case BranchSub32:
+        case BranchSub64:
+        case BranchNeg32:
+        case BranchNeg64:
+            if (code.findNextBlock(block) == block->successorBlock(0) && branch.args[0].isInvertible()) {
+                std::swap(block->successor(0), block->successor(1));
+                branch.args[0] = branch.args[0].inverted();
+            }
+            break;
+            
+        default:
+            break;
+        }
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.h b/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.h
new file mode 100644
index 000000000..3911fcc8d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+class Code;
+
+// Returns a list of blocks sorted according to what would be the current optimal order. This shares
+// some properties with a pre-order traversal. In particular, each block will appear after at least
+// one of its predecessors.
+Vector<BasicBlock*> blocksInOptimizedOrder(Code&);
+
+// Reorders the basic blocks to keep hot blocks at the top, and maximize the likelihood that a frequently
+// taken edge is just a fall-through.
+
+void optimizeBlockOrder(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirPadInterference.cpp b/Source/JavaScriptCore/b3/air/AirPadInterference.cpp
new file mode 100644
index 000000000..91de56bc8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirPadInterference.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirPadInterference.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void padInterference(Code& code)
+{
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        bool prevHadLate = false;
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            Inst& inst = block->at(instIndex);
+            
+            bool hasEarlyDef = false;
+            bool hasLate = false;
+            inst.forEachArg(
+                [&] (Arg&, Arg::Role role, Arg::Type, Arg::Width) {
+                    switch (role) {
+                    case Arg::EarlyDef:
+                        hasEarlyDef = true;
+                        break;
+                    case Arg::LateUse:
+                    case Arg::Def:
+                    case Arg::ZDef:
+                    case Arg::LateColdUse:
+                    case Arg::UseDef:
+                    case Arg::UseZDef:
+                        hasLate = true;
+                        break;
+                    case Arg::Scratch:
+                        hasEarlyDef = true;
+                        hasLate = true;
+                        break;
+                    case Arg::Use:
+                    case Arg::ColdUse:
+                    case Arg::UseAddr:
+                        break;
+                    }
+                });
+            if (inst.kind.opcode == Patch) {
+                hasEarlyDef |= !inst.extraEarlyClobberedRegs().isEmpty();
+                hasLate |= !inst.extraClobberedRegs().isEmpty();
+            }
+            
+            if (hasEarlyDef && prevHadLate)
+                insertionSet.insert(instIndex, Nop, inst.origin);
+            
+            prevHadLate = hasLate;
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirPadInterference.h b/Source/JavaScriptCore/b3/air/AirPadInterference.h
new file mode 100644
index 000000000..18f80832f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirPadInterference.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This isn't a phase - it's meant to be a utility that other phases use. Air reasons about liveness by
+// reasoning about interference at boundaries between instructions. This can go wrong - for example, a
+// late use in one instruction doesn't actually interfere with an early def of the next instruction, but
+// Air thinks that it does. This is convenient because it works great in the most common case: early uses
+// and late defs. In practice, only the register allocators need to use this, since only they need to be
+// able to color the interference graph using a bounded number of colors.
+//
+// See https://bugs.webkit.org/show_bug.cgi?id=163548#c2 for more info.
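+//
+// Concretely (a reading of AirPadInterference.cpp, not a contract): a Nop is inserted between any
+// instruction that has late uses or defs and a following instruction that has early defs
+// (including Scratch roles and a Patch's extra early clobbers), so that the two sets of actions
+// land on distinct instruction boundaries.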
+
+void padInterference(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirPhaseScope.cpp b/Source/JavaScriptCore/b3/air/AirPhaseScope.cpp
new file mode 100644
index 000000000..062ea2483
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirPhaseScope.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirPhaseScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirValidate.h"
+#include "B3Common.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+PhaseScope::PhaseScope(Code& code, const char* name)
+    : m_code(code)
+    , m_name(name)
+    , m_timingScope(name)
+{
+    if (shouldDumpIRAtEachPhase(AirMode)) {
+        dataLog("Air after ", code.lastPhaseName(), ", before ", name, ":\n");
+        dataLog(code);
+    }
+
+    if (shouldSaveIRBeforePhase())
+        m_dumpBefore = toCString(code);
+}
+
+PhaseScope::~PhaseScope()
+{
+    m_code.setLastPhaseName(m_name);
+    if (shouldValidateIRAtEachPhase())
+        validate(m_code, m_dumpBefore.data());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirPhaseScope.h b/Source/JavaScriptCore/b3/air/AirPhaseScope.h
new file mode 100644
index 000000000..71f788fce
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirPhaseScope.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3TimingScope.h"
+#include <wtf/Noncopyable.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+class PhaseScope {
+    WTF_MAKE_NONCOPYABLE(PhaseScope);
+public:
+    PhaseScope(Code&, const char* name);
+    ~PhaseScope(); // this does validation
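+
+    // Typical usage, as seen throughout the Air phases in this patch:
+    //
+    //     PhaseScope phaseScope(code, "myPhaseName");
+    //
+    // where "myPhaseName" is whatever name the phase wants in dumps and timing output.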
+
+private:
+    Code& m_code;
+    const char* m_name;
+    TimingScope m_timingScope;
+    CString m_dumpBefore;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.cpp b/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.cpp
new file mode 100644
index 000000000..bb0aeab77
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirReportUsedRegisters.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void reportUsedRegisters(Code& code)
+{
+    PhaseScope phaseScope(code, "reportUsedRegisters");
+
+    RegLiveness liveness(code);
+
+    for (BasicBlock* block : code) {
+        RegLiveness::LocalCalc localCalc(liveness, block);
+
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            Inst& inst = block->at(instIndex);
+
+            // Kill dead assignments to registers. For simplicity we say that a store is killable if
+            // it has only late defs and those late defs are to registers that are dead right now.
+            if (!inst.hasNonArgEffects()) {
+                bool canDelete = true;
+                inst.forEachArg(
+                    [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+                        if (Arg::isEarlyDef(role)) {
+                            canDelete = false;
+                            return;
+                        }
+                        if (!Arg::isLateDef(role))
+                            return;
+                        if (!arg.isReg()) {
+                            canDelete = false;
+                            return;
+                        }
+                        if (localCalc.isLive(arg.reg())) {
+                            canDelete = false;
+                            return;
+                        }
+                    });
+                if (canDelete)
+                    inst = Inst();
+            }
+            
+            if (inst.kind.opcode == Patch) {
+                RegisterSet registerSet;
+                for (Reg reg : localCalc.live())
+                    registerSet.set(reg);
+                inst.reportUsedRegisters(registerSet);
+            }
+            localCalc.execute(instIndex);
+        }
+        
+        block->insts().removeAllMatching(
+            [&] (const Inst& inst) -> bool {
+                return !inst;
+            });
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.h b/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.h
new file mode 100644
index 000000000..ea175dcf4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Performs a liveness analysis over registers and reports the live registers to every Special. Takes
+// the opportunity to kill dead assignments to registers, since it has access to register liveness.
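+//
+// A sketch of the killing rule (names hypothetical): a "Move %x1, %x0" whose only effect is a
+// late def of register %x0 gets deleted when %x0 is dead after the instruction.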
+
+void reportUsedRegisters(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp b/Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp
new file mode 100644
index 000000000..c66f63feb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirSimplifyCFG.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool simplifyCFG(Code& code)
+{
+    const bool verbose = false;
+    
+    PhaseScope phaseScope(code, "simplifyCFG");
+    
+    // We have three easy simplification rules:
+    //
+    // 1) If a successor is a block that just jumps to another block, then jump directly to
+    //    that block.
+    //
+    // 2) If all successors are the same and the operation has no effects, then use a jump
+    //    instead.
+    //
+    // 3) If you jump to a block that is not you and has one predecessor, then merge.
+    //
+    // Note that because of the first rule, this phase may introduce critical edges. That's fine.
+    // If you need broken critical edges, then you have to break them yourself.
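+    //
+    // As an illustration of rule (1), with hypothetical blocks: if block A ends in a branch to B
+    // and C, and B contains nothing but "Jump D", then A's successor is retargeted from B to D;
+    // B itself is left alone and disappears when reachability is recomputed below.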
+
+    bool result = false;
+    for (;;) {
+        if (verbose) {
+            dataLog("Air before an iteration of simplifyCFG:\n");
+            dataLog(code);
+        }
+        
+        bool changed = false;
+        for (BasicBlock* block : code) {
+            // We rely on predecessors being conservatively correct. Verify this here.
+            if (shouldValidateIRAtEachPhase()) {
+                for (BasicBlock* block : code) {
+                    for (BasicBlock* successor : block->successorBlocks())
+                        RELEASE_ASSERT(successor->containsPredecessor(block));
+                }
+            }
+
+            // We don't care about blocks that don't have successors.
+            if (!block->numSuccessors())
+                continue;
+
+            // First check if any of the successors of this block can be forwarded over.
+            for (BasicBlock*& successor : block->successorBlocks()) {
+                if (successor != block
+                    && successor->size() == 1
+                    && successor->last().kind.opcode == Jump) {
+                    BasicBlock* newSuccessor = successor->successorBlock(0);
+                    if (newSuccessor != successor) {
+                        if (verbose) {
+                            dataLog(
+                                "Replacing ", pointerDump(block), "->", pointerDump(successor),
+                                " with ", pointerDump(block), "->", pointerDump(newSuccessor), "\n");
+                        }
+                        // Note that we do not do replacePredecessor() because the block we're
+                        // skipping will still have newSuccessor as its successor.
+                        newSuccessor->addPredecessor(block);
+                        successor = newSuccessor;
+                        changed = true;
+                    }
+                }
+            }
+
+            // Now check if the block's terminal can be replaced with a jump. The terminal must not
+            // have weird effects.
+            if (block->numSuccessors() > 1 
+                && !block->last().hasNonControlEffects()) {
+                // All of the successors must be the same.
+                bool allSame = true;
+                BasicBlock* firstSuccessor = block->successorBlock(0);
+                for (unsigned i = 1; i < block->numSuccessors(); ++i) {
+                    if (block->successorBlock(i) != firstSuccessor) {
+                        allSame = false;
+                        break;
+                    }
+                }
+                if (allSame) {
+                    if (verbose)
+                        dataLog("Changing ", pointerDump(block), "'s terminal to a Jump.\n");
+                    block->last() = Inst(Jump, block->last().origin);
+                    block->successors().resize(1);
+                    block->successors()[0].frequency() = FrequencyClass::Normal;
+                    changed = true;
+                }
+            }
+
+            // Finally handle jumps to a block with one predecessor.
+            if (block->numSuccessors() == 1
+                && !block->last().hasNonControlEffects()) {
+                BasicBlock* successor = block->successorBlock(0);
+                if (successor != block && successor->numPredecessors() == 1) {
+                    RELEASE_ASSERT(successor->predecessor(0) == block);
+
+                    // We can merge the two blocks because the predecessor only jumps to the successor
+                    // and the successor is only reachable from the predecessor.
+
+                    // Remove the terminal.
+                    Value* origin = block->insts().takeLast().origin;
+
+                    // Append the full contents of the successor to the predecessor.
+                    block->insts().reserveCapacity(block->size() + successor->size());
+                    for (Inst& inst : *successor)
+                        block->appendInst(WTFMove(inst));
+
+                    // Make sure that our successors are the successor's successors.
+                    block->successors() = WTFMove(successor->successors());
+
+                    // Make sure that the successor has nothing left in it except an oops.
+                    successor->resize(1);
+                    successor->last() = Inst(Oops, origin);
+                    successor->successors().clear();
+
+                    // Ensure that the predecessors of block's new successors know what's up.
+                    for (BasicBlock* newSuccessor : block->successorBlocks())
+                        newSuccessor->replacePredecessor(successor, block);
+
+                    if (verbose)
+                        dataLog("Merged ", pointerDump(block), "->", pointerDump(successor), "\n");
+                    changed = true;
+                }
+            }
+        }
+
+        if (!changed)
+            break;
+        result = true;
+        code.resetReachability();
+    }
+
+    return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/Source/JavaScriptCore/b3/air/AirSimplifyCFG.h b/Source/JavaScriptCore/b3/air/AirSimplifyCFG.h
new file mode 100644
index 000000000..7ac510d4b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSimplifyCFG.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Simplifies the control flow graph by removing jump-only blocks and merging blocks where it is safe.
+
+bool simplifyCFG(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSpecial.cpp b/Source/JavaScriptCore/b3/air/AirSpecial.cpp
new file mode 100644
index 000000000..e825767b0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSpecial.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include <limits.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+const char* const Special::dumpPrefix = "&";
+
+Special::Special()
+{
+}
+
+Special::~Special()
+{
+}
+
+CString Special::name() const
+{
+    StringPrintStream out;
+    dumpImpl(out);
+    return out.toCString();
+}
+
+std::optional<unsigned> Special::shouldTryAliasingDef(Inst&)
+{
+    return std::nullopt;
+}
+
+bool Special::isTerminal(Inst&)
+{
+    return false;
+}
+
+bool Special::hasNonArgEffects(Inst&)
+{
+    return true;
+}
+
+bool Special::hasNonArgNonControlEffects(Inst&)
+{
+    return true;
+}
+
+void Special::dump(PrintStream& out) const
+{
+    out.print(dumpPrefix);
+    dumpImpl(out);
+    if (m_index != UINT_MAX)
+        out.print(m_index);
+}
+
+void Special::deepDump(PrintStream& out) const
+{
+    out.print(*this, ": ");
+    deepDumpImpl(out);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSpecial.h b/Source/JavaScriptCore/b3/air/AirSpecial.h
new file mode 100644
index 000000000..480cbfcba
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSpecial.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include "B3SparseCollection.h"
+#include <limits.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/ScopedLambda.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+struct GenerationContext;
+
+class Special {
+    WTF_MAKE_NONCOPYABLE(Special);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    static const char* const dumpPrefix;
+    
+    Special();
+    virtual ~Special();
+
+    Code& code() const { return *m_code; }
+
+    CString name() const;
+
+    virtual void forEachArg(Inst&, const ScopedLambda<Inst::EachArgCallback>&) = 0;
+    virtual bool isValid(Inst&) = 0;
+    virtual bool admitsStack(Inst&, unsigned argIndex) = 0;
+    virtual std::optional<unsigned> shouldTryAliasingDef(Inst&);
+
+    // This gets called for each Inst that uses this Special. Note that there is no way to
+    // guarantee that a Special gets used from just one Inst, because Air might taildup late. So,
+    // if you want to pass this information down to generate(), then you have to either:
+    //
+    // 1) Generate Air that starts with a separate Special per Patch Inst, and then merge
+    //    usedRegister sets. This is probably not great, but it optimizes for the common case that
+    //    Air didn't duplicate code or that such duplication didn't cause any interesting changes to
+    //    register assignment.
+    //
+    // 2) Have the Special maintain a HashMap<Inst*, RegisterSet>. This works because the analysis
+    //    that feeds into this call is performed just before code generation and there is no way
+    //    for the Vector<>'s that contain the Insts to be reallocated. This allows generate() to
+    //    consult the HashMap.
+    //
+    // 3) Hybrid: you could use (1) and fire up a HashMap if you see multiple calls.
+    //
+    // Note that it's not possible to rely on reportUsedRegisters() being called in the same order
+    // as generate(). If we could rely on that, then we could just have each Special instance
+    // maintain a Vector of RegisterSet's and then process that vector in the right order in
+    // generate(). But, the ordering difference is unlikely to change since it would harm the
+    // performance of the liveness analysis.
+    //
+    // Currently, we do (1) for B3 stackmaps.
+    virtual void reportUsedRegisters(Inst&, const RegisterSet&) = 0;
+    
+    virtual CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&) = 0;
+
+    virtual RegisterSet extraEarlyClobberedRegs(Inst&) = 0;
+    virtual RegisterSet extraClobberedRegs(Inst&) = 0;
+    
+    // By default, this returns false.
+    virtual bool isTerminal(Inst&);
+
+    // By default, this returns true.
+    virtual bool hasNonArgEffects(Inst&);
+
+    // By default, this returns true.
+    virtual bool hasNonArgNonControlEffects(Inst&);
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+protected:
+    virtual void dumpImpl(PrintStream&) const = 0;
+    virtual void deepDumpImpl(PrintStream&) const = 0;
+
+private:
+    friend class Code;
+    friend class SparseCollection<Special>;
+
+    unsigned m_index { UINT_MAX };
+    Code* m_code { nullptr };
+};
+
+class DeepSpecialDump {
+public:
+    DeepSpecialDump(const Special* special)
+        : m_special(special)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_special)
+            m_special->deepDump(out);
+        else
+            out.print("<null>");
+    }
+
+private:
+    const Special* m_special;
+};
+
+inline DeepSpecialDump deepDump(const Special* special)
+{
+    return DeepSpecialDump(special);
+}
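+
+// (Usage sketch: dataLog(deepDump(special)) prints the Special's deep dump, or "<null>" when the
+// pointer is null.)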
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSpillEverything.cpp b/Source/JavaScriptCore/b3/air/AirSpillEverything.cpp
new file mode 100644
index 000000000..ebf3774a5
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSpillEverything.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirSpillEverything.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPadInterference.h"
+#include "AirPhaseScope.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void spillEverything(Code& code)
+{
+    PhaseScope phaseScope(code, "spillEverything");
+    
+    padInterference(code);
+
+    // We want to know the set of registers used at every point in every basic block.
+    IndexMap<BasicBlock, Vector<RegisterSet>> usedRegisters(code.size());
+    GPLiveness gpLiveness(code);
+    FPLiveness fpLiveness(code);
+    for (BasicBlock* block : code) {
+        GPLiveness::LocalCalc gpLocalCalc(gpLiveness, block);
+        FPLiveness::LocalCalc fpLocalCalc(fpLiveness, block);
+
+        usedRegisters[block].resize(block->size() + 1);
+
+        auto setUsedRegisters = [&] (unsigned index) {
+            RegisterSet& registerSet = usedRegisters[block][index];
+            for (Tmp tmp : gpLocalCalc.live()) {
+                if (tmp.isReg())
+                    registerSet.set(tmp.reg());
+            }
+            for (Tmp tmp : fpLocalCalc.live()) {
+                if (tmp.isReg())
+                    registerSet.set(tmp.reg());
+            }
+
+            // Gotta account for dead assignments to registers. These may happen because the input
+            // code is suboptimal.
+            Inst::forEachDefWithExtraClobberedRegs(
+                block->get(index - 1), block->get(index),
+                [&] (const Tmp& tmp, Arg::Role, Arg::Type, Arg::Width) {
+                    if (tmp.isReg())
+                        registerSet.set(tmp.reg());
+                });
+        };
+
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            setUsedRegisters(instIndex + 1);
+            gpLocalCalc.execute(instIndex);
+            fpLocalCalc.execute(instIndex);
+        }
+        setUsedRegisters(0);
+    }
+
+    // Allocate a stack slot for each tmp.
+    Vector<StackSlot*> allStackSlots[Arg::numTypes];
+    for (unsigned typeIndex = 0; typeIndex < Arg::numTypes; ++typeIndex) {
+        Vector<StackSlot*>& stackSlots = allStackSlots[typeIndex];
+        Arg::Type type = static_cast<Arg::Type>(typeIndex);
+        stackSlots.resize(code.numTmps(type));
+        for (unsigned tmpIndex = code.numTmps(type); tmpIndex--;)
+            stackSlots[tmpIndex] = code.addStackSlot(8, StackSlotKind::Spill);
+    }
+
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            RegisterSet& setBefore = usedRegisters[block][instIndex];
+            RegisterSet& setAfter = usedRegisters[block][instIndex + 1];
+            Inst& inst = block->at(instIndex);
+
+            // First try to spill directly.
+            for (unsigned i = 0; i < inst.args.size(); ++i) {
+                Arg& arg = inst.args[i];
+
+                if (arg.isTmp()) {
+                    if (arg.isReg())
+                        continue;
+
+                    if (inst.admitsStack(i)) {
+                        StackSlot* stackSlot = allStackSlots[arg.type()][arg.tmpIndex()];
+                        arg = Arg::stack(stackSlot);
+                        continue;
+                    }
+                }
+            }
+
+            // Now fall back on spilling using separate Move's to load/store the tmp.
+            inst.forEachTmp(
+                [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width) {
+                    if (tmp.isReg())
+                        return;
+                    
+                    StackSlot* stackSlot = allStackSlots[type][tmp.tmpIndex()];
+                    Arg arg = Arg::stack(stackSlot);
+
+                    // Need to figure out a register to use. How we do that depends on the role.
+                    Reg chosenReg;
+                    switch (role) {
+                    case Arg::Use:
+                    case Arg::ColdUse:
+                        for (Reg reg : code.regsInPriorityOrder(type)) {
+                            if (!setBefore.get(reg)) {
+                                setBefore.set(reg);
+                                chosenReg = reg;
+                                break;
+                            }
+                        }
+                        break;
+                    case Arg::Def:
+                    case Arg::ZDef:
+                        for (Reg reg : code.regsInPriorityOrder(type)) {
+                            if (!setAfter.get(reg)) {
+                                setAfter.set(reg);
+                                chosenReg = reg;
+                                break;
+                            }
+                        }
+                        break;
+                    case Arg::UseDef:
+                    case Arg::UseZDef:
+                    case Arg::LateUse:
+                    case Arg::LateColdUse:
+                    case Arg::Scratch:
+                    case Arg::EarlyDef:
+                        for (Reg reg : code.regsInPriorityOrder(type)) {
+                            if (!setBefore.get(reg) && !setAfter.get(reg)) {
+                                setAfter.set(reg);
+                                setBefore.set(reg);
+                                chosenReg = reg;
+                                break;
+                            }
+                        }
+                        break;
+                    case Arg::UseAddr:
+                        // We will never UseAddr a Tmp, that doesn't make sense.
+                        RELEASE_ASSERT_NOT_REACHED();
+                        break;
+                    }
+                    RELEASE_ASSERT(chosenReg);
+
+                    tmp = Tmp(chosenReg);
+
+                    Opcode move = type == Arg::GP ? Move : MoveDouble;
+
+                    if (Arg::isAnyUse(role) && role != Arg::Scratch)
+                        insertionSet.insert(instIndex, move, inst.origin, arg, tmp);
+                    if (Arg::isAnyDef(role))
+                        insertionSet.insert(instIndex + 1, move, inst.origin, tmp, arg);
+                });
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSpillEverything.h b/Source/JavaScriptCore/b3/air/AirSpillEverything.h
new file mode 100644
index 000000000..0fdca6677
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSpillEverything.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a phase for testing. It behaves like a register allocator in the sense that it
+// eliminates temporaries from the program. It accomplishes this by always spilling all
+// temporaries. The resulting code is going to be very inefficient. This phase is great if you
+// think that there is a bug in the register allocator. You can confirm this by running this
+// phase instead of the register allocator.
+//
+// Note that even though this phase does the cheapest thing possible, it's not even written in a
+// particularly efficient way. So, don't get any ideas about using this phase to reduce compiler
+// latency. If you wanted to do that, you should come up with a clever algorithm instead of using
+// this silly thing.
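+//
+// A minimal usage sketch (illustrative; this is not the actual JSC pipeline
+// driver code):
+//
+//     spillEverything(code);  // in place of the usual register allocation phase
+//     allocateStack(code);    // spill slots still need concrete FP offsets
+//
+// allocateStack() is the separate Air phase that assigns stack offsets; whether
+// you call it directly like this depends on where you hook into the pipeline.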
+
+void spillEverything(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirStackSlot.cpp b/Source/JavaScriptCore/b3/air/AirStackSlot.cpp
new file mode 100644
index 000000000..58cac0657
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirStackSlot.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirStackSlot.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackSlot.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void StackSlot::setOffsetFromFP(intptr_t value)
+{
+    m_offsetFromFP = value;
+    if (m_b3Slot)
+        m_b3Slot->m_offsetFromFP = value;
+}
+
+unsigned StackSlot::jsHash() const
+{
+    return static_cast<unsigned>(m_kind) + m_byteSize * 3 + m_offsetFromFP * 7;
+}
+
+void StackSlot::dump(PrintStream& out) const
+{
+    if (isSpill())
+        out.print("spill");
+    else
+        out.print("stack");
+    out.print(m_index);
+}
+
+void StackSlot::deepDump(PrintStream& out) const
+{
+    out.print("byteSize = ", m_byteSize, ", offsetFromFP = ", m_offsetFromFP, ", kind = ", m_kind);
+    if (m_b3Slot)
+        out.print(", b3Slot = ", *m_b3Slot, ": (", B3::deepDump(m_b3Slot), ")");
+}
+
+StackSlot::StackSlot(unsigned byteSize, StackSlotKind kind, B3::StackSlot* b3Slot)
+    : m_byteSize(byteSize)
+    , m_offsetFromFP(b3Slot ? b3Slot->offsetFromFP() : 0)
+    , m_kind(kind)
+    , m_b3Slot(b3Slot)
+{
+    ASSERT(byteSize);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirStackSlot.h b/Source/JavaScriptCore/b3/air/AirStackSlot.h
new file mode 100644
index 000000000..85c94acc8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirStackSlot.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirStackSlotKind.h"
+#include "B3SparseCollection.h"
+#include <limits.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class StackSlot;
+
+namespace Air {
+
+class StackSlot {
+    WTF_MAKE_NONCOPYABLE(StackSlot);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    unsigned byteSize() const { return m_byteSize; }
+    StackSlotKind kind() const { return m_kind; }
+    bool isLocked() const { return m_kind == StackSlotKind::Locked; }
+    bool isSpill() const { return m_kind == StackSlotKind::Spill; }
+    unsigned index() const { return m_index; }
+
+    void ensureSize(unsigned requestedSize)
+    {
+        ASSERT(!m_offsetFromFP);
+        m_byteSize = std::max(m_byteSize, requestedSize);
+    }
+
+    unsigned alignment() const
+    {
+        if (byteSize() <= 1)
+            return 1;
+        if (byteSize() <= 2)
+            return 2;
+        if (byteSize() <= 4)
+            return 4;
+        return 8;
+    }
+
+    B3::StackSlot* b3Slot() const { return m_b3Slot; }
+
+    // Zero means that it's not yet assigned.
+    intptr_t offsetFromFP() const { return m_offsetFromFP; }
+
+    // This should usually just be called from phases that do stack allocation. But you can
+    // totally force a stack slot to land at some offset.
+    void setOffsetFromFP(intptr_t);
+    
+    // This computes a hash for comparing this to JSAir's StackSlot.
+    unsigned jsHash() const;
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+private:
+    friend class Code;
+    friend class SparseCollection<StackSlot>;
+
+    StackSlot(unsigned byteSize, StackSlotKind, B3::StackSlot*);
+    
+    unsigned m_byteSize { 0 };
+    unsigned m_index { UINT_MAX };
+    intptr_t m_offsetFromFP { 0 };
+    StackSlotKind m_kind { StackSlotKind::Locked };
+    B3::StackSlot* m_b3Slot { nullptr };
+};
+
+class DeepStackSlotDump {
+public:
+    DeepStackSlotDump(const StackSlot* slot)
+        : m_slot(slot)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_slot)
+            m_slot->deepDump(out);
+        else
+            out.print("<null>");
+    }
+
+private:
+    const StackSlot* m_slot;
+};
+
+inline DeepStackSlotDump deepDump(const StackSlot* slot)
+{
+    return DeepStackSlotDump(slot);
+}
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+inline void printInternal(PrintStream& out, JSC::B3::Air::StackSlot* stackSlot)
+{
+    out.print(pointerDump(stackSlot));
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp b/Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp
new file mode 100644
index 000000000..af83de1b9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirStackSlotKind.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3::Air;
+
+void printInternal(PrintStream& out, StackSlotKind kind)
+{
+    switch (kind) {
+    case StackSlotKind::Locked:
+        out.print("Locked");
+        return;
+    case StackSlotKind::Spill:
+        out.print("Spill");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirStackSlotKind.h b/Source/JavaScriptCore/b3/air/AirStackSlotKind.h
new file mode 100644
index 000000000..9ef205772
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirStackSlotKind.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+enum class StackSlotKind : uint8_t {
+    // A locked stack slot is an area of stack requested by the client. It cannot be killed. The
+    // client can get its FP offset and write to it from stack walking code, so we must assume
+    // that reads and writes to a locked stack slot can be clobbered the same way as reads and
+    // writes to any memory location.
+    Locked,
+
+    // A spill slot. These have fundamentally different behavior than a typical memory location.
+    // They are lowered to from temporaries. This means, for example, that a 32-bit ZDef store to
+    // an 8-byte stack slot will zero the top 4 bytes, even though a 32-bit ZDef store to any other
+    // kind of memory location would do no such thing. UseAddr on a spill slot is not allowed, so
+    // they never escape.
+    Spill
+
+    // FIXME: We should add a third mode, which means that the stack slot will be read asynchronously
+    // as with Locked, but never written to asynchronously. Then, Air could optimize spilling and
+    // filling by tracking whether the value had been stored to a read-only locked slot. If it had,
+    // then we can refill from that slot.
+    // https://bugs.webkit.org/show_bug.cgi?id=150587
+};
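+
+// To illustrate the Spill semantics above: after
+//
+//     Move32 %src, stack3   // stack3 is an 8-byte Spill slot; Move32 is a 32-bit ZDef
+//
+// the top 4 bytes of stack3 are known to be zero, so a later 64-bit load from the
+// slot sees the zero-extended value. A Locked slot makes no such promise.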
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::Air::StackSlotKind);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirTmp.cpp b/Source/JavaScriptCore/b3/air/AirTmp.cpp
new file mode 100644
index 000000000..487f52177
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmp.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirTmp.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+void Tmp::dump(PrintStream& out) const
+{
+    if (!*this) {
+        out.print("<none>");
+        return;
+    }
+
+    if (isReg()) {
+        out.print(reg());
+        return;
+    }
+
+    if (isGP()) {
+        out.print("%tmp", gpTmpIndex());
+        return;
+    }
+
+    out.print("%ftmp", fpTmpIndex());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirTmp.h b/Source/JavaScriptCore/b3/air/AirTmp.h
new file mode 100644
index 000000000..c01427c2b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmp.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "Reg.h"
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Arg;
+
+// A Tmp is a generalization of a register. It can be used to refer to any GPR or FPR. It can also
+// be used to refer to an unallocated register (i.e. a temporary). Like many Air classes, we use
+// deliberately terse naming since we will have to use this name a lot.
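+//
+// For example (a sketch using the API declared below):
+//
+//     Tmp dead;                          // null Tmp; operator bool() returns false
+//     Tmp reg = Tmp(Reg::fromIndex(0));  // refers to a concrete machine register
+//     Tmp temp = Tmp::gpTmpForIndex(0);  // the first unallocated GP temporary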
+
+class Tmp {
+public:
+    Tmp()
+        : m_value(0)
+    {
+    }
+
+    explicit Tmp(Reg reg)
+    {
+        if (reg) {
+            if (reg.isGPR())
+                m_value = encodeGPR(reg.gpr());
+            else
+                m_value = encodeFPR(reg.fpr());
+        } else
+            m_value = 0;
+    }
+
+    explicit Tmp(const Arg&);
+
+    static Tmp gpTmpForIndex(unsigned index)
+    {
+        Tmp result;
+        result.m_value = encodeGPTmp(index);
+        return result;
+    }
+
+    static Tmp fpTmpForIndex(unsigned index)
+    {
+        Tmp result;
+        result.m_value = encodeFPTmp(index);
+        return result;
+    }
+
+    explicit operator bool() const { return !!m_value; }
+
+    bool isGP() const
+    {
+        return isEncodedGP(m_value);
+    }
+
+    bool isFP() const
+    {
+        return isEncodedFP(m_value);
+    }
+
+    bool isGPR() const
+    {
+        return isEncodedGPR(m_value);
+    }
+
+    bool isFPR() const
+    {
+        return isEncodedFPR(m_value);
+    }
+
+    bool isReg() const
+    {
+        return isGPR() || isFPR();
+    }
+
+    GPRReg gpr() const
+    {
+        return decodeGPR(m_value);
+    }
+
+    FPRReg fpr() const
+    {
+        return decodeFPR(m_value);
+    }
+
+    Reg reg() const
+    {
+        if (isGP())
+            return gpr();
+        return fpr();
+    }
+
+    bool hasTmpIndex() const
+    {
+        return !isReg();
+    }
+
+    unsigned gpTmpIndex() const
+    {
+        return decodeGPTmp(m_value);
+    }
+
+    unsigned fpTmpIndex() const
+    {
+        return decodeFPTmp(m_value);
+    }
+
+    unsigned tmpIndex() const
+    {
+        if (isGP())
+            return gpTmpIndex();
+        return fpTmpIndex();
+    }
+
+    bool isAlive() const
+    {
+        return !!*this;
+    }
+
+    bool operator==(const Tmp& other) const
+    {
+        return m_value == other.m_value;
+    }
+
+    bool operator!=(const Tmp& other) const
+    {
+        return !(*this == other);
+    }
+
+    void dump(PrintStream& out) const;
+
+    Tmp(WTF::HashTableDeletedValueType)
+        : m_value(std::numeric_limits<int>::max())
+    {
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return *this == Tmp(WTF::HashTableDeletedValue);
+    }
+
+    unsigned hash() const
+    {
+        return WTF::IntHash<int>::hash(m_value);
+    }
+
+    unsigned internalValue() const { return static_cast<unsigned>(m_value); }
+
+    static Tmp tmpForInternalValue(unsigned index)
+    {
+        Tmp result;
+        result.m_value = static_cast<int>(index);
+        return result;
+    }
+
+private:
+    static int encodeGP(unsigned index)
+    {
+        return 1 + index;
+    }
+
+    static int encodeFP(unsigned index)
+    {
+        return -1 - index;
+    }
+
+    static int encodeGPR(GPRReg gpr)
+    {
+        return encodeGP(gpr - MacroAssembler::firstRegister());
+    }
+
+    static int encodeFPR(FPRReg fpr)
+    {
+        return encodeFP(fpr - MacroAssembler::firstFPRegister());
+    }
+
+    static int encodeGPTmp(unsigned index)
+    {
+        return encodeGPR(MacroAssembler::lastRegister()) + 1 + index;
+    }
+
+    static int encodeFPTmp(unsigned index)
+    {
+        return encodeFPR(MacroAssembler::lastFPRegister()) - 1 - index;
+    }
+
+    static bool isEncodedGP(int value)
+    {
+        return value > 0;
+    }
+
+    static bool isEncodedFP(int value)
+    {
+        return value < 0;
+    }
+
+    static bool isEncodedGPR(int value)
+    {
+        return isEncodedGP(value) && value <= encodeGPR(MacroAssembler::lastRegister());
+    }
+
+    static bool isEncodedFPR(int value)
+    {
+        return isEncodedFP(value) && value >= encodeFPR(MacroAssembler::lastFPRegister());
+    }
+
+    static bool isEncodedGPTmp(int value)
+    {
+        return isEncodedGP(value) && !isEncodedGPR(value);
+    }
+
+    static bool isEncodedFPTmp(int value)
+    {
+        return isEncodedFP(value) && !isEncodedFPR(value);
+    }
+
+    static GPRReg decodeGPR(int value)
+    {
+        ASSERT(isEncodedGPR(value));
+        return static_cast<GPRReg>(
+            (value - encodeGPR(MacroAssembler::firstRegister())) + MacroAssembler::firstRegister());
+    }
+
+    static FPRReg decodeFPR(int value)
+    {
+        ASSERT(isEncodedFPR(value));
+        return static_cast<FPRReg>(
+            (encodeFPR(MacroAssembler::firstFPRegister()) - value) +
+            MacroAssembler::firstFPRegister());
+    }
+
+    static unsigned decodeGPTmp(int value)
+    {
+        ASSERT(isEncodedGPTmp(value));
+        return value - (encodeGPR(MacroAssembler::lastRegister()) + 1);
+    }
+
+    static unsigned decodeFPTmp(int value)
+    {
+        ASSERT(isEncodedFPTmp(value));
+        return (encodeFPR(MacroAssembler::lastFPRegister()) - 1) - value;
+    }
+
+    // 0: empty Tmp
+    // positive: GPRs and then GP temps.
+    // negative: FPRs and then FP temps.
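+    //
+    // Worked example, derived from the encode*/decode* helpers above: with N
+    // machine GPRs, the first GPR encodes as 1 and the last as N, so GP temp 0
+    // encodes as N + 1, GP temp 1 as N + 2, and so on. The FP side mirrors this
+    // with negative values: the first FPR is -1 and FP temp 0 is one past the
+    // last FPR on the negative side.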
+    int m_value;
+};
+
+struct TmpHash {
+    static unsigned hash(const Tmp& key) { return key.hash(); }
+    static bool equal(const Tmp& a, const Tmp& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Air::Tmp> {
+    typedef JSC::B3::Air::TmpHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Air::Tmp> : SimpleClassHashTraits<JSC::B3::Air::Tmp> { };
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirTmpInlines.h b/Source/JavaScriptCore/b3/air/AirTmpInlines.h
new file mode 100644
index 000000000..a7de098b4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmpInlines.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirTmp.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+inline Tmp::Tmp(const Arg& arg)
+{
+    *this = arg.tmp();
+}
+
+// When a Hash structure is too slow or when Sets contain most values, you can
+// use direct array addressing with Tmps.
+template<Arg::Type type>
+struct AbsoluteTmpMapper;
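+
+// For instance, a dense per-tmp side table can be sized and indexed like this
+// (a sketch; 'code' and 'tmp' are placeholders, but the register allocators in
+// this directory use the same pattern):
+//
+//     Vector<float> score(AbsoluteTmpMapper<Arg::GP>::absoluteIndex(code.numTmps(Arg::GP)));
+//     score[AbsoluteTmpMapper<Arg::GP>::absoluteIndex(tmp)] += 1;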
+
+template<>
+struct AbsoluteTmpMapper<Arg::GP> {
+    static unsigned absoluteIndex(const Tmp& tmp)
+    {
+        ASSERT(tmp.isGP());
+        ASSERT(static_cast<int>(tmp.internalValue()) > 0);
+        return tmp.internalValue();
+    }
+
+    static unsigned absoluteIndex(unsigned tmpIndex)
+    {
+        return absoluteIndex(Tmp::gpTmpForIndex(tmpIndex));
+    }
+
+    static unsigned lastMachineRegisterIndex()
+    {
+        return absoluteIndex(Tmp(MacroAssembler::lastRegister()));
+    }
+
+    static Tmp tmpFromAbsoluteIndex(unsigned tmpIndex)
+    {
+        return Tmp::tmpForInternalValue(tmpIndex);
+    }
+};
+
+template<>
+struct AbsoluteTmpMapper<Arg::FP> {
+    static unsigned absoluteIndex(const Tmp& tmp)
+    {
+        ASSERT(tmp.isFP());
+        ASSERT(static_cast<int>(tmp.internalValue()) < 0);
+        return -tmp.internalValue();
+    }
+
+    static unsigned absoluteIndex(unsigned tmpIndex)
+    {
+        return absoluteIndex(Tmp::fpTmpForIndex(tmpIndex));
+    }
+
+    static unsigned lastMachineRegisterIndex()
+    {
+        return absoluteIndex(Tmp(MacroAssembler::lastFPRegister()));
+    }
+
+    static Tmp tmpFromAbsoluteIndex(unsigned tmpIndex)
+    {
+        return Tmp::tmpForInternalValue(-tmpIndex);
+    }
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirTmpWidth.cpp b/Source/JavaScriptCore/b3/air/AirTmpWidth.cpp
new file mode 100644
index 000000000..f1173c022
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmpWidth.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirTmpWidth.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+TmpWidth::TmpWidth()
+{
+}
+
+TmpWidth::TmpWidth(Code& code)
+{
+    recompute(code);
+}
+
+TmpWidth::~TmpWidth()
+{
+}
+
+void TmpWidth::recompute(Code& code)
+{
+    // Set this to true to cause this analysis to always return pessimistic results.
+    const bool beCareful = false;
+
+    const bool verbose = false;
+
+    if (verbose) {
+        dataLog("Code before TmpWidth:\n");
+        dataLog(code);
+    }
+    
+    m_width.clear();
+    
+    auto assumeTheWorst = [&] (Tmp tmp) {
+        Widths& widths = m_width.add(tmp, Widths()).iterator->value;
+        Arg::Type type = Arg(tmp).type();
+        widths.use = Arg::conservativeWidth(type);
+        widths.def = Arg::conservativeWidth(type);
+    };
+    
+    // Assume the worst for registers.
+    RegisterSet::allRegisters().forEach(
+        [&] (Reg reg) {
+            assumeTheWorst(Tmp(reg));
+        });
+
+    if (beCareful) {
+        code.forAllTmps(assumeTheWorst);
+        
+        // We fall through because the fixpoint that follows can only make things even more
+        // conservative. This mode isn't meant to be fast, just safe.
+    }
+
+    // Now really analyze everything except Moves over Tmps, but set aside those Moves so we can
+    // find them quickly during the fixpoint below. Note that we can make this analysis stronger by
+    // recognizing more kinds of Moves, or anything that has Move-like behavior, though it's
+    // probably not worth it.
+    Vector<Inst*> moves;
+    for (BasicBlock* block : code) {
+        for (Inst& inst : *block) {
+            if (inst.kind.opcode == Move && inst.args[1].isTmp()) {
+                if (inst.args[0].isTmp()) {
+                    // Make sure that both sides of the Move have a width already initialized. The
+                    // fixpoint below assumes that it never has to add things to the HashMap.
+                    m_width.add(inst.args[0].tmp(), Widths(Arg::GP));
+                    m_width.add(inst.args[1].tmp(), Widths(Arg::GP));
+                    
+                    moves.append(&inst);
+                    continue;
+                }
+                if (inst.args[0].isImm()
+                    && inst.args[0].value() >= 0) {
+                    Tmp tmp = inst.args[1].tmp();
+                    Widths& widths = m_width.add(tmp, Widths(Arg::GP)).iterator->value;
+                    
+                    if (inst.args[0].value() <= std::numeric_limits<uint8_t>::max())
+                        widths.def = std::max(widths.def, Arg::Width8);
+                    else if (inst.args[0].value() <= std::numeric_limits<uint16_t>::max())
+                        widths.def = std::max(widths.def, Arg::Width16);
+                    else if (inst.args[0].value() <= std::numeric_limits<uint32_t>::max())
+                        widths.def = std::max(widths.def, Arg::Width32);
+                    else
+                        widths.def = std::max(widths.def, Arg::Width64);
+
+                    continue;
+                }
+            }
+            inst.forEachTmp(
+                [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width width) {
+                    Widths& widths = m_width.add(tmp, Widths(type)).iterator->value;
+                    
+                    if (Arg::isAnyUse(role))
+                        widths.use = std::max(widths.use, width);
+
+                    if (Arg::isZDef(role))
+                        widths.def = std::max(widths.def, width);
+                    else if (Arg::isAnyDef(role))
+                        widths.def = Arg::conservativeWidth(type);
+                });
+        }
+    }
+
+    // Finally, fixpoint over the Move's.
+    bool changed = true;
+    while (changed) {
+        changed = false;
+        for (Inst* move : moves) {
+            ASSERT(move->kind.opcode == Move);
+            ASSERT(move->args[0].isTmp());
+            ASSERT(move->args[1].isTmp());
+
+            // We already ensure that both tmps are added to the width map. That's important
+            // because you cannot add both tmps here while simultaneously getting a reference to
+            // their values, since the second add would invalidate the reference returned by the
+            // first one.
+            Widths& srcWidths = m_width.find(move->args[0].tmp())->value;
+            Widths& dstWidths = m_width.find(move->args[1].tmp())->value;
+
+            // Legend:
+            //
+            //     Move %src, %dst
+
+            // defWidth(%dst) is a promise about how many high bits are zero. The smaller the width, the
+            // stronger the promise. This Move may weaken that promise if we know that %src is making a
+            // weaker promise. Such forward flow is the only thing that determines defWidth().
+            if (dstWidths.def < srcWidths.def) {
+                dstWidths.def = srcWidths.def;
+                changed = true;
+            }
+
+            // srcWidth(%src) is a promise about how many high bits are ignored. The smaller the width,
+            // the stronger the promise. This Move may weaken that promise if we know that %dst is making
+            // a weaker promise. Such backward flow is the only thing that determines srcWidth().
+            if (srcWidths.use < dstWidths.use) {
+                srcWidths.use = dstWidths.use;
+                changed = true;
+            }
+        }
+    }
+
+    if (verbose)
+        dataLog("width: ", mapDump(m_width), "\n");
+}
+
+void TmpWidth::Widths::dump(PrintStream& out) const
+{
+    out.print("{use = ", use, ", def = ", def, "}");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirTmpWidth.h b/Source/JavaScriptCore/b3/air/AirTmpWidth.h
new file mode 100644
index 000000000..ea612b662
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmpWidth.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+class TmpWidth {
+public:
+    TmpWidth();
+    TmpWidth(Code&);
+    ~TmpWidth();
+
+    void recompute(Code&);
+
+    // The width of a Tmp is the number of bits that you need to be able to track without some trivial
+    // recovery. A Tmp may have a "subwidth" (say, Width32 on a 64-bit system) if either of the following
+    // is true:
+    //
+    // - The high bits are never read.
+    // - The high bits are always zero.
+    //
+    // This doesn't tell you which of those properties holds, but you can query that using the other
+    // methods.
+    Arg::Width width(Tmp tmp) const
+    {
+        auto iter = m_width.find(tmp);
+        if (iter == m_width.end())
+            return Arg::minimumWidth(Arg(tmp).type());
+        return std::min(iter->value.use, iter->value.def);
+    }
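+
+    // For example, a tmp only ever defined by 32-bit ZDefs (like Move32) and
+    // only read by 32-bit uses has use == def == Width32, so width() reports
+    // Width32 even on a 64-bit target.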
+
+    // Return the minimum required width for all defs/uses of this Tmp.
+    Arg::Width requiredWidth(Tmp tmp)
+    {
+        auto iter = m_width.find(tmp);
+        if (iter == m_width.end())
+            return Arg::minimumWidth(Arg(tmp).type());
+        return std::max(iter->value.use, iter->value.def);
+    }
+
+    // This indirectly tells you how much of the tmp's high bits are guaranteed to be zero. The number of
+    // high bits that are zero are:
+    //
+    //     TotalBits - defWidth(tmp)
+    //
+    // Where TotalBits are the total number of bits in the register, so 64 on a 64-bit system.
+    Arg::Width defWidth(Tmp tmp) const
+    {
+        auto iter = m_width.find(tmp);
+        if (iter == m_width.end())
+            return Arg::minimumWidth(Arg(tmp).type());
+        return iter->value.def;
+    }
+
+    // This tells you how much of Tmp is going to be read.
+    Arg::Width useWidth(Tmp tmp) const
+    {
+        auto iter = m_width.find(tmp);
+        if (iter == m_width.end())
+            return Arg::minimumWidth(Arg(tmp).type());
+        return iter->value.use;
+    }
+    
+private:
+    struct Widths {
+        Widths() { }
+
+        Widths(Arg::Type type)
+        {
+            use = Arg::minimumWidth(type);
+            def = Arg::minimumWidth(type);
+        }
+
+        void dump(PrintStream& out) const;
+        
+        Arg::Width use;
+        Arg::Width def;
+    };
+    
+    HashMap<Tmp, Widths> m_width;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirUseCounts.h b/Source/JavaScriptCore/b3/air/AirUseCounts.h
new file mode 100644
index 000000000..98a749321
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirUseCounts.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include <wtf/HashMap.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Computes the number of uses of a variable based on frequency of execution. The frequency of blocks
+// that are only reachable by rare edges is scaled by Options::rareBlockPenalty().
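+//
+// Usage sketch ('code' and 'tmp' are placeholders; the register allocators are
+// the real consumers of this analysis):
+//
+//     UseCounts<Tmp> useCounts(code);
+//     if (const UseCounts<Tmp>::Counts* counts = useCounts[tmp])
+//         dataLog("counts for ", tmp, ": ", *counts, "\n");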
+
+// Thing can be either Tmp or Arg.
+template<typename Thing>
+class UseCounts {
+public:
+    struct Counts {
+        void dump(PrintStream& out) const
+        {
+            out.print(
+                "{numWarmUses = ", numWarmUses, ", numColdUses = ", numColdUses, ", numDefs = ",
+                numDefs, "}");
+        }
+        
+        double numWarmUses { 0 };
+        double numColdUses { 0 };
+        double numDefs { 0 };
+        double numConstDefs { 0 };
+    };
+    
+    UseCounts(Code& code)
+    {
+        // Find non-rare blocks.
+        BlockWorklist fastWorklist;
+        fastWorklist.push(code[0]);
+        while (BasicBlock* block = fastWorklist.pop()) {
+            for (FrequentedBlock& successor : block->successors()) {
+                if (!successor.isRare())
+                    fastWorklist.push(successor.block());
+            }
+        }
+        
+        for (BasicBlock* block : code) {
+            double frequency = block->frequency();
+            if (!fastWorklist.saw(block))
+                frequency *= Options::rareBlockPenalty();
+            for (Inst& inst : *block) {
+                inst.forEach(
+                    [&] (Thing& arg, Arg::Role role, Arg::Type, Arg::Width) {
+                        Counts& counts = m_counts.add(arg, Counts()).iterator->value;
+
+                        if (Arg::isWarmUse(role))
+                            counts.numWarmUses += frequency;
+                        if (Arg::isColdUse(role))
+                            counts.numColdUses += frequency;
+                        if (Arg::isAnyDef(role))
+                            counts.numDefs += frequency;
+                    });
+
+                if ((inst.kind.opcode == Move || inst.kind.opcode == Move32)
+                    && inst.args[0].isSomeImm()
+                    && inst.args[1].is<Thing>())
+                    m_counts.add(inst.args[1].as<Thing>(), Counts()).iterator->value.numConstDefs++;
+            }
+        }
+    }
+
+    const Counts* operator[](const Thing& arg) const
+    {
+        auto iter = m_counts.find(arg);
+        if (iter == m_counts.end())
+            return nullptr;
+        return &iter->value;
+    }
+
+    void dump(PrintStream& out) const
+    {
+        out.print(mapDump(m_counts));
+    }
+
+private:
+    HashMap<Thing, Counts> m_counts;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirValidate.cpp b/Source/JavaScriptCore/b3/air/AirValidate.cpp
new file mode 100644
index 000000000..d90de62eb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirValidate.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirValidate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+class Validater {
+public:
+    Validater(Code& code, const char* dumpBefore)
+        : m_code(code)
+        , m_dumpBefore(dumpBefore)
+    {
+    }
+
+#define VALIDATE(condition, message) do {                               \
+        if (condition)                                                  \
+            break;                                                      \
+        fail(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #condition, toCString message); \
+    } while (false)
+    
+    void run()
+    {
+        HashSet<StackSlot*> validSlots;
+        HashSet<BasicBlock*> validBlocks;
+        HashSet<Special*> validSpecials;
+        
+        for (BasicBlock* block : m_code)
+            validBlocks.add(block);
+        for (StackSlot* slot : m_code.stackSlots())
+            validSlots.add(slot);
+        for (Special* special : m_code.specials())
+            validSpecials.add(special);
+
+        for (BasicBlock* block : m_code) {
+            // Blocks that are entrypoints must not have predecessors.
+            if (m_code.isEntrypoint(block))
+                VALIDATE(!block->numPredecessors(), ("At entrypoint ", *block));
+            
+            for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+                Inst& inst = block->at(instIndex);
+                for (Arg& arg : inst.args) {
+                    switch (arg.kind()) {
+                    case Arg::Stack:
+                        VALIDATE(validSlots.contains(arg.stackSlot()), ("At ", inst, " in ", *block));
+                        break;
+                    case Arg::Special:
+                        VALIDATE(validSpecials.contains(arg.special()), ("At ", inst, " in ", *block));
+                        break;
+                    default:
+                        break;
+                    }
+                }
+                VALIDATE(inst.isValidForm(), ("At ", inst, " in ", *block));
+                if (instIndex == block->size() - 1)
+                    VALIDATE(inst.isTerminal(), ("At ", inst, " in ", *block));
+                else
+                    VALIDATE(!inst.isTerminal(), ("At ", inst, " in ", *block));
+
+                // forEachArg must return Arg&'s that point into the args array.
+                inst.forEachArg(
+                    [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+                        VALIDATE(&arg >= &inst.args[0], ("At ", arg, " in ", inst, " in ", *block));
+                        VALIDATE(&arg <= &inst.args.last(), ("At ", arg, " in ", inst, " in ", *block));
+                    });
+                
+                switch (inst.kind.opcode) {
+                case EntrySwitch:
+                    VALIDATE(block->numSuccessors() == m_code.proc().numEntrypoints(), ("At ", inst, " in ", *block));
+                    break;
+                case Shuffle:
+                    // We can't handle trapping shuffles because of how we lower them. That could
+                    // be fixed though.
+                    VALIDATE(!inst.kind.traps, ("At ", inst, " in ", *block));
+                    break;
+                default:
+                    break;
+                }
+            }
+            for (BasicBlock* successor : block->successorBlocks())
+                VALIDATE(validBlocks.contains(successor), ("In ", *block));
+        }
+    }
+
+private:
+    NO_RETURN_DUE_TO_CRASH void fail(
+        const char* filename, int lineNumber, const char* function, const char* condition,
+        CString message)
+    {
+        CString failureMessage;
+        {
+            StringPrintStream out;
+            out.print("AIR VALIDATION FAILURE\n");
+            out.print("    ", condition, " (", filename, ":", lineNumber, ")\n");
+            out.print("    ", message, "\n");
+            out.print("    After ", m_code.lastPhaseName(), "\n");
+            failureMessage = out.toCString();
+        }
+
+        dataLog(failureMessage);
+        if (m_dumpBefore) {
+            dataLog("Before ", m_code.lastPhaseName(), ":\n");
+            dataLog(m_dumpBefore);
+        }
+        dataLog("At time of failure:\n");
+        dataLog(m_code);
+
+        dataLog(failureMessage);
+        WTFReportAssertionFailure(filename, lineNumber, function, condition);
+        CRASH();
+    }
+    
+    Code& m_code;
+    const char* m_dumpBefore;
+};
+
+} // anonymous namespace
+
+void validate(Code& code, const char* dumpBefore)
+{
+    Validater validater(code, dumpBefore);
+    validater.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirValidate.h b/Source/JavaScriptCore/b3/air/AirValidate.h
new file mode 100644
index 000000000..472c76379
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirValidate.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
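+// Typically run between phases when IR validation is enabled, e.g. (as the Air
+// pipeline does; shouldValidateIR() comes from B3Common.h):
+//
+//     if (shouldValidateIR())
+//         validate(code);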
+JS_EXPORT_PRIVATE void validate(Code&, const char* dumpBefore = nullptr);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/opcode_generator.rb b/Source/JavaScriptCore/b3/air/opcode_generator.rb
new file mode 100644
index 000000000..d14240515
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/opcode_generator.rb
@@ -0,0 +1,1228 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "pathname"
+
+class Opcode
+    attr_reader :name, :custom, :overloads
+    attr_reader :attributes
+
+    def initialize(name, custom)
+        @name = name
+        @custom = custom
+        @attributes = {}
+        unless custom
+            @overloads = []
+        end
+    end
+
+    def masmName
+        name[0].downcase + name[1..-1]
+    end
+end
+
+class Arg
+    attr_reader :role, :type, :width
+
+    def initialize(role, type, width)
+        @role = role
+        @type = type
+        @width = width
+    end
+
+    def widthCode
+        if width == "Ptr"
+            "Arg::pointerWidth()"
+        else
+            "Arg::Width#{width}"
+        end
+    end
+end
+
+class Overload
+    attr_reader :signature, :forms
+
+    def initialize(signature, forms)
+        @signature = signature
+        @forms = forms
+    end
+end
+
+class Kind
+    attr_reader :name
+    attr_accessor :custom
+
+    def initialize(name)
+        @name = name
+        @custom = false
+    end
+
+    def ==(other)
+        if other.is_a? String
+            @name == other
+        else
+            @name == other.name and @custom == other.custom
+        end
+    end
+
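+    # "Addr" in a form stands for any address-like argument, so it expands to
+    # the Addr, Stack, and CallArg kinds when we emit case statements.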
+    def Kind.argKinds(kind)
+        if kind == "Addr"
+            ["Addr", "Stack", "CallArg"]
+        else
+            [kind]
+        end
+    end
+
+    def argKinds
+        Kind.argKinds(name)
+    end
+end
+
+class Form
+    attr_reader :kinds, :altName, :archs
+
+    def initialize(kinds, altName, archs)
+        @kinds = kinds
+        @altName = altName
+        @archs = archs
+    end
+end
+
+class Origin
+    attr_reader :fileName, :lineNumber
+    
+    def initialize(fileName, lineNumber)
+        @fileName = fileName
+        @lineNumber = lineNumber
+    end
+    
+    def to_s
+        "#{fileName}:#{lineNumber}"
+    end
+end
+
+class Token
+    attr_reader :origin, :string
+    
+    def initialize(origin, string)
+        @origin = origin
+        @string = string
+    end
+    
+    def ==(other)
+        if other.is_a? Token
+            @string == other.string
+        else
+            @string == other
+        end
+    end
+    
+    def =~(other)
+        @string =~ other
+    end
+    
+    def to_s
+        "#{@string.inspect} at #{origin}"
+    end
+    
+    def parseError(*comment)
+        if comment.empty?
+            raise "Parse error: #{to_s}"
+        else
+            raise "Parse error: #{to_s}: #{comment[0]}"
+        end
+    end
+end
+
+def lex(str, fileName)
+    fileName = Pathname.new(fileName)
+    result = []
+    lineNumber = 1
+    while not str.empty?
+        case str
+        when /\A\#([^\n]*)/
+            # comment, ignore
+        when /\A\n/
+            # newline, ignore
+            lineNumber += 1
+        when /\A([a-zA-Z0-9_]+)/
+            result << Token.new(Origin.new(fileName, lineNumber), $&)
+        when /\A([ \t\r]+)/
+            # whitespace, ignore
+        when /\A[,:*\/]/
+            result << Token.new(Origin.new(fileName, lineNumber), $&)
+        else
+            raise "Lexer error at #{Origin.new(fileName, lineNumber).to_s}, unexpected sequence #{str[0..20].inspect}"
+        end
+        str = $~.post_match
+    end
+    result
+end
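+
+# A sketch of the lexer's output: lex("64: Add64 U:G:64", "x.opcodes") yields
+# the tokens "64", ":", "Add64", "U", ":", "G", ":", "64", each tagged with a
+# file/line Origin; comments and whitespace are skipped.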
+
+def isRole(token)
+    token =~ /\A((U)|(D)|(UD)|(ZD)|(UZD)|(UA)|(S))\Z/
+end
+
+def isGF(token)
+    token =~ /\A((G)|(F))\Z/
+end
+
+def isKind(token)
+    token =~ /\A((Tmp)|(Imm)|(BigImm)|(BitImm)|(BitImm64)|(Addr)|(Index)|(RelCond)|(ResCond)|(DoubleCond))\Z/
+end
+
+def isArch(token)
+    token =~ /\A((x86)|(x86_32)|(x86_64)|(arm)|(armv7)|(arm64)|(32)|(64))\Z/
+end
+
+def isWidth(token)
+    token =~ /\A((8)|(16)|(32)|(64)|(Ptr))\Z/
+end
+
+def isKeyword(token)
+    isRole(token) or isGF(token) or isKind(token) or isArch(token) or isWidth(token) or
+        token == "custom" or token == "as"
+end
+
+def isIdentifier(token)
+    token =~ /\A([a-zA-Z0-9_]+)\Z/ and not isKeyword(token)
+end
+
+class Parser
+    def initialize(data, fileName)
+        @tokens = lex(data, fileName)
+        @idx = 0
+    end
+
+    def token
+        @tokens[@idx]
+    end
+
+    def advance
+        @idx += 1
+    end
+
+    def parseError(*comment)
+        if token
+            token.parseError(*comment)
+        else
+            if comment.empty?
+                raise "Parse error at end of file"
+            else
+                raise "Parse error at end of file: #{comment[0]}"
+            end
+        end
+    end
+
+    def consume(string)
+        parseError("Expected #{string}") unless token == string
+        advance
+    end
+
+    def consumeIdentifier
+        result = token.string
+        parseError("Expected identifier") unless isIdentifier(result)
+        advance
+        result
+    end
+
+    def consumeRole
+        result = token.string
+        parseError("Expected role (U, D, UD, ZD, UZD, UA, or S)") unless isRole(result)
+        advance
+        result
+    end
+
+    def consumeType
+        result = token.string
+        parseError("Expected type (G or F)") unless isGF(result)
+        advance
+        result
+    end
+
+    def consumeKind
+        result = token.string
+        parseError("Expected kind (Imm, BigImm, BitImm, BitImm64, Tmp, Addr, Index, RelCond, ResCond, or DoubleCond)") unless isKind(result)
+        advance
+        result
+    end
+
+    def consumeWidth
+        result = token.string
+        parseError("Expected width (8, 16, 32, or 64)") unless isWidth(result)
+        advance
+        result
+    end
+
+    def parseArchs
+        return nil unless isArch(token)
+
+        result = []
+        while isArch(token)
+            case token.string
+            when "x86"
+                result << "X86"
+                result << "X86_64"
+            when "x86_32"
+                result << "X86"
+            when "x86_64"
+                result << "X86_64"
+            when "arm"
+                result << "ARMv7"
+                result << "ARM64"
+            when "armv7"
+                result << "ARMv7"
+            when "arm64"
+                result << "ARM64"
+            when "32"
+                result << "X86"
+                result << "ARMv7"
+            when "64"
+                result << "X86_64"
+                result << "ARM64"
+            else
+                raise token.string
+            end
+            advance
+        end
+
+        consume(":")
+        @lastArchs = result
+    end
+
+    def consumeArchs
+        result = @lastArchs
+        @lastArchs = nil
+        result
+    end
+
+    def parseAndConsumeArchs
+        parseArchs
+        consumeArchs
+    end
+
+    def intersectArchs(left, right)
+        return left unless right
+        return right unless left
+
+        left.select {
+            | value |
+            right.find {
+                | otherValue |
+                value == otherValue
+            }
+        }
+    end
+
+    def parse
+        result = {}
+        
+        loop {
+            break if @idx >= @tokens.length
+
+            if token == "custom"
+                consume("custom")
+                opcodeName = consumeIdentifier
+
+                parseError("Cannot overload a custom opcode") if result[opcodeName]
+
+                result[opcodeName] = Opcode.new(opcodeName, true)
+            else
+                opcodeArchs = parseAndConsumeArchs
+
+                opcodeName = consumeIdentifier
+
+                if result[opcodeName]
+                    opcode = result[opcodeName]
+                    parseError("Cannot overload a custom opcode") if opcode.custom
+                else
+                    opcode = Opcode.new(opcodeName, false)
+                    result[opcodeName] = opcode
+                end
+
+                signature = []
+                forms = []
+                
+                if isRole(token)
+                    loop {
+                        role = consumeRole
+                        consume(":")
+                        type = consumeType
+                        consume(":")
+                        width = consumeWidth
+                        
+                        signature << Arg.new(role, type, width)
+                        
+                        break unless token == ","
+                        consume(",")
+                    }
+                end
+
+                while token == "/"
+                    consume("/")
+                    case token.string
+                    when "branch"
+                        opcode.attributes[:branch] = true
+                        opcode.attributes[:terminal] = true
+                    when "terminal"
+                        opcode.attributes[:terminal] = true
+                    when "effects"
+                        opcode.attributes[:effects] = true
+                    when "return"
+                        opcode.attributes[:return] = true
+                        opcode.attributes[:terminal] = true
+                    else
+                        parseError("Bad / directive")
+                    end
+                    advance
+                end
+
+                parseArchs
+                if isKind(token)
+                    loop {
+                        kinds = []
+                        altName = nil
+                        formArchs = consumeArchs
+                        loop {
+                            kinds << Kind.new(consumeKind)
+
+                            if token == "*"
+                                parseError("Can only apply * to Tmp") unless kinds[-1].name == "Tmp"
+                                kinds[-1].custom = true
+                                consume("*")
+                            end
+
+                            break unless token == ","
+                            consume(",")
+                        }
+
+                        if token == "as"
+                            consume("as")
+                            altName = consumeIdentifier
+                        end
+
+                        parseError("Form has wrong number of arguments for overload") unless kinds.length == signature.length
+                        kinds.each_with_index {
+                            | kind, index |
+                            if kind.name == "Imm" or kind.name == "BigImm" or kind.name == "BitImm" or kind.name == "BitImm64"
+                                if signature[index].role != "U"
+                                    parseError("Form has an immediate for a non-use argument")
+                                end
+                                if signature[index].type != "G"
+                                    parseError("Form has an immediate for a non-general-purpose argument")
+                                end
+                            end
+                        }
+                        forms << Form.new(kinds, altName, intersectArchs(opcodeArchs, formArchs))
+
+                        parseArchs
+                        break unless isKind(token)
+                    }
+                end
+
+                if signature.length == 0
+                    raise unless forms.length == 0
+                    forms << Form.new([], nil, opcodeArchs)
+                end
+
+                opcode.overloads << Overload.new(signature, forms)
+            end
+        }
+
+        result
+    end
+end
+
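+# Typical invocation (a sketch; in practice the build system drives this):
+#
+#     ruby opcode_generator.rb AirOpcode.opcodes
+#
+# The generated headers land in the current working directory.
+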
+$fileName = ARGV[0]
+
+parser = Parser.new(IO::read($fileName), $fileName)
+$opcodes = parser.parse
+
+def writeH(filename)
+    File.open("Air#{filename}.h", "w") {
+        | outp |
+        
+        outp.puts "// Generated by opcode_generator.rb from #{$fileName} -- do not edit!"
+        
+        outp.puts "#ifndef Air#{filename}_h"
+        outp.puts "#define Air#{filename}_h"
+
+        yield outp
+        
+        outp.puts "#endif // Air#{filename}_h"
+    }
+end
+
+writeH("Opcode") {
+    | outp |
+    outp.puts "namespace JSC { namespace B3 { namespace Air {"
+    outp.puts "enum Opcode : int16_t {"
+    $opcodes.keys.sort.each {
+        | opcode |
+        outp.puts "    #{opcode},"
+    }
+    outp.puts "};"
+
+    outp.puts "static const unsigned numOpcodes = #{$opcodes.keys.size};"
+    outp.puts "} } } // namespace JSC::B3::Air"
+    
+    outp.puts "namespace WTF {"
+    outp.puts "class PrintStream;"
+    outp.puts "JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Opcode);"
+    outp.puts "} // namespace WTF"
+}
+
+# From here on, we don't try to emit properly indented code, since we're using a recursive pattern
+# matcher.
+
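+# For instance (a sketch), given the forms "Imm, Tmp" and "Tmp, Tmp" for a
+# two-argument overload, matchForms emits roughly:
+#
+#     switch (args[0].kind()) {
+#     case Arg::Imm:
+#         switch (args[1].kind()) {
+#         case Arg::Tmp:
+#             ... callback output for the (Imm, Tmp) form ...
+#             break;
+#         default:
+#             break;
+#         }
+#         break;
+#     case Arg::Tmp:
+#         ... likewise for (Tmp, Tmp) ...
+#     default:
+#         break;
+#     }
+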
+def matchForms(outp, speed, forms, columnIndex, columnGetter, filter, callback)
+    return if forms.length == 0
+
+    if filter[forms]
+        return
+    end
+
+    if columnIndex >= forms[0].kinds.length
+        raise "Did not reduce to one form: #{forms.inspect}" unless forms.length == 1
+        callback[forms[0]]
+        outp.puts "break;"
+        return
+    end
+    
+    groups = {}
+    forms.each {
+        | form |
+        kind = form.kinds[columnIndex].name
+        if groups[kind]
+            groups[kind] << form
+        else
+            groups[kind] = [form]
+        end
+    }
+
+    if speed == :fast and groups.length == 1
+        matchForms(outp, speed, forms, columnIndex + 1, columnGetter, filter, callback)
+        return
+    end
+
+    outp.puts "switch (#{columnGetter[columnIndex]}) {"
+    groups.each_pair {
+        | key, value |
+        outp.puts "#if USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
+        Kind.argKinds(key).each {
+            | argKind |
+            outp.puts "case Arg::#{argKind}:"
+        }
+        matchForms(outp, speed, value, columnIndex + 1, columnGetter, filter, callback)
+        outp.puts "break;"
+        outp.puts "#endif // USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
+    }
+    outp.puts "default:"
+    outp.puts "break;"
+    outp.puts "}"
+end
+
+def matchInstOverload(outp, speed, inst)
+    outp.puts "switch (#{inst}->kind.opcode) {"
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "case #{opcode.name}:"
+        if opcode.custom
+            yield opcode, nil
+        else
+            needOverloadSwitch = ((opcode.overloads.size != 1) or speed == :safe)
+            outp.puts "switch (#{inst}->args.size()) {" if needOverloadSwitch
+            opcode.overloads.each {
+                | overload |
+                outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+                yield opcode, overload
+                outp.puts "break;" if needOverloadSwitch
+            }
+            if needOverloadSwitch
+                outp.puts "default:"
+                outp.puts "break;"
+                outp.puts "}"
+            end
+        end
+        outp.puts "break;"
+    }
+    outp.puts "default:"
+    outp.puts "break;"
+    outp.puts "}"
+end
+    
+def matchInstOverloadForm(outp, speed, inst)
+    matchInstOverload(outp, speed, inst) {
+        | opcode, overload |
+        if opcode.custom
+            yield opcode, nil, nil
+        else
+            columnGetter = proc {
+                | columnIndex |
+                "#{inst}->args[#{columnIndex}].kind()"
+            }
+            filter = proc { false }
+            callback = proc {
+                | form |
+                yield opcode, overload, form
+            }
+            matchForms(outp, speed, overload.forms, 0, columnGetter, filter, callback)
+        end
+    }
+end
+
+def beginArchs(outp, archs)
+    return unless archs
+    if archs.empty?
+        outp.puts "#if 0"
+        return
+    end
+    outp.puts("#if " + archs.map {
+                  | arch |
+                  "CPU(#{arch})"
+              }.join(" || "))
+end
+
+def endArchs(outp, archs)
+    return unless archs
+    outp.puts "#endif"
+end
+
+writeH("OpcodeUtils") {
+    | outp |
+    outp.puts "#include \"AirCustom.h\""
+    outp.puts "#include \"AirInst.h\""
+    outp.puts "namespace JSC { namespace B3 { namespace Air {"
+    
+    outp.puts "inline bool opgenHiddenTruth() { return true; }"
+    outp.puts "template"
+    outp.puts "inline T* opgenHiddenPtrIdentity(T* pointer) { return pointer; }"
+    outp.puts "#define OPGEN_RETURN(value) do {\\"
+    outp.puts "    if (opgenHiddenTruth())\\"
+    outp.puts "        return value;\\"
+    outp.puts "} while (false)"
+
+    outp.puts "template"
+    outp.puts "void Inst::forEachArg(const Functor& functor)"
+    outp.puts "{"
+    matchInstOverload(outp, :fast, "this") {
+        | opcode, overload |
+        if opcode.custom
+            outp.puts "#{opcode.name}Custom::forEachArg(*this, functor);"
+        else
+            overload.signature.each_with_index {
+                | arg, index |
+                
+                role = nil
+                case arg.role
+                when "U"
+                    role = "Use"
+                when "D"
+                    role = "Def"
+                when "ZD"
+                    role = "ZDef"
+                when "UD"
+                    role = "UseDef"
+                when "UZD"
+                    role = "UseZDef"
+                when "UA"
+                    role = "UseAddr"
+                when "S"
+                    role = "Scratch"
+                else
+                    raise
+                end
+
+                outp.puts "functor(args[#{index}], Arg::#{role}, Arg::#{arg.type}P, #{arg.widthCode});"
+            }
+        end
+    }
+    outp.puts "}"
+
+    outp.puts "template"
+    outp.puts "ALWAYS_INLINE bool isValidForm(Opcode opcode, Arguments... arguments)"
+    outp.puts "{"
+    outp.puts "Arg::Kind kinds[sizeof...(Arguments)] = { arguments... };"
+    outp.puts "switch (opcode) {"
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "case #{opcode.name}:"
+        if opcode.custom
+            outp.puts "OPGEN_RETURN(#{opcode.name}Custom::isValidFormStatic(arguments...));"
+        else
+            outp.puts "switch (sizeof...(Arguments)) {"
+            opcode.overloads.each {
+                | overload |
+                outp.puts "case #{overload.signature.length}:"
+                columnGetter = proc { | columnIndex | "opgenHiddenPtrIdentity(kinds)[#{columnIndex}]" }
+                filter = proc { false }
+                callback = proc {
+                    | form |
+                    # This conservatively says that Stack is not a valid form for UseAddr,
+                    # because it's only valid if it's not a spill slot. This is consistent with
+                    # isValidForm() being conservative and it also happens to be practical since
+                    # we don't really use isValidForm for deciding when Stack is safe.
+                    overload.signature.length.times {
+                        | index |
+                        if overload.signature[index].role == "UA"
+                            outp.puts "if (opgenHiddenPtrIdentity(kinds)[#{index}] == Arg::Stack)"
+                            outp.puts "    return false;"
+                        end
+                    }
+                    
+                    notCustom = (not form.kinds.detect { | kind | kind.custom })
+                    if notCustom
+                        beginArchs(outp, form.archs)
+                        outp.puts "OPGEN_RETURN(true);"
+                        endArchs(outp, form.archs)
+                    end
+                }
+                matchForms(outp, :safe, overload.forms, 0, columnGetter, filter, callback)
+                outp.puts "break;"
+            }
+            outp.puts "default:"
+            outp.puts "break;"
+            outp.puts "}"
+        end
+        outp.puts "break;"
+    }
+    outp.puts "default:"
+    outp.puts "break;"
+    outp.puts "}"
+    outp.puts "return false; "
+    outp.puts "}"
+
+    outp.puts "inline bool isDefinitelyTerminal(Opcode opcode)"
+    outp.puts "{"
+    outp.puts "switch (opcode) {"
+    didFindTerminals = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:terminal]
+            outp.puts "case #{opcode.name}:"
+            didFindTerminals = true
+        end
+    }
+    if didFindTerminals
+        outp.puts "return true;"
+    end
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+
+    outp.puts "inline bool isReturn(Opcode opcode)"
+    outp.puts "{"
+    outp.puts "switch (opcode) {"
+    didFindReturns = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:return]
+            outp.puts "case #{opcode.name}:"
+            didFindReturns = true
+        end
+    }
+    if didFindReturns
+        outp.puts "return true;"
+    end
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "} } } // namespace JSC::B3::Air"
+}
+
+writeH("OpcodeGenerated") {
+    | outp |
+    outp.puts "#include \"AirInstInlines.h\""
+    outp.puts "#include \"wtf/PrintStream.h\""
+    outp.puts "namespace WTF {"
+    outp.puts "using namespace JSC::B3::Air;"
+    outp.puts "void printInternal(PrintStream& out, Opcode opcode)"
+    outp.puts "{"
+    outp.puts "    switch (opcode) {"
+    $opcodes.keys.each {
+        | opcode |
+        outp.puts "    case #{opcode}:"
+        outp.puts "        out.print(\"#{opcode}\");"
+        outp.puts "        return;"
+    }
+    outp.puts "    }"
+    outp.puts "    RELEASE_ASSERT_NOT_REACHED();"
+    outp.puts "}"
+    outp.puts "} // namespace WTF"
+    outp.puts "namespace JSC { namespace B3 { namespace Air {"
+    outp.puts "bool Inst::isValidForm()"
+    outp.puts "{"
+    matchInstOverloadForm(outp, :safe, "this") {
+        | opcode, overload, form |
+        if opcode.custom
+            outp.puts "OPGEN_RETURN(#{opcode.name}Custom::isValidForm(*this));"
+        else
+            beginArchs(outp, form.archs)
+            needsMoreValidation = false
+            overload.signature.length.times {
+                | index |
+                arg = overload.signature[index]
+                kind = form.kinds[index]
+                needsMoreValidation |= kind.custom
+
+                # Some kinds of Args require additional validation.
+                case kind.name
+                when "Tmp"
+                    outp.puts "if (!args[#{index}].tmp().is#{arg.type}P())"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "Imm"
+                    outp.puts "if (!Arg::isValidImmForm(args[#{index}].value()))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "BitImm"
+                    outp.puts "if (!Arg::isValidBitImmForm(args[#{index}].value()))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "BitImm64"
+                    outp.puts "if (!Arg::isValidBitImm64Form(args[#{index}].value()))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "Addr"
+                    if arg.role == "UA"
+                        outp.puts "if (args[#{index}].isStack() && args[#{index}].stackSlot()->isSpill())"
+                        outp.puts "OPGEN_RETURN(false);"
+                    end
+                    
+                    outp.puts "if (!Arg::isValidAddrForm(args[#{index}].offset()))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "Index"
+                    outp.puts "if (!Arg::isValidIndexForm(args[#{index}].scale(), args[#{index}].offset(), #{arg.widthCode}))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "BigImm"
+                when "RelCond"
+                when "ResCond"
+                when "DoubleCond"
+                else
+                    raise "Unexpected kind: #{kind.name}"
+                end
+            }
+            if needsMoreValidation
+                outp.puts "if (!is#{opcode.name}Valid(*this))"
+                outp.puts "OPGEN_RETURN(false);"
+            end
+            outp.puts "OPGEN_RETURN(true);"
+            endArchs(outp, form.archs)
+        end
+    }
+    outp.puts "return false;"
+    outp.puts "}"
+
+    outp.puts "bool Inst::admitsStack(unsigned argIndex)"
+    outp.puts "{"
+    outp.puts "switch (kind.opcode) {"
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "case #{opcode.name}:"
+
+        if opcode.custom
+            outp.puts "OPGEN_RETURN(#{opcode.name}Custom::admitsStack(*this, argIndex));"
+        else
+            # Switch on the argIndex.
+            outp.puts "switch (argIndex) {"
+
+            numArgs = opcode.overloads.map {
+                | overload |
+                overload.signature.length
+            }.max
+            
+            numArgs.times {
+                | argIndex |
+                outp.puts "case #{argIndex}:"
+
+                # Check if all of the forms of all of the overloads either do, or don't, admit an address
+                # at this index. We expect this to be a very common case.
+                numYes = 0
+                numNo = 0
+                opcode.overloads.each {
+                    | overload |
+                    useAddr = (overload.signature[argIndex] and
+                               overload.signature[argIndex].role == "UA")
+                    overload.forms.each {
+                        | form |
+                        if form.kinds[argIndex] == "Addr" and not useAddr
+                            numYes += 1
+                        else
+                            numNo += 1
+                        end
+                    }
+                }
+
+                # Note that we deliberately test numYes first because if we end up with no forms, we want
+                # to say that Address is inadmissible.
+                if numYes == 0
+                    outp.puts "OPGEN_RETURN(false);"
+                elsif numNo == 0
+                    outp.puts "OPGEN_RETURN(true);"
+                else
+                    # Now do the full test.
+
+                    needOverloadSwitch = (opcode.overloads.size != 1)
+
+                    outp.puts "switch (args.size()) {" if needOverloadSwitch
+                    opcode.overloads.each {
+                        | overload |
+
+                        useAddr = (overload.signature[argIndex] and
+                                   overload.signature[argIndex].role == "UA")
+                        
+                        # Again, check if all of them do what we want.
+                        numYes = 0
+                        numNo = 0
+                        overload.forms.each {
+                            | form |
+                            if form.kinds[argIndex] == "Addr" and not useAddr
+                                numYes += 1
+                            else
+                                numNo += 1
+                            end
+                        }
+
+                        if numYes == 0
+                            # Don't emit anything, just drop to default.
+                        elsif numNo == 0
+                            outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+                            outp.puts "OPGEN_RETURN(true);"
+                            outp.puts "break;" if needOverloadSwitch
+                        else
+                            outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+
+                            # This is how we test the hypothesis that changing this argument to an
+                            # address yields a valid form.
+                            columnGetter = proc {
+                                | columnIndex |
+                                if columnIndex == argIndex
+                                    "Arg::Addr"
+                                else
+                                    "args[#{columnIndex}].kind()"
+                                end
+                            }
+                            filter = proc {
+                                | forms |
+                                numYes = 0
+
+                                forms.each {
+                                    | form |
+                                    if form.kinds[argIndex] == "Addr"
+                                        numYes += 1
+                                    end
+                                }
+
+                                if numYes == 0
+                                    # Drop down, emit no code, since we cannot match.
+                                    true
+                                else
+                                    # Keep going.
+                                    false
+                                end
+                            }
+                            callback = proc {
+                                | form |
+                                beginArchs(outp, form.archs)
+                                outp.puts "OPGEN_RETURN(true);"
+                                endArchs(outp, form.archs)
+                            }
+                            matchForms(outp, :safe, overload.forms, 0, columnGetter, filter, callback)
+
+                            outp.puts "break;" if needOverloadSwitch
+                        end
+                    }
+                    if needOverloadSwitch
+                        outp.puts "default:"
+                        outp.puts "break;"
+                        outp.puts "}"
+                    end
+                end
+                
+                outp.puts "break;"
+            }
+            
+            outp.puts "default:"
+            outp.puts "break;"
+            outp.puts "}"
+        end
+        
+        outp.puts "break;"
+    }
+    outp.puts "default:";
+    outp.puts "break;"
+    outp.puts "}"
+    outp.puts "return false;"
+    outp.puts "}"
+
+    outp.puts "bool Inst::isTerminal()"
+    outp.puts "{"
+    outp.puts "switch (kind.opcode) {"
+    foundTrue = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:terminal]
+            outp.puts "case #{opcode.name}:"
+            foundTrue = true
+        end
+    }
+    if foundTrue
+        outp.puts "return true;"
+    end
+    $opcodes.values.each {
+        | opcode |
+        if opcode.custom
+            outp.puts "case #{opcode.name}:"
+            outp.puts "return #{opcode.name}Custom::isTerminal(*this);"
+        end
+    }
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "bool Inst::hasNonArgNonControlEffects()"
+    outp.puts "{"
+    outp.puts "if (kind.traps)"
+    outp.puts "return true;"
+    outp.puts "switch (kind.opcode) {"
+    foundTrue = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:effects]
+            outp.puts "case #{opcode.name}:"
+            foundTrue = true
+        end
+    }
+    if foundTrue
+        outp.puts "return true;"
+    end
+    $opcodes.values.each {
+        | opcode |
+        if opcode.custom
+            outp.puts "case #{opcode.name}:"
+            outp.puts "return #{opcode.name}Custom::hasNonArgNonControlEffects(*this);"
+        end
+    }
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "bool Inst::hasNonArgEffects()"
+    outp.puts "{"
+    outp.puts "if (kind.traps)"
+    outp.puts "return true;"
+    outp.puts "switch (kind.opcode) {"
+    foundTrue = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:terminal] or opcode.attributes[:effects]
+            outp.puts "case #{opcode.name}:"
+            foundTrue = true
+        end
+    }
+    if foundTrue
+        outp.puts "return true;"
+    end
+    $opcodes.values.each {
+        | opcode |
+        if opcode.custom
+            outp.puts "case #{opcode.name}:"
+            outp.puts "return #{opcode.name}Custom::hasNonArgEffects(*this);"
+        end
+    }
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "CCallHelpers::Jump Inst::generate(CCallHelpers& jit, GenerationContext& context)"
+    outp.puts "{"
+    outp.puts "UNUSED_PARAM(jit);"
+    outp.puts "UNUSED_PARAM(context);"
+    outp.puts "CCallHelpers::Jump result;"
+    matchInstOverloadForm(outp, :fast, "this") {
+        | opcode, overload, form |
+        if opcode.custom
+            outp.puts "OPGEN_RETURN(#{opcode.name}Custom::generate(*this, jit, context));"
+        else
+            beginArchs(outp, form.archs)
+            if form.altName
+                methodName = form.altName
+            else
+                methodName = opcode.masmName
+            end
+            if opcode.attributes[:branch]
+                outp.print "result = "
+            end
+            outp.print "jit.#{methodName}("
+
+            form.kinds.each_with_index {
+                | kind, index |
+                if index != 0
+                    outp.print ", "
+                end
+                case kind.name
+                when "Tmp"
+                    if overload.signature[index].type == "G"
+                        outp.print "args[#{index}].gpr()"
+                    else
+                        outp.print "args[#{index}].fpr()"
+                    end
+                when "Imm", "BitImm"
+                    outp.print "args[#{index}].asTrustedImm32()"
+                when "BigImm", "BitImm64"
+                    outp.print "args[#{index}].asTrustedImm64()"
+                when "Addr"
+                    outp.print "args[#{index}].asAddress()"
+                when "Index"
+                    outp.print "args[#{index}].asBaseIndex()"
+                when "RelCond"
+                    outp.print "args[#{index}].asRelationalCondition()"
+                when "ResCond"
+                    outp.print "args[#{index}].asResultCondition()"
+                when "DoubleCond"
+                    outp.print "args[#{index}].asDoubleCondition()"
+                end
+            }
+
+            outp.puts ");"
+            outp.puts "OPGEN_RETURN(result);"
+            endArchs(outp, form.archs)
+        end
+    }
+    outp.puts "RELEASE_ASSERT_NOT_REACHED();"
+    outp.puts "return result;"
+    outp.puts "}"
+
+    outp.puts "} } } // namespace JSC::B3::Air"
+}
+
+# This is a hack for JSAir. It's a joke.
+File.open("JSAir_opcode.js", "w") {
+    | outp |
+    outp.puts "\"use strict\";"
+    outp.puts "// Generated by opcode_generator.rb from #{$fileName} -- do not edit!"
+    
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "const #{opcode.name} = Symbol(#{opcode.name.inspect});"
+    }
+    
+    outp.puts "function Inst_forEachArg(inst, func)"
+    outp.puts "{"
+    outp.puts "let replacement;"
+    outp.puts "switch (inst.opcode) {"
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "case #{opcode.name}:"
+        if opcode.custom
+            outp.puts "#{opcode.name}Custom.forEachArg(inst, func);"
+        else
+            needOverloadSwitch = opcode.overloads.size != 1
+            outp.puts "switch (inst.args.length) {" if needOverloadSwitch
+            opcode.overloads.each {
+                | overload |
+                outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+                overload.signature.each_with_index {
+                    | arg, index |
+                    role = nil
+                    case arg.role
+                    when "U"
+                        role = "Use"
+                    when "D"
+                        role = "Def"
+                    when "ZD"
+                        role = "ZDef"
+                    when "UD"
+                        role = "UseDef"
+                    when "UZD"
+                        role = "UseZDef"
+                    when "UA"
+                        role = "UseAddr"
+                    when "S"
+                        role = "Scratch"
+                    else
+                        raise
+                    end
+                    
+                    outp.puts "inst.visitArg(#{index}, func, Arg.#{role}, #{arg.type}P, #{arg.width});"
+                }
+                outp.puts "break;"
+            }
+            if needOverloadSwitch
+                outp.puts "default:"
+                outp.puts "throw new Error(\"Bad overload\");"
+                outp.puts "break;"
+                outp.puts "}"
+            end
+        end
+        outp.puts "break;"
+    }
+    outp.puts "default:"
+    outp.puts "throw \"Bad opcode\";"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "function Inst_hasNonArgEffects(inst)"
+    outp.puts "{"
+    outp.puts "switch (inst.opcode) {"
+    foundTrue = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:terminal] or opcode.attributes[:effects]
+            outp.puts "case #{opcode.name}:"
+            foundTrue = true
+        end
+    }
+    if foundTrue
+        outp.puts "return true;"
+    end
+    $opcodes.values.each {
+        | opcode |
+        if opcode.custom
+            outp.puts "case #{opcode.name}:"
+            outp.puts "return #{opcode.name}Custom.hasNonArgNonControlEffects(inst);"
+        end
+    }
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "function opcodeCode(opcode)"
+    outp.puts "{"
+    outp.puts "switch (opcode) {"
+    $opcodes.keys.sort.each_with_index {
+        | opcode, index |
+        outp.puts "case #{opcode}:"
+        outp.puts "return #{index}"
+    }
+    outp.puts "default:"
+    outp.puts "throw new Error(\"bad opcode\");"
+    outp.puts "}"
+    outp.puts "}"
+}
+
diff --git a/Source/JavaScriptCore/b3/air/testair.cpp b/Source/JavaScriptCore/b3/air/testair.cpp
new file mode 100644
index 000000000..9f8a8d83e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/testair.cpp
@@ -0,0 +1,1964 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#include "AirCode.h"
+#include "AirGenerate.h"
+#include "AirInstInlines.h"
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3Compilation.h"
+#include "B3Procedure.h"
+#include "CCallHelpers.h"
+#include "InitializeThreading.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "PureNaN.h"
+#include "VM.h"
+#include <cmath>
+#include <map>
+#include <string>
+#include <wtf/Lock.h>
+#include <wtf/NumberOfCores.h>
+#include <wtf/Threading.h>
+
+// We don't have a NO_RETURN_DUE_TO_EXIT, nor should we. That's ridiculous.
+static bool hiddenTruthBecauseNoReturnIsStupid() { return true; }
+
+static void usage()
+{
+    dataLog("Usage: testb3 []\n");
+    if (hiddenTruthBecauseNoReturnIsStupid())
+        exit(1);
+}
+
+#if ENABLE(B3_JIT)
+
+using namespace JSC;
+using namespace JSC::B3::Air;
+
+namespace {
+
+StaticLock crashLock;
+
+// Nothing fancy for now; we just use the existing WTF assertion machinery.
+#define CHECK(x) do {                                                   \
+        if (!!(x))                                                      \
+            break;                                                      \
+        crashLock.lock();                                               \
+        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #x); \
+        CRASH();                                                        \
+    } while (false)
+
+VM* vm;
+
+std::unique_ptr<B3::Compilation> compile(B3::Procedure& proc)
+{
+    prepareForGeneration(proc.code());
+    CCallHelpers jit(vm);
+    generate(proc.code(), jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+
+    return std::make_unique<B3::Compilation>(
+        FINALIZE_CODE(linkBuffer, ("testair compilation")), proc.releaseByproducts());
+}
+
+template<typename T, typename... Arguments>
+T invoke(const B3::Compilation& code, Arguments... arguments)
+{
+    T (*function)(Arguments...) = bitwise_cast<T (*)(Arguments...)>(code.code().executableAddress());
+    return function(arguments...);
+}
+
+template<typename T, typename... Arguments>
+T compileAndRun(B3::Procedure& procedure, Arguments... arguments)
+{
+    return invoke<T>(*compile(procedure), arguments...);
+}
+
+void testSimple()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Arg::imm(42), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(compileAndRun<int>(proc) == 42);
+}
+
+// Use this to put a constant into a register without Air being able to see the constant.
+template<typename T>
+void loadConstantImpl(BasicBlock* block, T value, B3::Air::Opcode move, Tmp tmp, Tmp scratch)
+{
+    static StaticLock lock;
+    static std::map<T, T*>* map; // I'm not messing with HashMap's problems with integers.
+
+    LockHolder locker(lock);
+    if (!map)
+        map = new std::map<T, T*>();
+
+    if (!map->count(value))
+        (*map)[value] = new T(value);
+
+    T* ptr = (*map)[value];
+    block->append(Move, nullptr, Arg::bigImm(bitwise_cast(ptr)), scratch);
+    block->append(move, nullptr, Arg::addr(scratch), tmp);
+}
+
+void loadConstant(BasicBlock* block, intptr_t value, Tmp tmp)
+{
+    loadConstantImpl(block, value, Move, tmp, tmp);
+}
+
+void loadDoubleConstant(BasicBlock* block, double value, Tmp tmp, Tmp scratch)
+{
+    loadConstantImpl(block, value, MoveDouble, tmp, scratch);
+}
+
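+// A Shuffle executes all of its (src, dst, width) triples as if in parallel:
+// every source is read before any destination is written. That is why the
+// swap below needs no explicit scratch register; lowering breaks the cycle.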
+void testShuffleSimpleSwap()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32));
+
+    int32_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 2);
+    CHECK(things[2] == 4);
+    CHECK(things[3] == 3);
+}
+
+void testShuffleSimpleShift()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32));
+
+    int32_t things[5];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 2);
+    CHECK(things[2] == 3);
+    CHECK(things[3] == 3);
+    CHECK(things[4] == 4);
+}
+
+void testShuffleLongShift()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 3);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+    CHECK(things[6] == 6);
+    CHECK(things[7] == 7);
+}
+
+void testShuffleLongShiftBackwards()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 3);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+    CHECK(things[6] == 6);
+    CHECK(things[7] == 7);
+}
+
+void testShuffleSimpleRotate()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32));
+
+    int32_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 4);
+}
+
+void testShuffleSimpleBroadcast()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32));
+
+    int32_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 1);
+    CHECK(things[3] == 1);
+}
+
+void testShuffleBroadcastAllRegs()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    const Vector& regs = code.regsInPriorityOrder(Arg::GP);
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Arg::imm(35), Tmp(GPRInfo::regT0));
+    unsigned count = 1;
+    for (Reg reg : regs) {
+        if (reg != Reg(GPRInfo::regT0))
+            loadConstant(root, count++, Tmp(reg));
+    }
+    Inst& shuffle = root->append(Shuffle, nullptr);
+    for (Reg reg : regs) {
+        if (reg != Reg(GPRInfo::regT0))
+            shuffle.append(Tmp(GPRInfo::regT0), Tmp(reg), Arg::widthArg(Arg::Width32));
+    }
+
+    StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+    for (unsigned i = 0; i < regs.size(); ++i)
+        root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+    Vector<int32_t> things(regs.size(), 666);
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast(&things[0])), base);
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+        root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+    }
+    
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    for (int32_t thing : things)
+        CHECK(thing == 35);
+}
+
+void testShuffleTreeShift()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 1);
+    CHECK(things[3] == 2);
+    CHECK(things[4] == 2);
+    CHECK(things[5] == 3);
+    CHECK(things[6] == 3);
+    CHECK(things[7] == 4);
+}
+
+void testShuffleTreeShiftBackward()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 1);
+    CHECK(things[3] == 2);
+    CHECK(things[4] == 2);
+    CHECK(things[5] == 3);
+    CHECK(things[6] == 3);
+    CHECK(things[7] == 4);
+}
+
+void testShuffleTreeShiftOtherBackward()
+{
+    // NOTE: This test was my original attempt at TreeShiftBackward but mistakes were made. So, this
+    // ends up being just a weird test. But weird tests are useful, so I kept it.
+    
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT7), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT7), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 8);
+    CHECK(things[2] == 8);
+    CHECK(things[3] == 7);
+    CHECK(things[4] == 7);
+    CHECK(things[5] == 6);
+    CHECK(things[6] == 6);
+    CHECK(things[7] == 5);
+}
+
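+// Two independent fan-outs in a single Shuffle: regT0 broadcasts to regT1/regT5
+// and regT2 broadcasts to regT3/regT4.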
+void testShuffleMultipleShifts()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 3);
+    CHECK(things[3] == 3);
+    CHECK(things[4] == 3);
+    CHECK(things[5] == 1);
+}
+
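+// A three-register rotation (regT0 -> regT1 -> regT2 -> regT0) with a fringe copy
+// hanging off each element of the cycle.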
+void testShuffleRotateWithFringe()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 1);
+    CHECK(things[4] == 2);
+    CHECK(things[5] == 3);
+}
+
+void testShuffleRotateWithFringeInWeirdOrder()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 1);
+    CHECK(things[4] == 2);
+    CHECK(things[5] == 3);
+}
+
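+// A rotation whose fringe is itself a shift chain (regT0 -> regT3 -> regT4 -> regT5).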
+void testShuffleRotateWithLongFringe()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 1);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+}
+
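+// Two disjoint three-register rotations resolved by a single Shuffle.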
+void testShuffleMultipleRotates()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 6);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+}
+
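+// One rotation (regT0/regT1/regT2) combined with an unrelated shift
+// (regT3 -> regT4 -> regT5) in the same Shuffle.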
+void testShuffleShiftAndRotate()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 4);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+}
+
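+// Shifts a value chain through every GP register. The results are flushed to a
+// locked stack slot first, since regT0 is then reused as a scratch register to
+// copy them out to the things vector.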
+void testShuffleShiftAllRegs()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    const Vector<Reg>& regs = code.regsInPriorityOrder(Arg::GP);
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, 35 + i, Tmp(regs[i]));
+    Inst& shuffle = root->append(Shuffle, nullptr);
+    for (unsigned i = 1; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+
+    StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+    for (unsigned i = 0; i < regs.size(); ++i)
+        root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+    Vector<int32_t> things(regs.size(), 666);
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+        root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+    }
+    
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 35);
+    for (unsigned i = 1; i < regs.size(); ++i)
+        CHECK(things[i] == 35 + static_cast<int32_t>(i) - 1);
+}
+
+void testShuffleRotateAllRegs()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    const Vector<Reg>& regs = code.regsInPriorityOrder(Arg::GP);
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, 35 + i, Tmp(regs[i]));
+    Inst& shuffle = root->append(Shuffle, nullptr);
+    for (unsigned i = 1; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+    shuffle.append(Tmp(regs.last()), Tmp(regs[0]), Arg::widthArg(Arg::Width32));
+
+    StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+    for (unsigned i = 0; i < regs.size(); ++i)
+        root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+    Vector<int32_t> things(regs.size(), 666);
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+        root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+    }
+    
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 35 + static_cast<int32_t>(regs.size()) - 1);
+    for (unsigned i = 1; i < regs.size(); ++i)
+        CHECK(things[i] == 35 + static_cast<int32_t>(i) - 1);
+}
+
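+// 64-bit (Width64) variants of the swap and shift tests, using constants too large
+// for 32 bits so that an accidental truncation would be caught.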
+void testShuffleSimpleSwap64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+    loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+    loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width64));
+
+    int64_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 10000000000000000ll);
+    CHECK(things[1] == 20000000000000000ll);
+    CHECK(things[2] == 40000000000000000ll);
+    CHECK(things[3] == 30000000000000000ll);
+}
+
+void testShuffleSimpleShift64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+    loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+    loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+    loadConstant(root, 50000000000000000ll, Tmp(GPRInfo::regT4));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width64));
+
+    int64_t things[5];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 10000000000000000ll);
+    CHECK(things[1] == 20000000000000000ll);
+    CHECK(things[2] == 30000000000000000ll);
+    CHECK(things[3] == 30000000000000000ll);
+    CHECK(things[4] == 40000000000000000ll);
+}
+
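+// Mixed 32-/64-bit widths in one Shuffle: a Width32 pair copies only the low 32
+// bits of its source, zero-extended, which is what the uint32_t casts in the
+// checks below account for.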
+void testShuffleSwapMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+    loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+    loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width64));
+
+    int64_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 10000000000000000ll);
+    CHECK(things[1] == 20000000000000000ll);
+    CHECK(things[2] == 40000000000000000ll);
+    CHECK(things[3] == static_cast<uint32_t>(30000000000000000ll));
+}
+
+void testShuffleShiftMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+    loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+    loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+    loadConstant(root, 50000000000000000ll, Tmp(GPRInfo::regT4));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32));
+
+    int64_t things[5];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 10000000000000000ll);
+    CHECK(things[1] == 20000000000000000ll);
+    CHECK(things[2] == 30000000000000000ll);
+    CHECK(things[3] == 30000000000000000ll);
+    CHECK(things[4] == static_cast<uint32_t>(40000000000000000ll));
+}
+
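+// From this test on, some Shuffle operands are memory addresses rather than
+// registers, so the resolver has to emit loads and stores as well as moves.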
+void testShuffleShiftMemory()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int32_t memory[2];
+    memory[0] = 35;
+    memory[1] = 36;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32));
+
+    int32_t things[2];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(memory[0] == 35);
+    CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryLong()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int32_t memory[2];
+    memory[0] = 35;
+    memory[1] = 36;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        
+        Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT3), 0 * sizeof(int32_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT3), 0 * sizeof(int32_t)),
+        Arg::addr(Tmp(GPRInfo::regT3), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+        Arg::addr(Tmp(GPRInfo::regT3), 1 * sizeof(int32_t)), Tmp(GPRInfo::regT2),
+        Arg::widthArg(Arg::Width32));
+
+    int32_t things[3];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 36);
+    CHECK(memory[0] == 2);
+    CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryAllRegs()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int32_t memory[2];
+    memory[0] = 35;
+    memory[1] = 36;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, i + 1, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int32_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int32_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int32_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width32));
+
+    for (unsigned i = 2; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+
+    Vector<int32_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move32, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int32_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 36);
+    for (unsigned i = 2; i < regs.size(); ++i)
+        CHECK(things[i] == static_cast<int32_t>(i));
+    CHECK(memory[0] == 1);
+    CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryAllRegs64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width64),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width64));
+
+    for (unsigned i = 2; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+    Vector<int64_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1000000000000ll);
+    CHECK(things[1] == 36000000000000ll);
+    for (unsigned i = 2; i < regs.size(); ++i)
+        CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+    CHECK(memory[0] == 1000000000000ll);
+    CHECK(memory[1] == 35000000000000ll);
+}
+
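+// Returns `high` with its low 32 bits replaced by the low 32 bits of `low`. The
+// union trick assumes a little-endian layout, which holds on the targets this
+// test file runs on.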
+int64_t combineHiLo(int64_t high, int64_t low)
+{
+    union {
+        int64_t value;
+        int32_t halves[2];
+    } u;
+    u.value = high;
+    u.halves[0] = static_cast<int32_t>(low);
+    return u.value;
+}
+
+void testShuffleShiftMemoryAllRegsMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width32));
+
+    for (unsigned i = 2; i < regs.size(); ++i) {
+        shuffle.append(
+            Tmp(regs[i - 1]), Tmp(regs[i]),
+            (i & 1) ? Arg::widthArg(Arg::Width32) : Arg::widthArg(Arg::Width64));
+    }
+
+    Vector<int64_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1000000000000ll);
+    CHECK(things[1] == static_cast<uint32_t>(36000000000000ll));
+    for (unsigned i = 2; i < regs.size(); ++i) {
+        int64_t value = static_cast<int64_t>(i) * 1000000000000ll;
+        CHECK(things[i] == ((i & 1) ? static_cast<uint32_t>(value) : value));
+    }
+    CHECK(memory[0] == combineHiLo(35000000000000ll, 1000000000000ll));
+    CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemory()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int32_t memory[2];
+    memory[0] = 35;
+    memory[1] = 36;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+    root->append(
+        Shuffle, nullptr,
+        
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+
+        Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Tmp(GPRInfo::regT0),
+        Arg::widthArg(Arg::Width32));
+
+    int32_t things[2];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 36);
+    CHECK(things[1] == 1);
+    CHECK(memory[0] == 2);
+    CHECK(memory[1] == 35);
+}
+
+void testShuffleRotateMemory64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2000000000000ll, Tmp(GPRInfo::regT1));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+    root->append(
+        Shuffle, nullptr,
+        
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width64),
+
+        Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width64),
+        
+        Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Tmp(GPRInfo::regT0),
+        Arg::widthArg(Arg::Width64));
+
+    int64_t things[2];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 36000000000000ll);
+    CHECK(things[1] == 1000000000000ll);
+    CHECK(memory[0] == 2000000000000ll);
+    CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemoryMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2000000000000ll, Tmp(GPRInfo::regT1));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+    root->append(
+        Shuffle, nullptr,
+        
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+
+        Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width64),
+        
+        Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width32),
+
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Tmp(GPRInfo::regT0),
+        Arg::widthArg(Arg::Width64));
+
+    int64_t things[2];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 36000000000000ll);
+    CHECK(things[1] == static_cast<uint32_t>(1000000000000ll));
+    CHECK(memory[0] == 2000000000000ll);
+    CHECK(memory[1] == combineHiLo(36000000000000ll, 35000000000000ll));
+}
+
+void testShuffleRotateMemoryAllRegs64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width64),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width64),
+
+        regs.last(), regs[0], Arg::widthArg(Arg::Width64));
+
+    for (unsigned i = 2; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+    Vector<int64_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == static_cast<int64_t>(regs.size()) * 1000000000000ll);
+    CHECK(things[1] == 36000000000000ll);
+    for (unsigned i = 2; i < regs.size(); ++i)
+        CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+    CHECK(memory[0] == 1000000000000ll);
+    CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemoryAllRegsMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width32),
+
+        regs.last(), regs[0], Arg::widthArg(Arg::Width32));
+
+    for (unsigned i = 2; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+    Vector<int64_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == static_cast<uint32_t>(static_cast<int64_t>(regs.size()) * 1000000000000ll));
+    CHECK(things[1] == static_cast<uint32_t>(36000000000000ll));
+    for (unsigned i = 2; i < regs.size(); ++i)
+        CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+    CHECK(memory[0] == combineHiLo(35000000000000ll, 1000000000000ll));
+    CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleSwapDouble()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadDoubleConstant(root, 1, Tmp(FPRInfo::fpRegT0), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 2, Tmp(FPRInfo::fpRegT1), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 3, Tmp(FPRInfo::fpRegT2), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 4, Tmp(FPRInfo::fpRegT3), Tmp(GPRInfo::regT0));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(FPRInfo::fpRegT2), Tmp(FPRInfo::fpRegT3), Arg::widthArg(Arg::Width64),
+        Tmp(FPRInfo::fpRegT3), Tmp(FPRInfo::fpRegT2), Arg::widthArg(Arg::Width64));
+
+    double things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT3), Arg::addr(base, 3 * sizeof(double)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 2);
+    CHECK(things[2] == 4);
+    CHECK(things[3] == 3);
+}
+
+void testShuffleShiftDouble()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadDoubleConstant(root, 1, Tmp(FPRInfo::fpRegT0), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 2, Tmp(FPRInfo::fpRegT1), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 3, Tmp(FPRInfo::fpRegT2), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 4, Tmp(FPRInfo::fpRegT3), Tmp(GPRInfo::regT0));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(FPRInfo::fpRegT2), Tmp(FPRInfo::fpRegT3), Arg::widthArg(Arg::Width64));
+
+    double things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT3), Arg::addr(base, 3 * sizeof(double)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 2);
+    CHECK(things[2] == 3);
+    CHECK(things[3] == 3);
+}
+
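+// These tests check the encoding of the three-operand double multiply when an
+// operand lives in a register or address whose encoding needs REX-style extension
+// bits (xmm13-xmm15, r12/r13), in register, addr, and base+index forms.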
+#if CPU(X86) || CPU(X86_64)
+void testX86VMULSD()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(FPRInfo::argumentFPR1), Tmp(FPRInfo::argumentFPR2));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDDestRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOp1DestRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+    root->append(MulDouble, nullptr, Tmp(X86Registers::xmm14), Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOp2DestRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm14));
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOpsDestRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm13));
+    root->append(MulDouble, nullptr, Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm13), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(GPRInfo::argumentGPR0), - 16), Tmp(FPRInfo::argumentFPR2));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun<double>(proc, 2.4, &secondArg + 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddrOpRexAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(X86Registers::r13), - 16), Tmp(FPRInfo::argumentFPR2));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun<double>(proc, 2.4, &secondArg + 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDDestRexAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(GPRInfo::argumentGPR0), 16), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun<double>(proc, 2.4, &secondArg - 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDRegOpDestRexAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+    root->append(MulDouble, nullptr, Arg::addr(Tmp(GPRInfo::argumentGPR0)), Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun<double>(proc, 2.4, &secondArg, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddrOpDestRexAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(X86Registers::r13), 8), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun<double>(proc, 2.4, &secondArg - 1, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDBaseNeedsRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Arg::index(Tmp(X86Registers::r13), Tmp(GPRInfo::argumentGPR1)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    uint64_t index = 8;
+    CHECK(compileAndRun<double>(proc, 2.4, &secondArg - 1, index, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDIndexNeedsRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR1), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Arg::index(Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    uint64_t index = - 8;
+    CHECK(compileAndRun<double>(proc, 2.4, &secondArg + 1, index, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDBaseIndexNeedRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r12));
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR1), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Arg::index(Tmp(X86Registers::r12), Tmp(X86Registers::r13)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    uint64_t index = 16;
+    CHECK(compileAndRun<double>(proc, 2.4, &secondArg - 2, index, pureNaN()) == 2.4 * 4.2);
+}
+
+#endif
+
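+// Expands inside run(): queues the named test as a shared task if it matches the
+// filter; the queue is drained in parallel by the worker threads created below.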
+#define RUN(test) do {                          \
+        if (!shouldRun(#test))                  \
+            break;                              \
+        tasks.append(                           \
+            createSharedTask<void()>(           \
+                [&] () {                        \
+                    dataLog(#test "...\n");     \
+                    test;                       \
+                    dataLog(#test ": OK!\n");   \
+                }));                            \
+    } while (false);
+
+void run(const char* filter)
+{
+    JSC::initializeThreading();
+    vm = &VM::create(LargeHeap).leakRef();
+
+    Deque<RefPtr<SharedTask<void()>>> tasks;
+
+    auto shouldRun = [&] (const char* testName) -> bool {
+        return !filter || !!strcasestr(testName, filter);
+    };
+
+    RUN(testSimple());
+    
+    RUN(testShuffleSimpleSwap());
+    RUN(testShuffleSimpleShift());
+    RUN(testShuffleLongShift());
+    RUN(testShuffleLongShiftBackwards());
+    RUN(testShuffleSimpleRotate());
+    RUN(testShuffleSimpleBroadcast());
+    RUN(testShuffleBroadcastAllRegs());
+    RUN(testShuffleTreeShift());
+    RUN(testShuffleTreeShiftBackward());
+    RUN(testShuffleTreeShiftOtherBackward());
+    RUN(testShuffleMultipleShifts());
+    RUN(testShuffleRotateWithFringe());
+    RUN(testShuffleRotateWithFringeInWeirdOrder());
+    RUN(testShuffleRotateWithLongFringe());
+    RUN(testShuffleMultipleRotates());
+    RUN(testShuffleShiftAndRotate());
+    RUN(testShuffleShiftAllRegs());
+    RUN(testShuffleRotateAllRegs());
+    RUN(testShuffleSimpleSwap64());
+    RUN(testShuffleSimpleShift64());
+    RUN(testShuffleSwapMixedWidth());
+    RUN(testShuffleShiftMixedWidth());
+    RUN(testShuffleShiftMemory());
+    RUN(testShuffleShiftMemoryLong());
+    RUN(testShuffleShiftMemoryAllRegs());
+    RUN(testShuffleShiftMemoryAllRegs64());
+    RUN(testShuffleShiftMemoryAllRegsMixedWidth());
+    RUN(testShuffleRotateMemory());
+    RUN(testShuffleRotateMemory64());
+    RUN(testShuffleRotateMemoryMixedWidth());
+    RUN(testShuffleRotateMemoryAllRegs64());
+    RUN(testShuffleRotateMemoryAllRegsMixedWidth());
+    RUN(testShuffleSwapDouble());
+    RUN(testShuffleShiftDouble());
+
+#if CPU(X86) || CPU(X86_64)
+    RUN(testX86VMULSD());
+    RUN(testX86VMULSDDestRex());
+    RUN(testX86VMULSDOp1DestRex());
+    RUN(testX86VMULSDOp2DestRex());
+    RUN(testX86VMULSDOpsDestRex());
+
+    RUN(testX86VMULSDAddr());
+    RUN(testX86VMULSDAddrOpRexAddr());
+    RUN(testX86VMULSDDestRexAddr());
+    RUN(testX86VMULSDRegOpDestRexAddr());
+    RUN(testX86VMULSDAddrOpDestRexAddr());
+
+    RUN(testX86VMULSDBaseNeedsRex());
+    RUN(testX86VMULSDIndexNeedsRex());
+    RUN(testX86VMULSDBaseIndexNeedRex());
+#endif
+
+    if (tasks.isEmpty())
+        usage();
+
+    Lock lock;
+
+    Vector<ThreadIdentifier> threads;
+    for (unsigned i = filter ? 1 : WTF::numberOfProcessorCores(); i--;) {
+        threads.append(
+            createThread(
+                "testb3 thread",
+                [&] () {
+                    for (;;) {
+                        RefPtr<SharedTask<void()>> task;
+                        {
+                            LockHolder locker(lock);
+                            if (tasks.isEmpty())
+                                return;
+                            task = tasks.takeFirst();
+                        }
+
+                        task->run();
+                    }
+                }));
+    }
+
+    for (ThreadIdentifier thread : threads)
+        waitForThreadCompletion(thread);
+    crashLock.lock();
+}
+
+} // anonymous namespace
+
+#else // ENABLE(B3_JIT)
+
+static void run(const char*)
+{
+    dataLog("B3 JIT is not enabled.\n");
+}
+
+#endif // ENABLE(B3_JIT)
+
+int main(int argc, char** argv)
+{
+    const char* filter = nullptr;
+    switch (argc) {
+    case 1:
+        break;
+    case 2:
+        filter = argv[1];
+        break;
+    default:
+        usage();
+        break;
+    }
+    
+    run(filter);
+    return 0;
+}
diff --git a/Source/JavaScriptCore/b3/testb3.cpp b/Source/JavaScriptCore/b3/testb3.cpp
new file mode 100644
index 000000000..a2eebe8eb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/testb3.cpp
@@ -0,0 +1,15923 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirValidate.h"
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3CCallValue.h"
+#include "B3Compilation.h"
+#include "B3Compile.h"
+#include "B3ComputeDivisionMagic.h"
+#include "B3Const32Value.h"
+#include "B3ConstPtrValue.h"
+#include "B3Effects.h"
+#include "B3FenceValue.h"
+#include "B3Generate.h"
+#include "B3LowerToAir.h"
+#include "B3MathExtras.h"
+#include "B3MemoryValue.h"
+#include "B3MoveConstants.h"
+#include "B3Procedure.h"
+#include "B3ReduceStrength.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3UseCounts.h"
+#include "B3Validate.h"
+#include "B3ValueInlines.h"
+#include "B3VariableValue.h"
+#include "B3WasmAddressValue.h"
+#include "B3WasmBoundsCheckValue.h"
+#include "CCallHelpers.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "InitializeThreading.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "PureNaN.h"
+#include "VM.h"
+#include <cmath>
+#include <string>
+#include <wtf/ListDump.h>
+#include <wtf/Lock.h>
+#include <wtf/NumberOfCores.h>
+#include <wtf/Threading.h>
+
+// We don't have a NO_RETURN_DUE_TO_EXIT, nor should we. That's ridiculous.
+static bool hiddenTruthBecauseNoReturnIsStupid() { return true; }
+
+static void usage()
+{
+    dataLog("Usage: testb3 []\n");
+    if (hiddenTruthBecauseNoReturnIsStupid())
+        exit(1);
+}
+
+#if ENABLE(B3_JIT)
+
+using namespace JSC;
+using namespace JSC::B3;
+
+namespace {
+
+bool shouldBeVerbose()
+{
+    return shouldDumpIR(B3Mode);
+}
+
+StaticLock crashLock;
+
+// Nothing fancy for now; we just use the existing WTF assertion machinery.
+#define CHECK(x) do {                                                   \
+        if (!!(x))                                                      \
+            break;                                                      \
+        crashLock.lock();                                               \
+        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #x); \
+        CRASH();                                                        \
+    } while (false)
+
+#define CHECK_EQ(x, y) do { \
+        auto __x = (x); \
+        auto __y = (y); \
+        if (__x == __y) \
+            break; \
+        crashLock.lock(); \
+        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, toCString(#x " == " #y, " (" #x " == ", __x, ", " #y " == ", __y, ")").data()); \
+        CRASH(); \
+    } while (false)
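+// Both macros take crashLock before reporting and deliberately never release
+// it: CRASH() follows immediately, and holding the lock keeps concurrent
+// failures on other worker threads from interleaving their output.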
+
+VM* vm;
+
+std::unique_ptr<Compilation> compile(Procedure& procedure, unsigned optLevel = 1)
+{
+    return std::make_unique<Compilation>(B3::compile(*vm, procedure, optLevel));
+}
+
+template<typename T, typename... Arguments>
+T invoke(MacroAssemblerCodePtr ptr, Arguments... arguments)
+{
+    T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(ptr.executableAddress());
+    return function(arguments...);
+}
+
+template<typename T, typename... Arguments>
+T invoke(const Compilation& code, Arguments... arguments)
+{
+    return invoke<T>(code.code(), arguments...);
+}
+
+template<typename T, typename... Arguments>
+T compileAndRun(Procedure& procedure, Arguments... arguments)
+{
+    return invoke<T>(*compile(procedure), arguments...);
+}
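+// Typical use, as in the tests below:
+//     Procedure proc;
+//     ... build IR into proc ...
+//     CHECK(compileAndRun<int>(proc, 42) == 42);
+// invoke() bitwise_casts the compiled code pointer to a C function pointer of
+// the requested signature, so the template arguments must agree with the B3
+// argument and return types or the call will read the wrong registers.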
+
+void lowerToAirForTesting(Procedure& proc)
+{
+    proc.resetReachability();
+    
+    if (shouldBeVerbose())
+        dataLog("B3 before lowering:\n", proc);
+    
+    validate(proc);
+    lowerToAir(proc);
+    
+    if (shouldBeVerbose())
+        dataLog("Air after lowering:\n", proc.code());
+    
+    Air::validate(proc.code());
+}
+
+template<typename Func>
+void checkDisassembly(Compilation& compilation, const Func& func, CString failText)
+{
+    CString disassembly = compilation.disassembly();
+    if (func(disassembly.data()))
+        return;
+    
+    crashLock.lock();
+    dataLog("Bad lowering!  ", failText, "\n");
+    dataLog(disassembly);
+    CRASH();
+}
+
+void checkUsesInstruction(Compilation& compilation, const char* text)
+{
+    checkDisassembly(
+        compilation,
+        [&] (const char* disassembly) -> bool {
+            return strstr(disassembly, text);
+        },
+        toCString("Expected to find ", text, " but didnt!"));
+}
+
+void checkDoesNotUseInstruction(Compilation& compilation, const char* text)
+{
+    checkDisassembly(
+        compilation,
+        [&] (const char* disassembly) -> bool {
+            return !strstr(disassembly, text);
+        },
+        toCString("Did not expected to find ", text, " but it's there!"));
+}
+
+template<typename Type>
+struct Operand {
+    const char* name;
+    Type value;
+};
+
+typedef Operand<int64_t> Int64Operand;
+typedef Operand<int32_t> Int32Operand;
+
+template<typename FloatType>
+void populateWithInterestingValues(Vector<Operand<FloatType>>& operands)
+{
+    operands.append({ "0.", static_cast(0.) });
+    operands.append({ "-0.", static_cast(-0.) });
+    operands.append({ "0.4", static_cast(0.5) });
+    operands.append({ "-0.4", static_cast(-0.5) });
+    operands.append({ "0.5", static_cast(0.5) });
+    operands.append({ "-0.5", static_cast(-0.5) });
+    operands.append({ "0.6", static_cast(0.5) });
+    operands.append({ "-0.6", static_cast(-0.5) });
+    operands.append({ "1.", static_cast(1.) });
+    operands.append({ "-1.", static_cast(-1.) });
+    operands.append({ "2.", static_cast(2.) });
+    operands.append({ "-2.", static_cast(-2.) });
+    operands.append({ "M_PI", static_cast(M_PI) });
+    operands.append({ "-M_PI", static_cast(-M_PI) });
+    operands.append({ "min", std::numeric_limits::min() });
+    operands.append({ "max", std::numeric_limits::max() });
+    operands.append({ "lowest", std::numeric_limits::lowest() });
+    operands.append({ "epsilon", std::numeric_limits::epsilon() });
+    operands.append({ "infiniti", std::numeric_limits::infinity() });
+    operands.append({ "-infiniti", - std::numeric_limits::infinity() });
+    operands.append({ "PNaN", static_cast(PNaN) });
+}
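+// The operands are keyed by display name so failures are readable. The set
+// covers signed zeros, fractions that round differently in each direction,
+// the smallest and largest finite values, both infinities, and JSC's
+// canonical pure NaN.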
+
+template<typename FloatType>
+Vector<Operand<FloatType>> floatingPointOperands()
+{
+    Vector<Operand<FloatType>> operands;
+    populateWithInterestingValues(operands);
+    return operands;
+}
+
+static Vector<Int64Operand> int64Operands()
+{
+    Vector<Int64Operand> operands;
+    operands.append({ "0", 0 });
+    operands.append({ "1", 1 });
+    operands.append({ "-1", -1 });
+    operands.append({ "42", 42 });
+    operands.append({ "-42", -42 });
+    operands.append({ "int64-max", std::numeric_limits::max() });
+    operands.append({ "int64-min", std::numeric_limits::min() });
+    operands.append({ "int32-max", std::numeric_limits::max() });
+    operands.append({ "int32-min", std::numeric_limits::min() });
+
+    return operands;
+}
+
+static Vector<Int32Operand> int32Operands()
+{
+    Vector<Int32Operand> operands({
+        { "0", 0 },
+        { "1", 1 },
+        { "-1", -1 },
+        { "42", 42 },
+        { "-42", -42 },
+        { "int32-max", std::numeric_limits::max() },
+        { "int32-min", std::numeric_limits::min() }
+    });
+    return operands;
+}
+
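+// Emits a three-operand 32-bit add on top of the two-operand macroassembler
+// interface: add in place when dest aliases src2, otherwise move src1 into
+// dest and then add src2.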
+void add32(CCallHelpers& jit, GPRReg src1, GPRReg src2, GPRReg dest)
+{
+    if (src2 == dest)
+        jit.add32(src1, dest);
+    else {
+        jit.move(src1, dest);
+        jit.add32(src2, dest);
+    }
+}
+
+void test42()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* const42 = root->appendNew<Const32Value>(proc, Origin(), 42);
+    root->appendNewControlValue(proc, Return, Origin(), const42);
+
+    CHECK(compileAndRun<int>(proc) == 42);
+}
+
+void testLoad42()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &x)));
+
+    CHECK(compileAndRun<int>(proc) == 42);
+}
+
+void testLoadWithOffsetImpl(int32_t offset64, int32_t offset32)
+{
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        int64_t x = -42;
+        Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, Load, Int64, Origin(),
+                base,
+                offset64));
+
+        char* address = reinterpret_cast<char*>(&x) - offset64;
+        CHECK(compileAndRun<int64_t>(proc, address) == -42);
+    }
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        int32_t x = -42;
+        Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, Load, Int32, Origin(),
+                base,
+                offset32));
+
+        char* address = reinterpret_cast<char*>(&x) - offset32;
+        CHECK(compileAndRun<int32_t>(proc, address) == -42);
+    }
+}
+
+void testLoadOffsetImm9Max()
+{
+    testLoadWithOffsetImpl(255, 255);
+}
+
+void testLoadOffsetImm9MaxPlusOne()
+{
+    testLoadWithOffsetImpl(256, 256);
+}
+
+void testLoadOffsetImm9MaxPlusTwo()
+{
+    testLoadWithOffsetImpl(257, 257);
+}
+
+void testLoadOffsetImm9Min()
+{
+    testLoadWithOffsetImpl(-256, -256);
+}
+
+void testLoadOffsetImm9MinMinusOne()
+{
+    testLoadWithOffsetImpl(-257, -257);
+}
+
+void testLoadOffsetScaledUnsignedImm12Max()
+{
+    testLoadWithOffsetImpl(32760, 16380);
+}
+
+void testLoadOffsetScaledUnsignedOverImm12Max()
+{
+    testLoadWithOffsetImpl(32760, 32760);
+    testLoadWithOffsetImpl(32761, 16381);
+    testLoadWithOffsetImpl(32768, 16384);
+}
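+// The boundary values above mirror ARM64 load/store encodings: the signed
+// unscaled immediate (imm9) covers [-256, 255], while the unsigned scaled
+// immediate (imm12) covers up to 4095 units of the access size, i.e. 32760
+// for Int64 and 16380 for Int32. Offsets beyond those ranges force the
+// compiler to materialize the offset in a register.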
+
+void testArg(int argument)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+
+    CHECK(compileAndRun<int>(proc, argument) == argument);
+}
+
+void testReturnConst64(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const64Value>(proc, Origin(), value));
+
+    CHECK(compileAndRun<int64_t>(proc) == value);
+}
+
+void testReturnVoid()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(proc, Return, Origin());
+    compileAndRun<void>(proc);
+}
+
+void testAddArg(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Add, Origin(), value, value));
+
+    CHECK(compileAndRun<int>(proc, a) == a + a);
+}
+
+void testAddArgs(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
+void testAddArgImm(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int>(proc, a) == a + b);
+}
+
+void testAddImmArg(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<int>(proc, b) == a + b);
+}
+
+void testAddArgMem(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        load);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int64_t inputOutput = b;
+    CHECK(!compileAndRun<int64_t>(proc, a, &inputOutput));
+    CHECK(inputOutput == a + b);
+}
+
+void testAddMemArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(),
+        load,
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, &a, b) == a + b);
+}
+
+void testAddImmMem(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(),
+        root->appendNew<Const64Value>(proc, Origin(), a),
+        load);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int64_t inputOutput = b;
+    CHECK(!compileAndRun<int64_t>(proc, &inputOutput));
+    CHECK(inputOutput == a + b);
+}
+
+void testAddArg32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Add, Origin(), value, value));
+
+    CHECK(compileAndRun<int>(proc, a) == a + a);
+}
+
+void testAddArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
+void testAddArgMem32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), argument, load);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int32_t inputOutput = b;
+    CHECK(!compileAndRun<int32_t>(proc, a, &inputOutput));
+    CHECK(inputOutput == a + b);
+}
+
+void testAddMemArg32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), load, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, &a, b) == a + b);
+}
+
+void testAddImmMem32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), a),
+        load);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int32_t inputOutput = b;
+    CHECK(!compileAndRun<int32_t>(proc, &inputOutput));
+    CHECK(inputOutput == a + b);
+}
+
+void testAddArgZeroImmZDef()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* constZero = root->appendNew<Const32Value>(proc, Origin(), 0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            arg,
+            constZero));
+
+    auto code = compile(proc, 0);
+    CHECK(invoke<int64_t>(*code, 0x0123456789abcdef) == 0x89abcdef);
+}
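+// "ZDef" refers to a 32-bit def that zero-extends to the full register: even
+// an add of constant zero must clear the high 32 bits, which is why passing
+// 0x0123456789abcdef returns 0x89abcdef rather than the original 64-bit value.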
+
+void testAddLoadTwice()
+{
+    auto test = [&] (unsigned optLevel) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        int32_t value = 42;
+        Value* load = root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &value));
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(proc, Add, Origin(), load, load));
+
+        auto code = compile(proc, optLevel);
+        CHECK(invoke<int32_t>(*code) == 42 * 2);
+    };
+
+    test(0);
+    test(1);
+}
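+// Running at both optLevel 0 and 1 checks that a load value consumed twice is
+// handled correctly with optimizations disabled as well as by the full
+// optimizing pipeline.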
+
+void testAddArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Add, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a + a));
+}
+
+void testAddArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Add, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), a + b));
+}
+
+void testAddArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Add, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a + b));
+}
+
+void testAddImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Add, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, b), a + b));
+}
+
+void testAddImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Add, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), a + b));
+}
+
+void testAddArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a + a)));
+}
+
+void testAddArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddFPRArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1));
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, a, b), a + b));
+}
+
+void testAddArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddArgFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), asDouble, asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a + a)));
+}
+
+void testAddArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a + b)));
+    CHECK(isIdentical(effect, static_cast<double>(a) + static_cast<double>(b)));
+}
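+// Unlike the "useless" variants above, storing the intermediate double makes
+// the FloatToDouble conversion observable, so the compiler cannot legally
+// narrow the arithmetic back to float; the test pins down both the float
+// return value and the stored double.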
+
+void testMulArg(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<Value>(
+        proc, Trunc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), value, value));
+
+    CHECK(compileAndRun<int>(proc, a) == a * a);
+}
+
+void testMulArgStore(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    int mulSlot;
+    int valueSlot;
+    
+    Value* value = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* mul = root->appendNew<Value>(proc, Mul, Origin(), value, value);
+
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(), value,
+        root->appendNew<ConstPtrValue>(proc, Origin(), &valueSlot));
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(), mul,
+        root->appendNew<ConstPtrValue>(proc, Origin(), &mulSlot));
+
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, a));
+    CHECK(mulSlot == a * a);
+    CHECK(valueSlot == a);
+}
+
+void testMulAddArg(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(proc, Mul, Origin(), value, value),
+            value));
+
+    CHECK(compileAndRun<int>(proc, a) == a * a + a);
+}
+
+void testMulArgs(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Mul, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a * b);
+}
+
+void testMulArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Mul, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == a * b);
+}
+
+void testMulImmArg(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Mul, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<int>(proc, b) == a * b);
+}
+
+void testMulArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Mul, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a * b);
+}
+
+void testMulLoadTwice()
+{
+    auto test = [&] (unsigned optLevel) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        int32_t value = 42;
+        Value* load = root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &value));
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(proc, Mul, Origin(), load, load));
+
+        auto code = compile(proc, optLevel);
+        CHECK(invoke<int32_t>(*code) == 42 * 42);
+    };
+
+    test(0);
+    test(1);
+}
+
+void testMulAddArgsLeft()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* added = root->appendNew<Value>(proc, Add, Origin(), multiplied, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value * b.value + c.value);
+            }
+        }
+    }
+}
+
+void testMulAddArgsRight()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+    Value* added = root->appendNew<Value>(proc, Add, Origin(), arg0, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value + b.value * c.value);
+            }
+        }
+    }
+}
+
+void testMulAddArgsLeft32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* added = root->appendNew<Value>(proc, Add, Origin(), multiplied, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value * b.value + c.value);
+            }
+        }
+    }
+}
+
+void testMulAddArgsRight32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+    Value* added = root->appendNew<Value>(proc, Add, Origin(), arg0, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value + b.value * c.value);
+            }
+        }
+    }
+}
+
+void testMulSubArgsLeft()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), multiplied, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value * b.value - c.value);
+            }
+        }
+    }
+}
+
+void testMulSubArgsRight()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), arg0, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value - b.value * c.value);
+            }
+        }
+    }
+}
+
+void testMulSubArgsLeft32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), multiplied, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value * b.value - c.value);
+            }
+        }
+    }
+}
+
+void testMulSubArgsRight32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), arg0, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value - b.value * c.value);
+            }
+        }
+    }
+}
+
+void testMulNegArgs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* zero = root->appendNew<Const64Value>(proc, Origin(), 0);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), zero, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            CHECK(invoke<int64_t>(*code, a.value, b.value) == -(a.value * b.value));
+        }
+    }
+}
+
+void testMulNegArgs32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* zero = root->appendNew<Const32Value>(proc, Origin(), 0);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), zero, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            CHECK(invoke<int32_t>(*code, a.value, b.value) == -(a.value * b.value));
+        }
+    }
+}
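+// Sub(0, Mul(a, b)) is the shape instruction selection may fuse into a
+// multiply-negate (for example, ARM64's mneg). The checks only constrain the
+// result value, so they hold whether or not the fusion fires.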
+
+void testMulArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a * a));
+}
+
+void testMulArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), a * b));
+}
+
+void testMulArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a * b));
+}
+
+void testMulImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, b), a * b));
+}
+
+void testMulImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), a * b));
+}
+
+void testMulArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * a)));
+}
+
+void testMulArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulArgFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble, asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * a)));
+}
+
+void testMulArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a * b)));
+    CHECK(isIdentical(effect, static_cast<double>(a) * static_cast<double>(b)));
+}
+
+void testDivArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Div, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a / a));
+}
+
+void testDivArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), a / b));
+}
+
+void testDivArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a / b));
+}
+
+void testDivImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, b), a / b));
+}
+
+void testDivImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), a / b));
+}
+
+void testDivArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Div, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a / a)));
+}
+
+void testDivArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew(proc, Div, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)), bitwise_cast(a / b)));
+}
+
+void testDivArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Div, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a / b)));
+}
+
+void testDivImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Div, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(b)), bitwise_cast(a / b)));
+}
+
+void testDivImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew(proc, Origin(), a);
+    Value* constValue2 = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Div, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(a / b)));
+}
+
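+// Floating-point Mod is verified against fmod() from <cmath> (and its float
+// counterpart via static_cast<float>), which these tests treat as the
+// reference semantics for B3's Mod on doubles and floats.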
+void testModArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mod, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), fmod(a, a)));
+}
+
+void testModArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), fmod(a, b)));
+}
+
+void testModArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), fmod(a, b)));
+}
+
+void testModImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, b), fmod(a, b)));
+}
+
+void testModImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), fmod(a, b)));
+}
+
+void testModArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, a)))));
+}
+
+void testModArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
+void testModArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
+void testModImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
+void testModImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
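+// "UselessDoubleConversion" tests wrap the float operation in FloatToDouble /
+// DoubleToFloat round trips that do not change the result; the optimizer is
+// expected to be able to strength-reduce them away, and the tests verify the
+// result stays bit-identical either way.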
+void testDivArgFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Div, Origin(), asDouble, asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a / a)));
+}
+
+void testDivArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Div, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a / b)));
+}
+
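+// Here the intermediate double result is also stored through argumentGPR2, so
+// the conversions have an observable effect and cannot be eliminated; the
+// "effect" out-parameter checks the stored double value.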
+void testDivArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Div, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* doubleDivress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleDivress);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a / b)));
+    CHECK(isIdentical(effect, static_cast<double>(a) / static_cast<double>(b)));
+}
+
+void testUDivArgsInt32(uint32_t a, uint32_t b)
+{
+    // UDiv with denominator == 0 is invalid.
+    if (!b)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, UDiv, Origin(), argument1, argument2);
+    root->appendNew<Value>(proc, Return, Origin(), result);
+
+    CHECK_EQ(compileAndRun<uint32_t>(proc, a, b), a / b);
+}
+
+void testUDivArgsInt64(uint64_t a, uint64_t b)
+{
+    // UDiv with denominator == 0 is invalid.
+    if (!b)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* result = root->appendNew<Value>(proc, UDiv, Origin(), argument1, argument2);
+    root->appendNew<Value>(proc, Return, Origin(), result);
+
+    CHECK_EQ(compileAndRun<uint64_t>(proc, a, b), a / b);
+}
+
+void testUModArgsInt32(uint32_t a, uint32_t b)
+{
+    // UMod with denominator == 0 is invalid.
+    if (!b)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, UMod, Origin(), argument1, argument2);
+    root->appendNew<Value>(proc, Return, Origin(), result);
+
+    CHECK_EQ(compileAndRun<uint32_t>(proc, a, b), a % b);
+}
+
+void testUModArgsInt64(uint64_t a, uint64_t b)
+{
+    // UMod with denominator == 0 is invalid.
+    if (!b)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* result = root->appendNew<Value>(proc, UMod, Origin(), argument1, argument2);
+    root->appendNew<Value>(proc, Return, Origin(), result);
+
+    CHECK_EQ(compileAndRun<uint64_t>(proc, a, b), a % b);
+}
+
+void testSubArg(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), value, value));
+
+    CHECK(!compileAndRun<int>(proc, a));
+}
+
+void testSubArgs(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a - b);
+}
+
+void testSubArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == a - b);
+}
+
+void testNegValueSubOne(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* negArgument = root->appendNew<Value>(proc, Sub, Origin(),
+        root->appendNew<Const64Value>(proc, Origin(), 0),
+        argument);
+    Value* negArgumentMinusOne = root->appendNew<Value>(proc, Sub, Origin(),
+        negArgument,
+        root->appendNew<Const64Value>(proc, Origin(), 1));
+    root->appendNewControlValue(proc, Return, Origin(), negArgumentMinusOne);
+    CHECK(compileAndRun<int>(proc, a) == -a - 1);
+}
+
+void testSubImmArg(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<int>(proc, b) == a - b);
+}
+
+void testSubArgMem(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        load);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, a, &b) == a - b);
+}
+
+void testSubMemArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+        load,
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int64_t inputOutput = a;
+    CHECK(!compileAndRun<int32_t>(proc, &inputOutput, b));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubImmMem(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+        root->appendNew<Const64Value>(proc, Origin(), a),
+        load);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int64_t inputOutput = b;
+    CHECK(!compileAndRun<int32_t>(proc, &inputOutput));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubMemImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+        load,
+        root->appendNew<Const64Value>(proc, Origin(), b));
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int64_t inputOutput = a;
+    CHECK(!compileAndRun<int32_t>(proc, &inputOutput));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a - b);
+}
+
+void testSubArgImm32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int>(proc, a) == a - b);
+}
+
+void testSubImmArg32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int>(proc, b) == a - b);
+}
+
+void testSubMemArg32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), load, argument);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int32_t inputOutput = a;
+    CHECK(!compileAndRun<int32_t>(proc, &inputOutput, b));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubArgMem32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), argument, load);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, a, &b) == a - b);
+}
+
+void testSubImmMem32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), a),
+        load);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int32_t inputOutput = b;
+    CHECK(!compileAndRun<int32_t>(proc, &inputOutput));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubMemImm32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+        load,
+        root->appendNew<Const32Value>(proc, Origin(), b));
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int32_t inputOutput = a;
+    CHECK(!compileAndRun<int32_t>(proc, &inputOutput));
+    CHECK(inputOutput == a - b);
+}
+
+void testNegValueSubOne32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* negArgument = root->appendNew<Value>(proc, Sub, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0),
+        argument);
+    Value* negArgumentMinusOne = root->appendNew<Value>(proc, Sub, Origin(),
+        negArgument,
+        root->appendNew<Const32Value>(proc, Origin(), 1));
+    root->appendNewControlValue(proc, Return, Origin(), negArgumentMinusOne);
+    CHECK(compileAndRun<int>(proc, a) == -a - 1);
+}
+
+void testSubArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a - a));
+}
+
+void testSubArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), a - b));
+}
+
+void testSubArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a - b));
+}
+
+void testSubImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, b), a - b));
+}
+
+void testSubImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), a - b));
+}
+
+void testSubArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a - a)));
+}
+
+void testSubArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubArgFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), asDouble, asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a - a)));
+}
+
+void testSubArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* doubleSubress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleSubress);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a - b)));
+    CHECK(isIdentical(effect, static_cast<double>(a) - static_cast<double>(b)));
+}
+
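+// Lowers a Sub to Air and checks that instruction selection produced exactly
+// one three-operand (ternary) instruction of the expected Air opcode, with
+// all three operands in temporaries.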
+void testTernarySubInstructionSelection(B3::Opcode valueModifier, Type valueType, Air::Opcode expectedOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* right = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+    if (valueModifier == Trunc) {
+        left = root->appendNew<Value>(proc, valueModifier, valueType, Origin(), left);
+        right = root->appendNew<Value>(proc, valueModifier, valueType, Origin(), right);
+    }
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), left, right));
+
+    lowerToAirForTesting(proc);
+
+    auto block = proc.code()[0];
+    unsigned numberOfSubInstructions = 0;
+    for (auto instruction : *block) {
+        if (instruction.kind.opcode == expectedOpcode) {
+            CHECK_EQ(instruction.args.size(), 3ul);
+            CHECK_EQ(instruction.args[0].kind(), Air::Arg::Tmp);
+            CHECK_EQ(instruction.args[1].kind(), Air::Arg::Tmp);
+            CHECK_EQ(instruction.args[2].kind(), Air::Arg::Tmp);
+            numberOfSubInstructions++;
+        }
+    }
+    CHECK_EQ(numberOfSubInstructions, 1ul);
+}
+
+void testNegDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Neg, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), -a));
+}
+
+void testNegFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Neg, Origin(), floatValue));
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), -a));
+}
+
+void testNegFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Neg, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), -a));
+}
+
+void testBitAndArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int64_t>(proc, a, b) == (a & b));
+}
+
+void testBitAndSameArg(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            argument,
+            argument));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == a);
+}
+
+void testBitAndImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc) == (a & b));
+}
+
+void testBitAndArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == (a & b));
+}
+
+void testBitAndImmArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<int64_t>(proc, b) == (a & b));
+}
+
+void testBitAndBitAndArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitAnd = root->appendNew<Value>(
+        proc, BitAnd, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Const64Value>(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            innerBitAnd,
+            root->appendNew<Const64Value>(proc, Origin(), c)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == ((a & b) & c));
+}
+
+void testBitAndImmBitAndArgImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitAnd = root->appendNew<Value>(
+        proc, BitAnd, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Const64Value>(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            innerBitAnd));
+
+    CHECK(compileAndRun<int64_t>(proc, b) == (a & (b & c)));
+}
+
+void testBitAndArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int>(proc, a, b) == (a & b));
+}
+
+void testBitAndSameArg32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            argument,
+            argument));
+
+    CHECK(compileAndRun<int>(proc, a) == a);
+}
+
+void testBitAndImms32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int>(proc) == (a & b));
+}
+
+void testBitAndArgImm32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int>(proc, a) == (a & b));
+}
+
+void testBitAndImmArg32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int>(proc, b) == (a & b));
+}
+
+void testBitAndBitAndArgImmImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitAnd = root->appendNew<Value>(
+        proc, BitAnd, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<Const32Value>(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            innerBitAnd,
+            root->appendNew<Const32Value>(proc, Origin(), c)));
+
+    CHECK(compileAndRun<int>(proc, a) == ((a & b) & c));
+}
+
+void testBitAndImmBitAndArgImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitAnd = root->appendNew<Value>(
+        proc, BitAnd, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<Const32Value>(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            innerBitAnd));
+
+    CHECK(compileAndRun<int>(proc, b) == (a & (b & c)));
+}
+
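+// Equal produces 0 or 1. Masking with 0x5 must preserve that boolean (only
+// bit 0 can be set), XOR with 0x1 inverts it, and Select then yields -5 when
+// the operands were equal and 42 otherwise. This exercises optimizations that
+// recognize masked comparison results as booleans.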
+void testBitAndWithMaskReturnsBooleans(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* equal = root->appendNew<Value>(proc, Equal, Origin(), arg0, arg1);
+    Value* maskedEqual = root->appendNew<Value>(proc, BitAnd, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0x5),
+        equal);
+    Value* inverted = root->appendNew<Value>(proc, BitXor, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0x1),
+        maskedEqual);
+    Value* select = root->appendNew<Value>(proc, Select, Origin(), inverted,
+        root->appendNew<Const64Value>(proc, Origin(), 42),
+        root->appendNew<Const64Value>(proc, Origin(), -5));
+
+    root->appendNewControlValue(proc, Return, Origin(), select);
+
+    int64_t expected = (a == b) ? -5 : 42;
+    CHECK(compileAndRun<int64_t>(proc, a, b) == expected);
+}
+
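+// BitAnd (and BitOr below) on floating-point values operates on the raw bit
+// patterns; these helpers compute the expected result the same way.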
+double bitAndDouble(double a, double b)
+{
+    return bitwise_cast<double>(bitwise_cast<uint64_t>(a) & bitwise_cast<uint64_t>(b));
+}
+
+void testBitAndArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), bitAndDouble(a, a)));
+}
+
+void testBitAndArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), bitAndDouble(a, b)));
+}
+
+void testBitAndArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), bitAndDouble(a, b)));
+}
+
+void testBitAndImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<double>(proc), bitAndDouble(a, b)));
+}
+
+float bitAndFloat(float a, float b)
+{
+    return bitwise_cast<float>(bitwise_cast<uint32_t>(a) & bitwise_cast<uint32_t>(b));
+}
+
+void testBitAndArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), bitAndFloat(a, a)));
+}
+
+void testBitAndArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitAndFloat(a, b)));
+}
+
+void testBitAndArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitAndFloat(a, b)));
+}
+
+void testBitAndImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc), bitAndFloat(a, b)));
+}
+
+void testBitAndArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    Value* argumentAasDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argumentA);
+    Value* argumentBasDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argumentB);
+    Value* doubleResult = root->appendNew<Value>(proc, BitAnd, Origin(), argumentAasDouble, argumentBasDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), doubleResult);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    double doubleA = a;
+    double doubleB = b;
+    float expected = static_cast<float>(bitAndDouble(doubleA, doubleB));
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), expected));
+}
+
+void testBitOrArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int64_t>(proc, a, b) == (a | b));
+}
+
+void testBitOrSameArg(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            argument,
+            argument));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == a);
+}
+
+void testBitOrImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc) == (a | b));
+}
+
+void testBitOrArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == (a | b));
+}
+
+void testBitOrImmArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<int64_t>(proc, b) == (a | b));
+}
+
+void testBitOrBitOrArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitOr = root->appendNew<Value>(
+        proc, BitOr, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Const64Value>(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            innerBitOr,
+            root->appendNew<Const64Value>(proc, Origin(), c)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == ((a | b) | c));
+}
+
+void testBitOrImmBitOrArgImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitOr = root->appendNew<Value>(
+        proc, BitOr, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Const64Value>(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            innerBitOr));
+
+    CHECK(compileAndRun<int64_t>(proc, b) == (a | (b | c)));
+}
+
+void testBitOrArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int>(proc, a, b) == (a | b));
+}
+
+void testBitOrSameArg32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            argument,
+            argument));
+
+    CHECK(compileAndRun<int>(proc, a) == a);
+}
+
+void testBitOrImms32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int>(proc) == (a | b));
+}
+
+void testBitOrArgImm32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int>(proc, a) == (a | b));
+}
+
+void testBitOrImmArg32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int>(proc, b) == (a | b));
+}
+
+void testBitOrBitOrArgImmImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitOr = root->appendNew<Value>(
+        proc, BitOr, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<Const32Value>(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            innerBitOr,
+            root->appendNew<Const32Value>(proc, Origin(), c)));
+
+    CHECK(compileAndRun<int>(proc, a) == ((a | b) | c));
+}
+
+void testBitOrImmBitOrArgImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitOr = root->appendNew<Value>(
+        proc, BitOr, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<Const32Value>(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitOr, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            innerBitOr));
+
+    CHECK(compileAndRun<int>(proc, b) == (a | (b | c)));
+}
+
+double bitOrDouble(double a, double b)
+{
+    return bitwise_cast<double>(bitwise_cast<uint64_t>(a) | bitwise_cast<uint64_t>(b));
+}
+
+void testBitOrArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* result = root->appendNew(proc, BitOr, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, a), bitOrDouble(a, a)));
+}
+
+void testBitOrArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* result = root->appendNew(proc, BitOr, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, a, b), bitOrDouble(a, b)));
+}
+
+void testBitOrArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), bitOrDouble(a, b)));
+}
+
+void testBitOrImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<double>(proc), bitOrDouble(a, b)));
+}
+
+float bitOrFloat(float a, float b)
+{
+    return bitwise_cast<float>(bitwise_cast<uint32_t>(a) | bitwise_cast<uint32_t>(b));
+}
+
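+// In these tests a float argument arrives as its 32-bit pattern in a GPR,
+// hence the Trunc + BitwiseCast idiom below and the bitwise_cast<int32_t>(a)
+// at the call sites.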
+void testBitOrArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), bitOrFloat(a, a)));
+}
+
+void testBitOrArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitOrFloat(a, b)));
+}
+
+void testBitOrArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitOrFloat(a, b)));
+}
+
+void testBitOrImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun<float>(proc), bitOrFloat(a, b)));
+}
+
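+// The float operands are round-tripped through double before the BitOr; the
+// conversion is "useless" in that it must not change the result, which is
+// checked against the double-based reference above.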
+void testBitOrArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+        root->appendNew<Value>(proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    Value* argumentAasDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argumentA);
+    Value* argumentBasDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argumentB);
+    Value* doubleResult = root->appendNew<Value>(proc, BitOr, Origin(), argumentAasDouble, argumentBasDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), doubleResult);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    double doubleA = a;
+    double doubleB = b;
+    float expected = static_cast<float>(bitOrDouble(doubleA, doubleB));
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), expected));
+}
+
+void testBitXorArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int64_t>(proc, a, b) == (a ^ b));
+}
+
+void testBitXorSameArg(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            argument,
+            argument));
+
+    CHECK(!compileAndRun<int64_t>(proc, a));
+}
+
+void testBitXorImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc) == (a ^ b));
+}
+
+void testBitXorArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == (a ^ b));
+}
+
+void testBitXorImmArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<int64_t>(proc, b) == (a ^ b));
+}
+
+void testBitXorBitXorArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitXor = root->appendNew<Value>(
+        proc, BitXor, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Const64Value>(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            innerBitXor,
+            root->appendNew<Const64Value>(proc, Origin(), c)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == ((a ^ b) ^ c));
+}
+
+void testBitXorImmBitXorArgImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitXor = root->appendNew<Value>(
+        proc, BitXor, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Const64Value>(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            innerBitXor));
+
+    CHECK(compileAndRun<int64_t>(proc, b) == (a ^ (b ^ c)));
+}
+
+void testBitXorArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int>(proc, a, b) == (a ^ b));
+}
+
+void testBitXorSameArg32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            argument,
+            argument));
+
+    CHECK(!compileAndRun<int>(proc, a));
+}
+
+void testBitXorImms32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int>(proc) == (a ^ b));
+}
+
+void testBitXorArgImm32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int>(proc, a) == (a ^ b));
+}
+
+void testBitXorImmArg32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int>(proc, b) == (a ^ b));
+}
+
+void testBitXorBitXorArgImmImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitXor = root->appendNew<Value>(
+        proc, BitXor, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<Const32Value>(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            innerBitXor,
+            root->appendNew<Const32Value>(proc, Origin(), c)));
+
+    CHECK(compileAndRun<int>(proc, a) == ((a ^ b) ^ c));
+}
+
+void testBitXorImmBitXorArgImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitXor = root->appendNew<Value>(
+        proc, BitXor, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<Const32Value>(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            innerBitXor));
+
+    CHECK(compileAndRun<int>(proc, b) == (a ^ (b ^ c)));
+}
+
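+// The BitNot tests express bitwise NOT as BitXor(-1, x) and check the result
+// against an explicit XOR with an all-ones mask.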
+void testBitNotArg(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), -1),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(isIdentical(compileAndRun<int64_t>(proc, a), static_cast<int64_t>((static_cast<uint64_t>(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotImm(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), -1),
+            root->appendNew<Const64Value>(proc, Origin(), a)));
+
+    CHECK(isIdentical(compileAndRun<int64_t>(proc, a), static_cast<int64_t>((static_cast<uint64_t>(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotMem(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* notLoad = root->appendNew<Value>(proc, BitXor, Origin(),
+        root->appendNew<Const64Value>(proc, Origin(), -1),
+        load);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), notLoad, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int64_t input = a;
+    compileAndRun<int32_t>(proc, &input);
+    CHECK(isIdentical(input, static_cast<int64_t>((static_cast<uint64_t>(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotArg32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, BitXor, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), -1),
+            argument));
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, a), static_cast<int32_t>((static_cast<uint32_t>(a) ^ 0xffffffff))));
+}
+
+void testBitNotImm32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitXor, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), -1),
+            root->appendNew<Const32Value>(proc, Origin(), a)));
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, a), static_cast<int32_t>((static_cast<uint32_t>(a) ^ 0xffffffff))));
+}
+
+void testBitNotMem32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* notLoad = root->appendNew<Value>(proc, BitXor, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), -1),
+        load);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), notLoad, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int32_t input = a;
+    compileAndRun<int32_t>(proc, &input);
+    CHECK(isIdentical(input, static_cast<int32_t>((static_cast<uint32_t>(a) ^ 0xffffffff))));
+}
+
+void testBitNotOnBooleanAndBranch32(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* argsAreEqual = root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2);
+    Value* argsAreNotEqual = root->appendNew<Value>(proc, BitXor, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), -1),
+        argsAreEqual);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        argsAreNotEqual,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), -42));
+
+    int32_t expectedValue = (a != b) ? 42 : -42;
+    CHECK(compileAndRun<int32_t>(proc, a, b) == expectedValue);
+}
+
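+// Shift amounts in B3 are Int32, so the 64-bit shift tests truncate the
+// second argument register before using it as the amount.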
+void testShlArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int64_t>(proc, a, b) == (a << b));
+}
+
+void testShlImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc) == (a << b));
+}
+
+void testShlArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == (a << b));
+}
+
+void testShlArg32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Shl, Origin(), value, value));
+
+    CHECK(compileAndRun<int32_t>(proc, a) == (a << a));
+}
+
+void testShlArgs32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int32_t>(proc, a, b) == (a << b));
+}
+
+void testShlImms32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int32_t>(proc) == (a << b));
+}
+
+void testShlArgImm32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int32_t>(proc, a) == (a << b));
+}
+
+void testSShrArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int64_t>(proc, a, b) == (a >> b));
+}
+
+void testSShrImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc) == (a >> b));
+}
+
+void testSShrArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == (a >> b));
+}
+
+void testSShrArg32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, SShr, Origin(), value, value));
+
+    CHECK(compileAndRun<int32_t>(proc, a) == (a >> (a & 31)));
+}
+
+void testSShrArgs32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int32_t>(proc, a, b) == (a >> b));
+}
+
+void testSShrImms32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int32_t>(proc) == (a >> b));
+}
+
+void testSShrArgImm32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int32_t>(proc, a) == (a >> b));
+}
+
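+// ZShr is the zero-extending (logical) right shift, so these tests use
+// unsigned operands; the SShr tests above cover the arithmetic shift.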
+void testZShrArgs(uint64_t a, uint64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZShr, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<uint64_t>(proc, a, b) == (a >> b));
+}
+
+void testZShrImms(uint64_t a, uint64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZShr, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<uint64_t>(proc) == (a >> b));
+}
+
+void testZShrArgImm(uint64_t a, uint64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZShr, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<uint64_t>(proc, a) == (a >> b));
+}
+
+void testZShrArg32(uint32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, ZShr, Origin(), value, value));
+
+    CHECK(compileAndRun<uint32_t>(proc, a) == (a >> (a & 31)));
+}
+
+void testZShrArgs32(uint32_t a, uint32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZShr, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<uint32_t>(proc, a, b) == (a >> b));
+}
+
+void testZShrImms32(uint32_t a, uint32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZShr, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), a),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<uint32_t>(proc) == (a >> b));
+}
+
+void testZShrArgImm32(uint32_t a, uint32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZShr, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<uint32_t>(proc, a) == (a >> b));
+}
+
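+// Reference implementation for the Clz tests: shift left until the top bit is
+// set, counting the steps; a zero input yields the full bit width.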
+template<typename IntegerType>
+static unsigned countLeadingZero(IntegerType value)
+{
+    unsigned bitCount = sizeof(IntegerType) * 8;
+    if (!value)
+        return bitCount;
+
+    unsigned counter = 0;
+    while (!(static_cast<uint64_t>(value) & (1ull << (bitCount - 1)))) {
+        value <<= 1;
+        ++counter;
+    }
+    return counter;
+}
+
+void testClzArg64(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* clzValue = root->appendNew<Value>(proc, Clz, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), clzValue);
+    CHECK(compileAndRun<unsigned>(proc, a) == countLeadingZero(a));
+}
+
+void testClzMem64(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* value = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* clzValue = root->appendNew<Value>(proc, Clz, Origin(), value);
+    root->appendNewControlValue(proc, Return, Origin(), clzValue);
+    CHECK(compileAndRun<unsigned>(proc, &a) == countLeadingZero(a));
+}
+
+void testClzArg32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* clzValue = root->appendNew<Value>(proc, Clz, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), clzValue);
+    CHECK(compileAndRun<unsigned>(proc, a) == countLeadingZero(a));
+}
+
+void testClzMem32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* value = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+    Value* clzValue = root->appendNew<Value>(proc, Clz, Origin(), value);
+    root->appendNewControlValue(proc, Return, Origin(), clzValue);
+    CHECK(compileAndRun<unsigned>(proc, &a) == countLeadingZero(a));
+}
+
+void testAbsArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Abs, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), fabs(a)));
+}
+
+void testAbsImm(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Abs, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), fabs(a)));
+}
+
+void testAbsMem(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Abs, Origin(), loadDouble));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, &a), fabs(a)));
+}
+
+void testAbsAbsArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstAbs = root->appendNew<Value>(proc, Abs, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* secondAbs = root->appendNew<Value>(proc, Abs, Origin(), firstAbs);
+    root->appendNewControlValue(proc, Return, Origin(), secondAbs);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), fabs(a)));
+}
+
+void testAbsBitwiseCastArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentAsInt64 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argumentAsDouble = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentAsInt64);
+    Value* absValue = root->appendNew<Value>(proc, Abs, Origin(), argumentAsDouble);
+    root->appendNewControlValue(proc, Return, Origin(), absValue);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, bitwise_cast<int64_t>(a)), fabs(a)));
+}
+
+void testBitwiseCastAbsBitwiseCastArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentAsInt64 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argumentAsDouble = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentAsInt64);
+    Value* absValue = root->appendNew<Value>(proc, Abs, Origin(), argumentAsDouble);
+    Value* resultAsInt64 = root->appendNew<Value>(proc, BitwiseCast, Origin(), absValue);
+
+    root->appendNewControlValue(proc, Return, Origin(), resultAsInt64);
+
+    int64_t expectedResult = bitwise_cast<int64_t>(fabs(a));
+    CHECK(isIdentical(compileAndRun<int64_t>(proc, bitwise_cast<int64_t>(a)), expectedResult));
+}
+
+void testAbsArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Abs, Origin(), argument);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+}
+
+void testAbsImm(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Abs, Origin(), argument);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+}
+
+void testAbsMem(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Abs, Origin(), loadFloat);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, &a), bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+}
+
+void testAbsAbsArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* firstAbs = root->appendNew<Value>(proc, Abs, Origin(), argument);
+    Value* secondAbs = root->appendNew<Value>(proc, Abs, Origin(), firstAbs);
+    root->appendNewControlValue(proc, Return, Origin(), secondAbs);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), static_cast<float>(fabs(a))));
+}
+
+void testAbsBitwiseCastArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentAsInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentAsFloat = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentAsInt32);
+    Value* absValue = root->appendNew<Value>(proc, Abs, Origin(), argumentAsFloat);
+    root->appendNewControlValue(proc, Return, Origin(), absValue);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), static_cast<float>(fabs(a))));
+}
+
+void testBitwiseCastAbsBitwiseCastArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentAsInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentAsFloat = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentAsInt32);
+    Value* absValue = root->appendNew<Value>(proc, Abs, Origin(), argumentAsFloat);
+    Value* resultAsInt32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), absValue);
+
+    root->appendNewControlValue(proc, Return, Origin(), resultAsInt32);
+
+    int32_t expectedResult = bitwise_cast<int32_t>(static_cast<float>(fabs(a)));
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), expectedResult));
+}
+
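+// As with BitOr, the "useless" variant wraps Abs in a FloatToDouble/
+// DoubleToFloat round trip that must not affect the float result; the
+// "effectful" variant also stores the double intermediate, so that conversion
+// has an observable effect and cannot simply be dropped.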
+void testAbsArgWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Abs, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+}
+
+void testAbsArgWithEffectfulDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Abs, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    int32_t resultValue = compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), &effect);
+    CHECK(isIdentical(resultValue, bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+    CHECK(isIdentical(effect, static_cast<double>(fabs(a))));
+}
+
+void testCeilArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Ceil, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(a)));
+}
+
+void testCeilImm(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Ceil, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), ceil(a)));
+}
+
+void testCeilMem(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Ceil, Origin(), loadDouble));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, &a), ceil(a)));
+}
+
+void testCeilCeilArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstCeil = root->appendNew<Value>(proc, Ceil, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* secondCeil = root->appendNew<Value>(proc, Ceil, Origin(), firstCeil);
+    root->appendNewControlValue(proc, Return, Origin(), secondCeil);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(a)));
+}
+
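+// Ceil already yields an integral value, so a Floor wrapped around it is a
+// no-op; the CHECK below accordingly expects plain ceil(a).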
+void testFloorCeilArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstCeil = root->appendNew<Value>(proc, Ceil, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* wrappingFloor = root->appendNew<Value>(proc, Floor, Origin(), firstCeil);
+    root->appendNewControlValue(proc, Return, Origin(), wrappingFloor);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(a)));
+}
+
+void testCeilIToD64(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argumentAsDouble = root->appendNew<Value>(proc, IToD, Origin(), argument);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Ceil, Origin(), argumentAsDouble));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(static_cast<double>(a))));
+}
+
+void testCeilIToD32(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentAsDouble = root->appendNew<Value>(proc, IToD, Origin(), argument);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Ceil, Origin(), argumentAsDouble));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(static_cast<double>(a))));
+}
+
+void testCeilArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Ceil, Origin(), argument);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(ceilf(a))));
+}
+
+void testCeilImm(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Ceil, Origin(), argument);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(ceilf(a))));
+}
+
+void testCeilMem(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Ceil, Origin(), loadFloat);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, &a), bitwise_cast<int32_t>(ceilf(a))));
+}
+
+void testCeilCeilArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* firstCeil = root->appendNew<Value>(proc, Ceil, Origin(), argument);
+    Value* secondCeil = root->appendNew<Value>(proc, Ceil, Origin(), firstCeil);
+    root->appendNewControlValue(proc, Return, Origin(), secondCeil);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), ceilf(a)));
+}
+
+void testFloorCeilArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* firstCeil = root->appendNew<Value>(proc, Ceil, Origin(), argument);
+    Value* wrappingFloor = root->appendNew<Value>(proc, Floor, Origin(), firstCeil);
+    root->appendNewControlValue(proc, Return, Origin(), wrappingFloor);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), ceilf(a)));
+}
+
+void testCeilArgWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Ceil, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(ceilf(a))));
+}
+
+void testCeilArgWithEffectfulDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Ceil, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    int32_t resultValue = compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), &effect);
+    CHECK(isIdentical(resultValue, bitwise_cast<int32_t>(ceilf(a))));
+    CHECK(isIdentical(effect, static_cast<double>(ceilf(a))));
+}
+
+void testFloorArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Floor, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), floor(a)));
+}
+
+void testFloorImm(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Floor, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), floor(a)));
+}
+
+void testFloorMem(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Floor, Origin(), loadDouble));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, &a), floor(a)));
+}
+
+void testFloorFloorArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstFloor = root->appendNew<Value>(proc, Floor, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* secondFloor = root->appendNew<Value>(proc, Floor, Origin(), firstFloor);
+    root->appendNewControlValue(proc, Return, Origin(), secondFloor);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), floor(a)));
+}
+
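+// Dual of testFloorCeilArg: Floor yields an integral value, so the wrapping
+// Ceil is a no-op and the expected result is plain floor(a).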
+void testCeilFloorArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstFloor = root->appendNew<Value>(proc, Floor, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* wrappingCeil = root->appendNew<Value>(proc, Ceil, Origin(), firstFloor);
+    root->appendNewControlValue(proc, Return, Origin(), wrappingCeil);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), floor(a)));
+}
+
+void testFloorIToD64(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argumentAsDouble = root->appendNew<Value>(proc, IToD, Origin(), argument);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Floor, Origin(), argumentAsDouble));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), floor(static_cast<double>(a))));
+}
+
+void testFloorIToD32(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentAsDouble = root->appendNew<Value>(proc, IToD, Origin(), argument);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Floor, Origin(), argumentAsDouble));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), floor(static_cast<double>(a))));
+}
+
+void testFloorArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Floor, Origin(), argument);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(floorf(a))));
+}
+
+void testFloorImm(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Floor, Origin(), argument);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(floorf(a))));
+}
+
+void testFloorMem(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Floor, Origin(), loadFloat);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, &a), bitwise_cast<int32_t>(floorf(a))));
+}
+
+void testFloorFloorArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* firstFloor = root->appendNew<Value>(proc, Floor, Origin(), argument);
+    Value* secondFloor = root->appendNew<Value>(proc, Floor, Origin(), firstFloor);
+    root->appendNewControlValue(proc, Return, Origin(), secondFloor);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), floorf(a)));
+}
+
+void testCeilFloorArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* firstFloor = root->appendNew<Value>(proc, Floor, Origin(), argument);
+    Value* wrappingCeil = root->appendNew<Value>(proc, Ceil, Origin(), firstFloor);
+    root->appendNewControlValue(proc, Return, Origin(), wrappingCeil);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), floorf(a)));
+}
+
+void testFloorArgWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Floor, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(floorf(a))));
+}
+
+void testFloorArgWithEffectfulDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Floor, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    int32_t resultValue = compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), &effect);
+    CHECK(isIdentical(resultValue, bitwise_cast<int32_t>(floorf(a))));
+    CHECK(isIdentical(effect, static_cast<double>(floorf(a))));
+}
+
+void testSqrtArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sqrt, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), sqrt(a)));
+}
+
+void testSqrtImm(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sqrt, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), sqrt(a)));
+}
+
+void testSqrtMem(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sqrt, Origin(), loadDouble));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, &a), sqrt(a)));
+}
+
+void testSqrtArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), argument);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+}
+
+void testSqrtImm(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), argument);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+}
+
+void testSqrtMem(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), loadFloat);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, &a), bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+}
+
+void testSqrtArgWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+}
+
+void testSqrtArgWithEffectfulDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    int32_t resultValue = compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), &effect);
+    CHECK(isIdentical(resultValue, bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+    CHECK(isIdentical(effect, static_cast<double>(sqrt(a))));
+}
+
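+// Comparing two floats after promoting both to double is equivalent to
+// comparing the floats directly; these tests pin down the observable result
+// of that pattern, including when the promoted value flows through a Phi.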
+void testCompareTwoFloatToDouble(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg1As32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1Float = root->appendNew<Value>(proc, BitwiseCast, Origin(), arg1As32);
+    Value* arg1AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg1Float);
+
+    Value* arg2As32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2Float = root->appendNew<Value>(proc, BitwiseCast, Origin(), arg2As32);
+    Value* arg2AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg2Float);
+    Value* equal = root->appendNew<Value>(proc, Equal, Origin(), arg1AsDouble, arg2AsDouble);
+
+    root->appendNewControlValue(proc, Return, Origin(), equal);
+
+    CHECK(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)) == (a == b));
+}
+
+void testCompareOneFloatToDouble(float a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg1As32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1Float = root->appendNew<Value>(proc, BitwiseCast, Origin(), arg1As32);
+    Value* arg1AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg1Float);
+
+    Value* arg2AsDouble = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* equal = root->appendNew<Value>(proc, Equal, Origin(), arg1AsDouble, arg2AsDouble);
+
+    root->appendNewControlValue(proc, Return, Origin(), equal);
+
+    CHECK(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), b) == (a == b));
+}
+
+void testCompareFloatToDoubleThroughPhi(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+    Value* arg1As32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg1Float = root->appendNew<Value>(proc, BitwiseCast, Origin(), arg1As32);
+    Value* arg1AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg1Float);
+
+    Value* arg2AsDouble = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* arg2AsFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), arg2AsDouble);
+    Value* arg2AsFRoundedDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg2AsFloat);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), arg1AsDouble);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* elseConst = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+    UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), elseConst);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+    Value* equal = tail->appendNew<Value>(proc, Equal, Origin(), doubleInput, arg2AsFRoundedDouble);
+    tail->appendNewControlValue(proc, Return, Origin(), equal);
+
+    auto code = compile(proc);
+    int32_t integerA = bitwise_cast<int32_t>(a);
+    double doubleB = b;
+    CHECK(invoke<int32_t>(*code, 1, integerA, doubleB) == (a == b));
+    CHECK(invoke<int32_t>(*code, 0, integerA, doubleB) == (b == 0));
+}
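+
+// B3 represents SSA joins with Upsilon/Phi pairs rather than block arguments:
+// each predecessor appends an UpsilonValue naming the incoming value, the join
+// block appends the Phi, and setPhi() wires the two together, as in the
+// phi-based tests above and below.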
+
+void testDoubleToFloatThroughPhi(float value)
+{
+    // Simple case of:
+    //     if (a)
+    //         x = DoubleAdd(a, b)
+    //     else
+    //         x = DoubleAdd(a, c)
+    //     DoubleToFloat(x)
+    //
+    // Both Adds can be converted to float adds.
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* argAsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* positiveConst = thenCase->appendNew<ConstDoubleValue>(proc, Origin(), 42.5f);
+    Value* thenAdd = thenCase->appendNew<Value>(proc, Add, Origin(), argAsDouble, positiveConst);
+    UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), thenAdd);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* elseConst = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), M_PI);
+    UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), elseConst);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+    Value* floatResult = tail->appendNew<Value>(proc, DoubleToFloat, Origin(), doubleInput);
+    tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke<float>(*code, 1, bitwise_cast<int32_t>(value)), value + 42.5f));
+    CHECK(isIdentical(invoke<float>(*code, 0, bitwise_cast<int32_t>(value)), static_cast<float>(M_PI)));
+}
+
+void testReduceFloatToDoubleValidates()
+{
+    // Simple case of:
+    //     f = DoubleToFloat(BitwiseCast(argGPR0))
+    //     if (a)
+    //         x = FloatConst()
+    //     else
+    //         x = FloatConst()
+    //     p = Phi(x)
+    //     a = Mul(p, p)
+    //     b = Add(a, f)
+    //     c = Add(p, b)
+    //     Return(c)
+    //
+    // This should not crash in the validator after ReduceFloatToDouble.
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* thingy = root->appendNew<Value>(proc, BitwiseCast, Origin(), condition);
+    thingy = root->appendNew<Value>(proc, DoubleToFloat, Origin(), thingy); // Make the phase think it has work to do.
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(),
+        thenCase->appendNew<ConstFloatValue>(proc, Origin(), 11.5));
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(),
+        elseCase->appendNew<ConstFloatValue>(proc, Origin(), 10.5));
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* phi = tail->appendNew<Value>(proc, Phi, Float, Origin());
+    thenValue->setPhi(phi);
+    elseValue->setPhi(phi);
+    Value* result = tail->appendNew<Value>(proc, Mul, Origin(),
+            phi, phi);
+    result = tail->appendNew<Value>(proc, Add, Origin(),
+            result,
+            thingy);
+    result = tail->appendNew<Value>(proc, Add, Origin(),
+            phi,
+            result);
+    tail->appendNewControlValue(proc, Return, Origin(), result);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke<float>(*code, 1), 11.5f * 11.5f + static_cast<float>(bitwise_cast<double>(static_cast<uint64_t>(1))) + 11.5f));
+    CHECK(isIdentical(invoke<float>(*code, 0), 10.5f * 10.5f + static_cast<float>(bitwise_cast<double>(static_cast<uint64_t>(0))) + 10.5f));
+}
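+
+// The phase exercised above rewrites Double computations whose results are only
+// consumed through DoubleToFloat into direct Float computations; the point of
+// the test is simply that the IR still validates after that rewrite.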
+
+void testDoubleProducerPhiToFloatConversion(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* asDouble = thenCase->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), asDouble);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* constDouble = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+    UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), constDouble);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+
+    Value* argAsDoubleAgain = tail->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* finalAdd = tail->appendNew<Value>(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+    Value* floatResult = tail->appendNew<Value>(proc, DoubleToFloat, Origin(), finalAdd);
+    tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke<float>(*code, 1, bitwise_cast<int32_t>(value)), value + value));
+    CHECK(isIdentical(invoke<float>(*code, 0, bitwise_cast<int32_t>(value)), 42.5f + value));
+}
+
+void testDoubleProducerPhiToFloatConversionWithDoubleConsumer(float value)
+{
+    // In this case, the Upsilon-Phi effectively contains a Float value, but it is used
+    // as a Float and as a Double.
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* asDouble = thenCase->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), asDouble);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* constDouble = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+    UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), constDouble);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+
+    Value* argAsDoubleAgain = tail->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* floatAdd = tail->appendNew<Value>(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+
+    // FRound.
+    Value* floatResult = tail->appendNew<Value>(proc, DoubleToFloat, Origin(), floatAdd);
+    Value* doubleResult = tail->appendNew<Value>(proc, FloatToDouble, Origin(), floatResult);
+
+    // This one *cannot* be eliminated.
+    Value* doubleAdd = tail->appendNew<Value>(proc, Add, Origin(), doubleInput, doubleResult);
+
+    tail->appendNewControlValue(proc, Return, Origin(), doubleAdd);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke<double>(*code, 1, bitwise_cast<int32_t>(value)), (value + value) + static_cast<double>(value)));
+    CHECK(isIdentical(invoke<double>(*code, 0, bitwise_cast<int32_t>(value)), static_cast<double>((42.5f + value) + 42.5f)));
+}
+
+void testDoubleProducerPhiWithNonFloatConst(float value, double constValue)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* asDouble = thenCase->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), asDouble);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* constDouble = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), constValue);
+    UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), constDouble);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+
+    Value* argAsDoubleAgain = tail->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* finalAdd = tail->appendNew<Value>(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+    Value* floatResult = tail->appendNew<Value>(proc, DoubleToFloat, Origin(), finalAdd);
+    tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke<float>(*code, 1, bitwise_cast<int32_t>(value)), value + value));
+    CHECK(isIdentical(invoke<float>(*code, 0, bitwise_cast<int32_t>(value)), static_cast<float>(constValue + value)));
+}
+
+void testDoubleArgToInt64BitwiseCast(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun<int64_t>(proc, value), bitwise_cast<int64_t>(value)));
+}
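+
+// BitwiseCast reinterprets the operand's bits at the same width rather than
+// converting the value, matching WTF::bitwise_cast (and C++20's std::bit_cast).
+// A sketch of what the procedure above computes:
+//
+//     int64_t bits;
+//     memcpy(&bits, &value, sizeof(bits)); // not static_cast<int64_t>(value)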
+
+void testDoubleImmToInt64BitwiseCast(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(value)));
+}
+
+void testTwoBitwiseCastOnDouble(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* first = root->appendNew(proc, BitwiseCast, Origin(), argument);
+    Value* second = root->appendNew(proc, BitwiseCast, Origin(), first);
+    root->appendNewControlValue(proc, Return, Origin(), second);
+
+    CHECK(isIdentical(compileAndRun(proc, value), value));
+}
+
+void testBitwiseCastOnDoubleInMemory(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadDouble);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), bitwise_cast(value)));
+}
+
+void testBitwiseCastOnDoubleInMemoryIndexed(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* scaledOffset = root->appendNew(proc, Shl, Origin(),
+        offset,
+        root->appendNew(proc, Origin(), 3));
+    Value* address = root->appendNew(proc, Add, Origin(), base, scaledOffset);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadDouble);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value, 0), bitwise_cast(value)));
+}
+
+void testInt64BArgToDoubleBitwiseCast(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc, value), bitwise_cast(value)));
+}
+
+void testInt64BImmToDoubleBitwiseCast(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(value)));
+}
+
+void testTwoBitwiseCastOnInt64(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* first = root->appendNew(proc, BitwiseCast, Origin(), argument);
+    Value* second = root->appendNew(proc, BitwiseCast, Origin(), first);
+    root->appendNewControlValue(proc, Return, Origin(), second);
+
+    CHECK(isIdentical(compileAndRun(proc, value), value));
+}
+
+void testBitwiseCastOnInt64InMemory(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadDouble);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), bitwise_cast(value)));
+}
+
+void testBitwiseCastOnInt64InMemoryIndexed(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* scaledOffset = root->appendNew(proc, Shl, Origin(),
+        offset,
+        root->appendNew(proc, Origin(), 3));
+    Value* address = root->appendNew(proc, Add, Origin(), base, scaledOffset);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadDouble);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value, 0), bitwise_cast(value)));
+}
+
+void testFloatImmToInt32BitwiseCast(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(value)));
+}
+
+void testBitwiseCastOnFloatInMemory(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew(proc, Load, Float, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadFloat);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), bitwise_cast(value)));
+}
+
+void testInt32BArgToFloatBitwiseCast(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc, value), bitwise_cast(value)));
+}
+
+void testInt32BImmToFloatBitwiseCast(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(value)));
+}
+
+void testTwoBitwiseCastOnInt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* first = root->appendNew(proc, BitwiseCast, Origin(), argument);
+    Value* second = root->appendNew(proc, BitwiseCast, Origin(), first);
+    root->appendNewControlValue(proc, Return, Origin(), second);
+
+    CHECK(isIdentical(compileAndRun(proc, value), value));
+}
+
+void testBitwiseCastOnInt32InMemory(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadFloat);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), bitwise_cast(value)));
+}
+
+void testConvertDoubleToFloatArg(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, value), static_cast<float>(value)));
+}
+
+void testConvertDoubleToFloatImm(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+    Value* asFloat = root->appendNew(proc, DoubleToFloat, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testConvertDoubleToFloatMem(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    Value* asFloat = root->appendNew(proc, DoubleToFloat, Origin(), loadedDouble);
+    root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), static_cast(value)));
+}
+
+void testConvertFloatToDoubleArg(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(value)), static_cast(value)));
+}
+
+void testConvertFloatToDoubleImm(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testConvertFloatToDoubleMem(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedFloat = root->appendNew(proc, Load, Float, Origin(), address);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), loadedFloat);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), static_cast(value)));
+}
+
+void testConvertDoubleToFloatToDoubleToFloat(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* asFloat = root->appendNew(proc, DoubleToFloat, Origin(), argument);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), asFloat);
+    Value* asFloatAgain = root->appendNew(proc, DoubleToFloat, Origin(), asDouble);
+    root->appendNewControlValue(proc, Return, Origin(), asFloatAgain);
+
+    CHECK(isIdentical(compileAndRun(proc, value), static_cast(value)));
+}
+
+void testLoadFloatConvertDoubleConvertFloatStoreFloat(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* dst = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* loadedFloat = root->appendNew(proc, Load, Float, Origin(), src);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), loadedFloat);
+    Value* asFloatAgain = root->appendNew(proc, DoubleToFloat, Origin(), asDouble);
+    root->appendNew(proc, Store, Origin(), asFloatAgain, dst);
+
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    float input = value;
+    float output = 0.;
+    CHECK(!compileAndRun(proc, &input, &output));
+    CHECK(isIdentical(input, output));
+}
+
+void testFroundArg(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* asFloat = root->appendNew(proc, DoubleToFloat, Origin(), argument);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), asFloat);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc, value), static_cast(static_cast(value))));
+}
+
+void testFroundMem(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    Value* asFloat = root->appendNew(proc, DoubleToFloat, Origin(), loadedDouble);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), asFloat);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), static_cast(static_cast(value))));
+}
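+
+// The DoubleToFloat/FloatToDouble pair in the two tests above is the JavaScript
+// Math.fround pattern: round a double to the nearest float, then widen it back,
+// i.e. static_cast<double>(static_cast<float>(value)) on the C++ side.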
+
+void testIToD64Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* srcAsDouble = root->appendNew<Value>(proc, IToD, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+    auto code = compile(proc);
+    for (auto testValue : int64Operands())
+        CHECK(isIdentical(invoke<double>(*code, testValue.value), static_cast<double>(testValue.value)));
+}
+
+void testIToF64Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* srcAsFloat = root->appendNew(proc, IToF, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+    auto code = compile(proc);
+    for (auto testValue : int64Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToD32Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+    auto code = compile(proc);
+    for (auto testValue : int32Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToF32Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* srcAsFloat = root->appendNew(proc, IToF, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+    auto code = compile(proc);
+    for (auto testValue : int32Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToD64Mem()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedSrc = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), loadedSrc);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+    auto code = compile(proc);
+    int64_t inMemoryValue;
+    for (auto testValue : int64Operands()) {
+        inMemoryValue = testValue.value;
+        CHECK(isIdentical(invoke(*code, &inMemoryValue), static_cast(testValue.value)));
+        CHECK(inMemoryValue == testValue.value);
+    }
+}
+
+void testIToF64Mem()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedSrc = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* srcAsFloat = root->appendNew(proc, IToF, Origin(), loadedSrc);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+    auto code = compile(proc);
+    int64_t inMemoryValue;
+    for (auto testValue : int64Operands()) {
+        inMemoryValue = testValue.value;
+        CHECK(isIdentical(invoke(*code, &inMemoryValue), static_cast(testValue.value)));
+        CHECK(inMemoryValue == testValue.value);
+    }
+}
+
+void testIToD32Mem()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedSrc = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), loadedSrc);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+    auto code = compile(proc);
+    int32_t inMemoryValue;
+    for (auto testValue : int32Operands()) {
+        inMemoryValue = testValue.value;
+        CHECK(isIdentical(invoke(*code, &inMemoryValue), static_cast(testValue.value)));
+        CHECK(inMemoryValue == testValue.value);
+    }
+}
+
+void testIToF32Mem()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedSrc = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* srcAsFloat = root->appendNew(proc, IToF, Origin(), loadedSrc);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+    auto code = compile(proc);
+    int32_t inMemoryValue;
+    for (auto testValue : int32Operands()) {
+        inMemoryValue = testValue.value;
+        CHECK(isIdentical(invoke(*code, &inMemoryValue), static_cast(testValue.value)));
+        CHECK(inMemoryValue == testValue.value);
+    }
+}
+
+void testIToD64Imm(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), value);
+    Value* srcAsFloatingPoint = root->appendNew(proc, IToD, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testIToF64Imm(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), value);
+    Value* srcAsFloatingPoint = root->appendNew(proc, IToF, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testIToD32Imm(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), value);
+    Value* srcAsFloatingPoint = root->appendNew(proc, IToD, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testIToF32Imm(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), value);
+    Value* srcAsFloatingPoint = root->appendNew(proc, IToF, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
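+
+// IToD and IToF are signed integer to floating-point conversions, which is why
+// every CHECK in this family compares against a plain static_cast from the
+// signed operand.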
+
+void testIToDReducedToIToF64Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), src);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), srcAsDouble);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    for (auto testValue : int64Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToDReducedToIToF32Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), src);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), srcAsDouble);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    for (auto testValue : int32Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
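+
+// DoubleToFloat(IToD(x)) can be reduced to a single IToF(x): a double's 53-bit
+// significand exceeds the 2 * 24 + 2 bits needed to make the int -> double ->
+// float double rounding agree with a direct int -> float conversion, so the two
+// tests above expect identical results either way.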
+
+void testStore32(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 0xbaadbeef;
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, value));
+    CHECK(slot == value);
+}
+
+void testStoreConstant(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 0xbaadbeef;
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(proc, Origin(), value),
+        root->appendNew(proc, Origin(), &slot));
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == value);
+}
+
+void testStoreConstantPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    intptr_t slot;
+    if (is64Bit())
+        slot = (static_cast(0xbaadbeef) << 32) + static_cast(0xbaadbeef);
+    else
+        slot = 0xbaadbeef;
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(proc, Origin(), value),
+        root->appendNew(proc, Origin(), &slot));
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == value);
+}
+
+void testStore8Arg()
+{
+    { // Direct addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+
+        root->appendNew(proc, Store8, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int8_t storage = 0;
+        CHECK(compileAndRun(proc, 42, &storage) == 42);
+        CHECK(storage == 42);
+    }
+
+    { // Indexed addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+        Value* displacement = root->appendNew(proc, Origin(), -1);
+
+        Value* baseDisplacement = root->appendNew(proc, Add, Origin(), displacement, base);
+        Value* address = root->appendNew(proc, Add, Origin(), baseDisplacement, offset);
+
+        root->appendNew(proc, Store8, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int8_t storage = 0;
+        CHECK(compileAndRun(proc, 42, &storage, 1) == 42);
+        CHECK(storage == 42);
+    }
+}
+
+void testStore8Imm()
+{
+    { // Direct addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Origin(), 42);
+        Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+        root->appendNew(proc, Store8, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int8_t storage = 0;
+        CHECK(compileAndRun(proc, &storage) == 42);
+        CHECK(storage == 42);
+    }
+
+    { // Indexed addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Origin(), 42);
+        Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* displacement = root->appendNew(proc, Origin(), -1);
+
+        Value* baseDisplacement = root->appendNew(proc, Add, Origin(), displacement, base);
+        Value* address = root->appendNew(proc, Add, Origin(), baseDisplacement, offset);
+
+        root->appendNew(proc, Store8, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int8_t storage = 0;
+        CHECK(compileAndRun(proc, &storage, 1) == 42);
+        CHECK(storage == 42);
+    }
+}
+
+void testStorePartial8BitRegisterOnX86()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    // We want to have this in ECX.
+    Value* returnValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+    // We want this stuck in EDX.
+    Value* whereToStore = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+    // The patchpoint is there to help us force the hand of the compiler.
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+
+    // Force the values above to be materialized and give the allocator
+    // a stronger incentive to name those registers the way we need.
+    patchpoint->append(ConstrainedValue(returnValue, ValueRep(GPRInfo::regT3)));
+    patchpoint->append(ConstrainedValue(whereToStore, ValueRep(GPRInfo::regT2)));
+
+    // We'll produce EDI.
+    patchpoint->resultConstraint = ValueRep::reg(GPRInfo::regT6);
+
+    // Give the allocator a good reason not to use any other register.
+    RegisterSet clobberSet = RegisterSet::allGPRs();
+    clobberSet.exclude(RegisterSet::stackRegisters());
+    clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+    clobberSet.clear(GPRInfo::regT3);
+    clobberSet.clear(GPRInfo::regT2);
+    clobberSet.clear(GPRInfo::regT6);
+    patchpoint->clobberLate(clobberSet);
+
+    // Set EDI to zero.
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.xor64(params[0].gpr(), params[0].gpr());
+        });
+
+    // If everything went well, we should have the big number in EAX,
+    // the patchpoint result in EDI, and whereToStore in EDX.
+    // Since EDI == 5, and AH == 5 on an 8-bit store, this would go wrong
+    // if we used X86 partial registers.
+    root->appendNew<MemoryValue>(proc, Store8, Origin(), patchpoint, whereToStore);
+
+    root->appendNewControlValue(proc, Return, Origin(), returnValue);
+
+    int8_t storage = 0xff;
+    CHECK(compileAndRun<int64_t>(proc, 0x12345678abcdef12, &storage) == 0x12345678abcdef12);
+    CHECK(!storage);
+}
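+
+// Background for the test above: without a REX prefix, x86 byte-register
+// encodings 4-7 address AH/CH/DH/BH rather than the low bytes of
+// ESP/EBP/ESI/EDI, so an 8-bit store that picked a legacy partial-register
+// encoding would write the wrong byte and the CHECKs would fail.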
+
+void testStore16Arg()
+{
+    { // Direct addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+
+        root->appendNew(proc, Store16, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int16_t storage = -1;
+        CHECK(compileAndRun(proc, 42, &storage) == 42);
+        CHECK(storage == 42);
+    }
+
+    { // Indexed addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+        Value* displacement = root->appendNew(proc, Origin(), -1);
+
+        Value* baseDisplacement = root->appendNew(proc, Add, Origin(), displacement, base);
+        Value* address = root->appendNew(proc, Add, Origin(), baseDisplacement, offset);
+
+        root->appendNew(proc, Store16, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int16_t storage = -1;
+        CHECK(compileAndRun(proc, 42, &storage, 1) == 42);
+        CHECK(storage == 42);
+    }
+}
+
+void testStore16Imm()
+{
+    { // Direct addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Origin(), 42);
+        Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+        root->appendNew(proc, Store16, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int16_t storage = -1;
+        CHECK(compileAndRun(proc, &storage) == 42);
+        CHECK(storage == 42);
+    }
+
+    { // Indexed addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Origin(), 42);
+        Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* displacement = root->appendNew(proc, Origin(), -1);
+
+        Value* baseDisplacement = root->appendNew(proc, Add, Origin(), displacement, base);
+        Value* address = root->appendNew(proc, Add, Origin(), baseDisplacement, offset);
+
+        root->appendNew(proc, Store16, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int16_t storage = -1;
+        CHECK(compileAndRun(proc, &storage, 1) == 42);
+        CHECK(storage == 42);
+    }
+}
+
+void testTrunc(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(value));
+}
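+
+// Trunc keeps only the low 32 bits of its Int64 operand, so the expected value
+// is just static_cast<int32_t>(value).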
+
+void testAdd1(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), 1)));
+
+    CHECK(compileAndRun(proc, value) == value + 1);
+}
+
+void testAdd1Ptr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), 1)));
+
+    CHECK(compileAndRun(proc, value) == value + 1);
+}
+
+void testNeg32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Sub, Origin(),
+            root->appendNew(proc, Origin(), 0),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun(proc, value) == -value);
+}
+
+void testNegPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Sub, Origin(),
+            root->appendNew(proc, Origin(), 0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun(proc, value) == -value);
+}
+
+void testStoreAddLoad32(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, amount));
+    CHECK(slot == 37 + amount);
+}
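+
+// The testStoreAddLoad* variants below sweep the same read-modify-write shape
+// across operand widths, immediates, and indexed addressing; on x86 the backend
+// can fuse such a load/add/store into a single memory-operand instruction, which
+// is the kind of instruction selection these tests are meant to cover.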
+
+void testStoreAddLoadImm32(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad8(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int8_t slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm8(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int8_t slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad16(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int16_t slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store16, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm16(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int16_t slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store16, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad64(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int64_t slot = 37000000000ll;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int64, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37000000000ll + amount);
+}
+
+void testStoreAddLoadImm64(int64_t amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int64_t slot = 370000000000ll;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int64, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 370000000000ll + amount);
+}
+
+void testStoreAddLoad32Index(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    int* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm32Index(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    int* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad8Index(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int8_t slot = 37;
+    int8_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm8Index(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int8_t slot = 37;
+    int8_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad16Index(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int16_t slot = 37;
+    int16_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store16, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm16Index(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int16_t slot = 37;
+    int16_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+    root->appendNew<MemoryValue>(
+        proc, Store16, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew<Const32Value>(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad64Index(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int64_t slot = 37000000000ll;
+    int64_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), slotPtr),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, amount));
+    CHECK(slot == 37000000000ll + amount);
+}
+
+void testStoreAddLoadImm64Index(int64_t amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int64_t slot = 370000000000ll;
+    int64_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), slotPtr),
+            root->appendNew<Const64Value>(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc));
+    CHECK(slot == 370000000000ll + amount);
+}
+
+void testStoreSubLoad(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int32_t startValue = std::numeric_limits<int32_t>::min();
+    int32_t slot = startValue;
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, amount));
+    CHECK(slot == startValue - amount);
+}
+
+void testStoreAddLoadInterference(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+    ArgumentRegValue* otherSlotPtr =
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 666),
+        otherSlotPtr);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            load, root->appendNew<Const32Value>(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, &slot));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddAndLoad(int amount, int mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, Add, Origin(),
+                root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+                root->appendNew<Const32Value>(proc, Origin(), amount)),
+            root->appendNew<Const32Value>(proc, Origin(), mask)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc));
+    CHECK(slot == ((37 + amount) & mask));
+}
+
+void testStoreNegLoad32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    int32_t slot = value;
+
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), 0),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr)),
+        slotPtr);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int32_t>(proc));
+    CHECK(slot == -value);
+}
+
+void testStoreNegLoadPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    intptr_t slot = value;
+
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), 0),
+            root->appendNew<MemoryValue>(proc, Load, pointerType(), Origin(), slotPtr)),
+        slotPtr);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<intptr_t>(proc));
+    CHECK(slot == -value);
+}
+
+void testAdd1Uncommuted(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), 1),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int>(proc, value) == value + 1);
+}
+
+void testLoadOffset()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    ConstPtrValue* arrayPtr = root->appendNew<ConstPtrValue>(proc, Origin(), array);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, 0),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, sizeof(int))));
+
+    CHECK(compileAndRun<int>(proc) == array[0] + array[1]);
+}
+
+void testLoadOffsetNotConstant()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    Value* arrayPtr = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, 0),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, sizeof(int))));
+
+    CHECK(compileAndRun<int>(proc, &array[0]) == array[0] + array[1]);
+}
+
+void testLoadOffsetUsingAdd()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    ConstPtrValue* arrayPtr = root->appendNew<ConstPtrValue>(proc, Origin(), array);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, Load, Int32, Origin(),
+                root->appendNew<Value>(
+                    proc, Add, Origin(), arrayPtr,
+                    root->appendNew<ConstPtrValue>(proc, Origin(), 0))),
+            root->appendNew<MemoryValue>(
+                proc, Load, Int32, Origin(),
+                root->appendNew<Value>(
+                    proc, Add, Origin(), arrayPtr,
+                    root->appendNew<ConstPtrValue>(proc, Origin(), sizeof(int))))));
+
+    CHECK(compileAndRun<int>(proc) == array[0] + array[1]);
+}
+
+void testLoadOffsetUsingAddInterference()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    ConstPtrValue* arrayPtr = root->appendNew<ConstPtrValue>(proc, Origin(), array);
+    ArgumentRegValue* otherArrayPtr =
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Const32Value* theNumberOfTheBeast = root->appendNew<Const32Value>(proc, Origin(), 666);
+    MemoryValue* left = root->appendNew<MemoryValue>(
+        proc, Load, Int32, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(), arrayPtr,
+            root->appendNew<ConstPtrValue>(proc, Origin(), 0)));
+    MemoryValue* right = root->appendNew<MemoryValue>(
+        proc, Load, Int32, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(), arrayPtr,
+            root->appendNew<ConstPtrValue>(proc, Origin(), sizeof(int))));
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(), theNumberOfTheBeast, otherArrayPtr, 0);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(), theNumberOfTheBeast, otherArrayPtr, sizeof(int));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(), left, right));
+
+    CHECK(compileAndRun<int>(proc, &array[0]) == 1 + 2);
+    CHECK(array[0] == 666);
+    CHECK(array[1] == 666);
+}
+
+void testLoadOffsetUsingAddNotConstant()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    Value* arrayPtr = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, Load, Int32, Origin(),
+                root->appendNew<Value>(
+                    proc, Add, Origin(), arrayPtr,
+                    root->appendNew<ConstPtrValue>(proc, Origin(), 0))),
+            root->appendNew<MemoryValue>(
+                proc, Load, Int32, Origin(),
+                root->appendNew<Value>(
+                    proc, Add, Origin(), arrayPtr,
+                    root->appendNew<ConstPtrValue>(proc, Origin(), sizeof(int))))));
+
+    CHECK(compileAndRun<int>(proc, &array[0]) == array[0] + array[1]);
+}
+
+void testLoadAddrShift(unsigned shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slots[2];
+
+    // Figure out which slot to use while having proper alignment for the shift.
+    int* slot;
+    uintptr_t arg;
+    for (unsigned i = sizeof(slots)/sizeof(slots[0]); i--;) {
+        slot = slots + i;
+        arg = bitwise_cast<uintptr_t>(slot) >> shift;
+        if (bitwise_cast<int*>(arg << shift) == slot)
+            break;
+    }
+
+    *slot = 8675309;
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<Value>(
+                proc, Shl, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Const32Value>(proc, Origin(), shift))));
+
+    CHECK(compileAndRun<int>(proc, arg) == 8675309);
+}
+
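+// The frame pointer and stack slot tests cannot portably predict the exact addresses
+// involved, so they only sanity-check that the returned pointer lies a small distance
+// (within 10000 bytes) below a local variable of the calling frame.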
+void testFramePointer()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, FramePointer, Origin()));
+
+    void* fp = compileAndRun<void*>(proc);
+    CHECK(fp < &proc);
+    CHECK(fp >= bitwise_cast<char*>(&proc) - 10000);
+}
+
+void testOverrideFramePointer()
+{
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        // Add a stack slot to make the frame non-trivial.
+        root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(8));
+
+        // On x86, Sub UseDefs its source. If FP is not protected correctly, it will be
+        // overridden, since this is its last visible use.
+        Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* fp = root->appendNew<Value>(proc, FramePointer, Origin());
+        Value* result = root->appendNew<Value>(proc, Sub, Origin(), fp, offset);
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        CHECK(compileAndRun<int64_t>(proc, 1));
+    }
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(8));
+
+        Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* fp = root->appendNew<Value>(proc, FramePointer, Origin());
+        Value* offsetFP = root->appendNew<Value>(proc, BitAnd, Origin(), offset, fp);
+        Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* offsetArg = root->appendNew<Value>(proc, Add, Origin(), offset, arg);
+        Value* result = root->appendNew<Value>(proc, Add, Origin(), offsetArg, offsetFP);
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        CHECK(compileAndRun<int64_t>(proc, 1, 2));
+    }
+}
+
+void testStackSlot()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(1)));
+
+    void* stackSlot = compileAndRun<void*>(proc);
+    CHECK(stackSlot < &proc);
+    CHECK(stackSlot >= bitwise_cast<char*>(&proc) - 10000);
+}
+
+void testLoadFromFramePointer()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<Value>(proc, FramePointer, Origin())));
+
+    void* fp = compileAndRun<void*>(proc);
+    void* myFP = __builtin_frame_address(0);
+    CHECK(fp <= myFP);
+    CHECK(fp >= bitwise_cast<char*>(myFP) - 10000);
+}
+
+void testStoreLoadStackSlot(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    SlotBaseValue* stack =
+        root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(sizeof(int)));
+
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        stack);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), stack));
+
+    CHECK(compileAndRun<int>(proc, value) == value);
+}
+
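+// modelLoad computes the expected result of a LoadedType-sized load that is then
+// extended to EffectiveType: the union reinterprets the low bytes of the input, and
+// the casts extend them according to the signedness of LoadedType.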
+template<typename LoadedType, typename EffectiveType>
+EffectiveType modelLoad(EffectiveType value)
+{
+    union {
+        EffectiveType original;
+        LoadedType loaded;
+    } u;
+
+    u.original = value;
+    if (std::is_signed<LoadedType>::value)
+        return static_cast<EffectiveType>(u.loaded);
+    return static_cast<EffectiveType>(static_cast<typename std::make_unsigned<LoadedType>::type>(u.loaded));
+}
+
+template<>
+float modelLoad<float, float>(float value) { return value; }
+
+template<>
+double modelLoad<double, double>(double value) { return value; }
+
+template<B3::Type type, typename CType, typename InputType>
+void testLoad(B3::Opcode opcode, InputType value)
+{
+    // Simple load from an absolute address.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, opcode, type, Origin(),
+                root->appendNew<ConstPtrValue>(proc, Origin(), &value)));
+
+        CHECK(isIdentical(compileAndRun<InputType>(proc), modelLoad<CType>(value)));
+    }
+
+    // Simple load from an address in a register.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, opcode, type, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+        CHECK(isIdentical(compileAndRun<InputType>(proc, &value), modelLoad<CType>(value)));
+    }
+
+    // Simple load from an address in a register, at an offset.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, opcode, type, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                sizeof(InputType)));
+
+        CHECK(isIdentical(compileAndRun<InputType>(proc, &value - 1), modelLoad<CType>(value)));
+    }
+
+    // Load from a simple base-index with various scales.
+    for (unsigned logScale = 0; logScale <= 3; ++logScale) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, opcode, type, Origin(),
+                root->appendNew<Value>(
+                    proc, Add, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                    root->appendNew<Value>(
+                        proc, Shl, Origin(),
+                        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+                        root->appendNew<Const32Value>(proc, Origin(), logScale)))));
+
+        CHECK(isIdentical(compileAndRun<InputType>(proc, &value - 2, (sizeof(InputType) * 2) >> logScale), modelLoad<CType>(value)));
+    }
+
+    // Load from a simple base-index with various scales, but commuted.
+    for (unsigned logScale = 0; logScale <= 3; ++logScale) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, opcode, type, Origin(),
+                root->appendNew<Value>(
+                    proc, Add, Origin(),
+                    root->appendNew<Value>(
+                        proc, Shl, Origin(),
+                        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+                        root->appendNew<Const32Value>(proc, Origin(), logScale)),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+        CHECK(isIdentical(compileAndRun<InputType>(proc, &value - 2, (sizeof(InputType) * 2) >> logScale), modelLoad<CType>(value)));
+    }
+}
+
+template<B3::Type type, typename T>
+void testLoad(B3::Opcode opcode, int32_t value)
+{
+    return testLoad<type, T, int32_t>(opcode, value);
+}
+
+template<B3::Type type, typename T>
+void testLoad(T value)
+{
+    return testLoad<type, T, T>(Load, value);
+}
+
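+// A typical instantiation (illustrative values, not from the test list): something like
+// testLoad<Int32, int8_t>(Load8S, 60) would check that an 8-bit signed load agrees with
+// modelLoad<int8_t>(60) under each addressing mode exercised above.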
+void testStoreFloat(double input)
+{
+    // Simple store from an address in a register.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* argumentAsFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+
+        Value* destinationAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        root->appendNew<MemoryValue>(proc, Store, Origin(), argumentAsFloat, destinationAddress);
+
+        root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+        float output = 0.;
+        CHECK(!compileAndRun<int64_t>(proc, input, &output));
+        CHECK(isIdentical(static_cast<float>(input), output));
+    }
+
+    // Simple indexed store.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* argumentAsFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+
+        Value* destinationBaseAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* index = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* scaledIndex = root->appendNew<Value>(
+            proc, Shl, Origin(),
+            index,
+            root->appendNew<Const32Value>(proc, Origin(), 2));
+        Value* destinationAddress = root->appendNew<Value>(proc, Add, Origin(), scaledIndex, destinationBaseAddress);
+
+        root->appendNew<MemoryValue>(proc, Store, Origin(), argumentAsFloat, destinationAddress);
+
+        root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+        float output = 0.;
+        CHECK(!compileAndRun<int64_t>(proc, input, &output - 1, 1));
+        CHECK(isIdentical(static_cast<float>(input), output));
+    }
+}
+
+void testStoreDoubleConstantAsFloat(double input)
+{
+    // Simple store from an address in a register.
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ConstDoubleValue>(proc, Origin(), input);
+    Value* valueAsFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), value);
+
+    Value* destinationAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+    root->appendNew<MemoryValue>(proc, Store, Origin(), valueAsFloat, destinationAddress);
+
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    float output = 0.;
+    CHECK(!compileAndRun<int64_t>(proc, input, &output));
+    CHECK(isIdentical(static_cast<float>(input), output));
+}
+
+void testSpillGP()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> sources;
+    sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+
+    for (unsigned i = 0; i < 30; ++i) {
+        sources.append(
+            root->appendNew<Value>(proc, Add, Origin(), sources[sources.size() - 1], sources[sources.size() - 2])
+        );
+    }
+
+    Value* total = root->appendNew<Const64Value>(proc, Origin(), 0);
+    for (Value* value : sources)
+        total = root->appendNew<Value>(proc, Add, Origin(), total, value);
+
+    root->appendNewControlValue(proc, Return, Origin(), total);
+    compileAndRun<int64_t>(proc, 1, 2);
+}
+
+void testSpillFP()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> sources;
+    sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+    sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1));
+
+    for (unsigned i = 0; i < 30; ++i) {
+        sources.append(
+            root->appendNew<Value>(proc, Add, Origin(), sources[sources.size() - 1], sources[sources.size() - 2])
+        );
+    }
+
+    Value* total = root->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+    for (Value* value : sources)
+        total = root->appendNew<Value>(proc, Add, Origin(), total, value);
+
+    root->appendNewControlValue(proc, Return, Origin(), total);
+    compileAndRun<double>(proc, 1.1, 2.5);
+}
+
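+// On x86, cvtsi2sd writes only the low half of its destination register, so an
+// int-to-double conversion can stall waiting on the register's stale upper bits. The
+// next two tests sum the integers 1..100000 as doubles (totalling 5000050000), first
+// with the conversion exposed to such a stall, then with enough independent padding
+// work between conversions to hide it.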
+void testInt32ToDoublePartialRegisterStall()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* loop = proc.addBlock();
+    BasicBlock* done = proc.addBlock();
+
+    // Head.
+    Value* total = root->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+    Value* counter = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    UpsilonValue* originalTotal = root->appendNew<UpsilonValue>(proc, Origin(), total);
+    UpsilonValue* originalCounter = root->appendNew<UpsilonValue>(proc, Origin(), counter);
+    root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
+    // Loop.
+    Value* loopCounter = loop->appendNew<Value>(proc, Phi, Int64, Origin());
+    Value* loopTotal = loop->appendNew<Value>(proc, Phi, Double, Origin());
+    originalCounter->setPhi(loopCounter);
+    originalTotal->setPhi(loopTotal);
+
+    Value* truncatedCounter = loop->appendNew<Value>(proc, Trunc, Origin(), loopCounter);
+    Value* doubleCounter = loop->appendNew<Value>(proc, IToD, Origin(), truncatedCounter);
+    Value* updatedTotal = loop->appendNew<Value>(proc, Add, Origin(), doubleCounter, loopTotal);
+    UpsilonValue* updatedTotalUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), updatedTotal);
+    updatedTotalUpsilon->setPhi(loopTotal);
+
+    Value* decCounter = loop->appendNew<Value>(proc, Sub, Origin(), loopCounter, loop->appendNew<Const64Value>(proc, Origin(), 1));
+    UpsilonValue* decCounterUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), decCounter);
+    decCounterUpsilon->setPhi(loopCounter);
+    loop->appendNewControlValue(
+        proc, Branch, Origin(),
+        decCounter,
+        FrequentedBlock(loop), FrequentedBlock(done));
+
+    // Tail.
+    done->appendNewControlValue(proc, Return, Origin(), updatedTotal);
+    CHECK(isIdentical(compileAndRun<double>(proc, 100000), 5000050000.));
+}
+
+void testInt32ToDoublePartialRegisterWithoutStall()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* loop = proc.addBlock();
+    BasicBlock* done = proc.addBlock();
+
+    // Head.
+    Value* total = root->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+    Value* counter = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    UpsilonValue* originalTotal = root->appendNew<UpsilonValue>(proc, Origin(), total);
+    UpsilonValue* originalCounter = root->appendNew<UpsilonValue>(proc, Origin(), counter);
+    uint64_t forPaddingInput;
+    Value* forPaddingInputAddress = root->appendNew<ConstPtrValue>(proc, Origin(), &forPaddingInput);
+    uint64_t forPaddingOutput;
+    Value* forPaddingOutputAddress = root->appendNew<ConstPtrValue>(proc, Origin(), &forPaddingOutput);
+    root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
+    // Loop.
+    Value* loopCounter = loop->appendNew<Value>(proc, Phi, Int64, Origin());
+    Value* loopTotal = loop->appendNew<Value>(proc, Phi, Double, Origin());
+    originalCounter->setPhi(loopCounter);
+    originalTotal->setPhi(loopTotal);
+
+    Value* truncatedCounter = loop->appendNew<Value>(proc, Trunc, Origin(), loopCounter);
+    Value* doubleCounter = loop->appendNew<Value>(proc, IToD, Origin(), truncatedCounter);
+    Value* updatedTotal = loop->appendNew<Value>(proc, Add, Origin(), doubleCounter, loopTotal);
+
+    // Add enough padding instructions to avoid a stall.
+    Value* loadPadding = loop->appendNew<MemoryValue>(proc, Load, Int64, Origin(), forPaddingInputAddress);
+    Value* padding = loop->appendNew<Value>(proc, BitXor, Origin(), loadPadding, loopCounter);
+    padding = loop->appendNew<Value>(proc, Add, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, BitOr, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, Sub, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, BitXor, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, Add, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, BitOr, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, Sub, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, BitXor, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, Add, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, BitOr, Origin(), padding, loopCounter);
+    padding = loop->appendNew<Value>(proc, Sub, Origin(), padding, loopCounter);
+    loop->appendNew<MemoryValue>(proc, Store, Origin(), padding, forPaddingOutputAddress);
+
+    UpsilonValue* updatedTotalUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), updatedTotal);
+    updatedTotalUpsilon->setPhi(loopTotal);
+
+    Value* decCounter = loop->appendNew<Value>(proc, Sub, Origin(), loopCounter, loop->appendNew<Const64Value>(proc, Origin(), 1));
+    UpsilonValue* decCounterUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), decCounter);
+    decCounterUpsilon->setPhi(loopCounter);
+    loop->appendNewControlValue(
+        proc, Branch, Origin(),
+        decCounter,
+        FrequentedBlock(loop), FrequentedBlock(done));
+
+    // Tail.
+    done->appendNewControlValue(proc, Return, Origin(), updatedTotal);
+    CHECK(isIdentical(compileAndRun<double>(proc, 100000), 5000050000.));
+}
+
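+// Each branch test compiles its procedure once and then invokes the same generated
+// code with both a branch-taken and a branch-not-taken input.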
+void testBranch()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchPtr()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, static_cast<intptr_t>(42)) == 1);
+    CHECK(invoke<int>(*code, static_cast<intptr_t>(0)) == 0);
+}
+
+void testDiamond()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* done = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    UpsilonValue* thenResult = thenCase->appendNew<UpsilonValue>(
+        proc, Origin(), thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+    UpsilonValue* elseResult = elseCase->appendNew<UpsilonValue>(
+        proc, Origin(), elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+    Value* phi = done->appendNew<Value>(proc, Phi, Int32, Origin());
+    thenResult->setPhi(phi);
+    elseResult->setPhi(phi);
+    done->appendNewControlValue(proc, Return, Origin(), phi);
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchNotEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, NotEqual, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchNotEqualCommute()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, NotEqual, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), 0),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchNotEqualNotEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, NotEqual, Origin(),
+            root->appendNew<Value>(
+                proc, NotEqual, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), 0)),
+            root->appendNew<Const32Value>(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Equal, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchEqualEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Equal, Origin(),
+            root->appendNew<Value>(
+                proc, Equal, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), 0)),
+            root->appendNew<Const32Value>(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchEqualCommute()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Equal, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), 0),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchEqualEqual1()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Equal, Origin(),
+            root->appendNew<Value>(
+                proc, Equal, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), 0)),
+            root->appendNew<Const32Value>(proc, Origin(), 1)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
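+// EqualOrUnordered is true when the operands are equal or unordered, i.e. when either
+// operand is NaN, which is why the expected value is computed as
+// std::isunordered(a, b) || a == b.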
+void testBranchEqualOrUnorderedArgs(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc, a, b) == expected);
+}
+
+void testBranchEqualOrUnorderedArgs(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentB = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc, &a, &b) == expected);
+}
+
+void testBranchNotEqualAndOrderedArgs(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* equalOrUnordered = root->appendNew<Value>(
+        proc, EqualOrUnordered, Origin(),
+        argumentA,
+        argumentB);
+    Value* notEqualAndOrdered = root->appendNew<Value>(
+        proc, Equal, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0),
+        equalOrUnordered);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        notEqualAndOrdered,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (!std::isunordered(a, b) && a != b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc, a, b) == expected);
+}
+
+void testBranchNotEqualAndOrderedArgs(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentB = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* equalOrUnordered = root->appendNew<Value>(
+        proc, EqualOrUnordered, Origin(),
+        argumentA,
+        argumentB);
+    Value* notEqualAndOrdered = root->appendNew<Value>(
+        proc, Equal, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0),
+        equalOrUnordered);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        notEqualAndOrdered,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (!std::isunordered(a, b) && a != b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc, &a, &b) == expected);
+}
+
+void testBranchEqualOrUnorderedDoubleArgImm(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc, a) == expected);
+}
+
+void testBranchEqualOrUnorderedFloatArgImm(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc, &a) == expected);
+}
+
+void testBranchEqualOrUnorderedDoubleImms(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc) == expected);
+}
+
+void testBranchEqualOrUnorderedFloatImms(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc) == expected);
+}
+
+void testBranchEqualOrUnorderedFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argument1 = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* argument1AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argument1);
+    Value* argument2AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argument2);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, EqualOrUnordered, Origin(),
+            argument1AsDouble,
+            argument2AsDouble),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const64Value>(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const64Value>(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun<int64_t>(proc, &a, &b) == expected);
+}
+
+void testBranchFold(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), value),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(compileAndRun<int>(proc) == !!value);
+}
+
+void testDiamondFold(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* done = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), value),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    UpsilonValue* thenResult = thenCase->appendNew<UpsilonValue>(
+        proc, Origin(), thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+    UpsilonValue* elseResult = elseCase->appendNew<UpsilonValue>(
+        proc, Origin(), elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+    Value* phi = done->appendNew<Value>(proc, Phi, Int32, Origin());
+    thenResult->setPhi(phi);
+    elseResult->setPhi(phi);
+    done->appendNewControlValue(proc, Return, Origin(), phi);
+
+    CHECK(compileAndRun<int>(proc) == !!value);
+}
+
+void testBranchNotEqualFoldPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, NotEqual, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), value),
+            root->appendNew<ConstPtrValue>(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(compileAndRun<int>(proc) == !!value);
+}
+
+void testBranchEqualFoldPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Equal, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), value),
+            root->appendNew<ConstPtrValue>(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(compileAndRun<int>(proc) == !value);
+}
+
+void testBranchLoadPtr()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    intptr_t cond;
+    cond = 42;
+    CHECK(invoke<int>(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    int32_t cond;
+    cond = 42;
+    CHECK(invoke<int>(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad8S()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load8S, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    int8_t cond;
+    cond = -1;
+    CHECK(invoke<int>(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad8Z()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    uint8_t cond;
+    cond = 1;
+    CHECK(invoke<int>(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad16S()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load16S, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    int16_t cond;
+    cond = -1;
+    CHECK(invoke<int>(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad16Z()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load16Z, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    uint16_t cond;
+    cond = 1;
+    CHECK(invoke<int>(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranch8WithLoad8ZIndex()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    int logScale = 1;
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Above, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, Load8Z, Origin(),
+                root->appendNew<Value>(
+                    proc, Add, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                    root->appendNew<Value>(
+                        proc, Shl, Origin(),
+                        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+                        root->appendNew<Const32Value>(proc, Origin(), logScale)))),
+            root->appendNew<Const32Value>(proc, Origin(), 250)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    uint32_t cond;
+    cond = 0xffffffffU; // All bytes are 0xff.
+    CHECK(invoke<int>(*code, &cond - 2, (sizeof(uint32_t) * 2) >> logScale) == 1);
+    cond = 0x00000000U; // All bytes are 0.
+    CHECK(invoke<int>(*code, &cond - 2, (sizeof(uint32_t) * 2) >> logScale) == 0);
+}
+
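+// testComplex is a scalability stress test rather than a check of a single opcode: it
+// chains numConstructs alternating control-flow diamonds and counted loops over numVars
+// live variables, and measures how long the resulting procedure takes to build and
+// compile.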
+void testComplex(unsigned numVars, unsigned numConstructs)
+{
+    double before = monotonicallyIncreasingTimeMS();
+    
+    Procedure proc;
+    BasicBlock* current = proc.addBlock();
+
+    Const32Value* one = current->appendNew<Const32Value>(proc, Origin(), 1);
+
+    Vector<int32_t> varSlots;
+    for (unsigned i = numVars; i--;)
+        varSlots.append(i);
+
+    Vector<Value*> vars;
+    for (int32_t& varSlot : varSlots) {
+        Value* varSlotPtr = current->appendNew<ConstPtrValue>(proc, Origin(), &varSlot);
+        vars.append(current->appendNew<MemoryValue>(proc, Load, Int32, Origin(), varSlotPtr));
+    }
+
+    for (unsigned i = 0; i < numConstructs; ++i) {
+        if (i & 1) {
+            // Control flow diamond.
+            unsigned predicateVarIndex = ((i >> 1) + 2) % numVars;
+            unsigned thenIncVarIndex = ((i >> 1) + 0) % numVars;
+            unsigned elseIncVarIndex = ((i >> 1) + 1) % numVars;
+
+            BasicBlock* thenBlock = proc.addBlock();
+            BasicBlock* elseBlock = proc.addBlock();
+            BasicBlock* continuation = proc.addBlock();
+
+            current->appendNewControlValue(
+                proc, Branch, Origin(), vars[predicateVarIndex],
+                FrequentedBlock(thenBlock), FrequentedBlock(elseBlock));
+
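+            // B3 is SSA but has no block arguments: each predecessor routes its value into
+            // the continuation's Phi through an explicit Upsilon shadow value.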
+            UpsilonValue* thenThenResult = thenBlock->appendNew<UpsilonValue>(
+                proc, Origin(),
+                thenBlock->appendNew<Value>(proc, Add, Origin(), vars[thenIncVarIndex], one));
+            UpsilonValue* thenElseResult = thenBlock->appendNew<UpsilonValue>(
+                proc, Origin(), vars[elseIncVarIndex]);
+            thenBlock->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+            UpsilonValue* elseElseResult = elseBlock->appendNew<UpsilonValue>(
+                proc, Origin(),
+                elseBlock->appendNew<Value>(proc, Add, Origin(), vars[elseIncVarIndex], one));
+            UpsilonValue* elseThenResult = elseBlock->appendNew<UpsilonValue>(
+                proc, Origin(), vars[thenIncVarIndex]);
+            elseBlock->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+            Value* thenPhi = continuation->appendNew<Value>(proc, Phi, Int32, Origin());
+            thenThenResult->setPhi(thenPhi);
+            elseThenResult->setPhi(thenPhi);
+            vars[thenIncVarIndex] = thenPhi;
+
+            Value* elsePhi = continuation->appendNew<Value>(proc, Phi, Int32, Origin());
+            thenElseResult->setPhi(elsePhi);
+            elseElseResult->setPhi(elsePhi);
+            vars[elseIncVarIndex] = elsePhi;
+            
+            current = continuation;
+        } else {
+            // Loop.
+
+            BasicBlock* loopEntry = proc.addBlock();
+            BasicBlock* loopReentry = proc.addBlock();
+            BasicBlock* loopBody = proc.addBlock();
+            BasicBlock* loopExit = proc.addBlock();
+            BasicBlock* loopSkip = proc.addBlock();
+            BasicBlock* continuation = proc.addBlock();
+            
+            Value* startIndex = vars[((i >> 1) + 1) % numVars];
+            Value* startSum = current->appendNew<Const32Value>(proc, Origin(), 0);
+            current->appendNewControlValue(
+                proc, Branch, Origin(), startIndex,
+                FrequentedBlock(loopEntry), FrequentedBlock(loopSkip));
+
+            UpsilonValue* startIndexForBody = loopEntry->appendNew<UpsilonValue>(
+                proc, Origin(), startIndex);
+            UpsilonValue* startSumForBody = loopEntry->appendNew<UpsilonValue>(
+                proc, Origin(), startSum);
+            loopEntry->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loopBody));
+
+            Value* bodyIndex = loopBody->appendNew<Value>(proc, Phi, Int32, Origin());
+            startIndexForBody->setPhi(bodyIndex);
+            Value* bodySum = loopBody->appendNew<Value>(proc, Phi, Int32, Origin());
+            startSumForBody->setPhi(bodySum);
+            Value* newBodyIndex = loopBody->appendNew<Value>(proc, Sub, Origin(), bodyIndex, one);
+            Value* newBodySum = loopBody->appendNew<Value>(
+                proc, Add, Origin(),
+                bodySum,
+                loopBody->appendNew<MemoryValue>(
+                    proc, Load, Int32, Origin(),
+                    loopBody->appendNew<Value>(
+                        proc, Add, Origin(),
+                        loopBody->appendNew<ConstPtrValue>(proc, Origin(), varSlots.data()),
+                        loopBody->appendNew<Value>(
+                            proc, Shl, Origin(),
+                            loopBody->appendNew<Value>(
+                                proc, ZExt32, Origin(),
+                                loopBody->appendNew<Value>(
+                                    proc, BitAnd, Origin(),
+                                    newBodyIndex,
+                                    loopBody->appendNew<Const32Value>(
+                                        proc, Origin(), numVars - 1))),
+                            loopBody->appendNew<Const32Value>(proc, Origin(), 2)))));
+            loopBody->appendNewControlValue(
+                proc, Branch, Origin(), newBodyIndex,
+                FrequentedBlock(loopReentry), FrequentedBlock(loopExit));
+
+            loopReentry->appendNew<UpsilonValue>(proc, Origin(), newBodyIndex, bodyIndex);
+            loopReentry->appendNew<UpsilonValue>(proc, Origin(), newBodySum, bodySum);
+            loopReentry->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loopBody));
+
+            UpsilonValue* exitSum = loopExit->appendNew<UpsilonValue>(proc, Origin(), newBodySum);
+            loopExit->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+            UpsilonValue* skipSum = loopSkip->appendNew<UpsilonValue>(proc, Origin(), startSum);
+            loopSkip->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+            Value* finalSum = continuation->appendNew<Value>(proc, Phi, Int32, Origin());
+            exitSum->setPhi(finalSum);
+            skipSum->setPhi(finalSum);
+
+            current = continuation;
+            vars[((i >> 1) + 0) % numVars] = finalSum;
+        }
+    }
+
+    current->appendNewControlValue(proc, Return, Origin(), vars[0]);
+
+    compile(proc);
+
+    double after = monotonicallyIncreasingTimeMS();
+    dataLog(toCString("    That took ", after - before, " ms.\n"));
+}
+
+void testSimplePatchpoint()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testSimplePatchpointWithoutOuputClobbersGPArgs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* const1 = root->appendNew<Const64Value>(proc, Origin(), 42);
+    Value* const2 = root->appendNew<Const64Value>(proc, Origin(), 13);
+
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    patchpoint->clobberLate(RegisterSet(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1));
+    patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), params[0].gpr());
+            jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), params[1].gpr());
+            jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), GPRInfo::argumentGPR0);
+            jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), GPRInfo::argumentGPR1);
+        });
+
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), arg1, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, 1, 2) == 3);
+}
+
+void testSimplePatchpointWithOuputClobbersGPArgs()
+{
+    // We can't predict where the output will be, but we want to be sure it is not
+    // one of the clobbered registers, which is a bit hard to test directly.
+    //
+    // What we do is force the hand of our register allocator by clobbering absolutely
+    // everything but one register. The only valid allocation is to give that register
+    // to the result and spill everything else.
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* const1 = root->appendNew<Const64Value>(proc, Origin(), 42);
+    Value* const2 = root->appendNew<Const64Value>(proc, Origin(), 13);
+
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+
+    RegisterSet clobberAll = RegisterSet::allGPRs();
+    clobberAll.exclude(RegisterSet::stackRegisters());
+    clobberAll.exclude(RegisterSet::reservedHardwareRegisters());
+    clobberAll.clear(GPRInfo::argumentGPR2);
+    patchpoint->clobberLate(clobberAll);
+
+    patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            jit.move(params[1].gpr(), params[0].gpr());
+            jit.add64(params[2].gpr(), params[0].gpr());
+
+            clobberAll.forEach([&] (Reg reg) {
+                jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), reg.gpr());
+            });
+        });
+
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), patchpoint,
+        root->appendNew<Value>(proc, Add, Origin(), arg1, arg2));
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, 1, 2) == 58);
+}
+
+void testSimplePatchpointWithoutOuputClobbersFPArgs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* const1 = root->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+    Value* const2 = root->appendNew<ConstDoubleValue>(proc, Origin(), 13.1);
+
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    patchpoint->clobberLate(RegisterSet(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1));
+    patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isFPR());
+            CHECK(params[1].isFPR());
+            jit.moveZeroToDouble(params[0].fpr());
+            jit.moveZeroToDouble(params[1].fpr());
+            jit.moveZeroToDouble(FPRInfo::argumentFPR0);
+            jit.moveZeroToDouble(FPRInfo::argumentFPR1);
+        });
+
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), arg1, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<double>(proc, 1.5, 2.5) == 4);
+}
+
+void testSimplePatchpointWithOuputClobbersFPArgs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* const1 = root->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+    Value* const2 = root->appendNew<ConstDoubleValue>(proc, Origin(), 13.1);
+
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Double, Origin());
+
+    RegisterSet clobberAll = RegisterSet::allFPRs();
+    clobberAll.exclude(RegisterSet::stackRegisters());
+    clobberAll.exclude(RegisterSet::reservedHardwareRegisters());
+    clobberAll.clear(FPRInfo::argumentFPR2);
+    patchpoint->clobberLate(clobberAll);
+
+    patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isFPR());
+            CHECK(params[1].isFPR());
+            CHECK(params[2].isFPR());
+            jit.addDouble(params[1].fpr(), params[2].fpr(), params[0].fpr());
+
+            clobberAll.forEach([&] (Reg reg) {
+                jit.moveZeroToDouble(reg.fpr());
+            });
+        });
+
+    Value* result = root->appendNew<Value>(proc, Add, Origin(), patchpoint,
+        root->appendNew<Value>(proc, Add, Origin(), arg1, arg2));
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<double>(proc, 1.5, 2.5) == 59.6);
+}
+
+void testPatchpointWithEarlyClobber()
+{
+    auto test = [] (GPRReg registerToClobber, bool arg1InArgGPR, bool arg2InArgGPR) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+        PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+        patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+        patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        patchpoint->clobberEarly(RegisterSet(registerToClobber));
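+        // An early clobber is unavailable even while the inputs are being consumed, so the
+        // allocator has to steer arg1/arg2 away from registerToClobber; the CHECKs below
+        // verify exactly that.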
+        patchpoint->setGenerator(
+            [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                CHECK((params[1].gpr() == GPRInfo::argumentGPR0) == arg1InArgGPR);
+                CHECK((params[2].gpr() == GPRInfo::argumentGPR1) == arg2InArgGPR);
+                
+                add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+            });
+
+        root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+        CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+    };
+
+    test(GPRInfo::nonArgGPR0, true, true);
+    test(GPRInfo::argumentGPR0, false, true);
+    test(GPRInfo::argumentGPR1, true, false);
+}
+
+void testPatchpointCallArg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::stackArgument(0)));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::stackArgument(8)));
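+    // stackArgument(offset) pins a value to the given offset from the stack pointer at the
+    // patchpoint, mirroring how arguments are passed to calls; the generator then addresses
+    // the slots relative to the frame pointer via offsetFromFP().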
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isStack());
+            CHECK(params[2].isStack());
+            jit.load32(
+                CCallHelpers::Address(GPRInfo::callFrameRegister, params[1].offsetFromFP()),
+                params[0].gpr());
+            jit.add32(
+                CCallHelpers::Address(GPRInfo::callFrameRegister, params[2].offsetFromFP()),
+                params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointFixedRegister()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep(GPRInfo::regT0)));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep(GPRInfo::regT1)));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1] == ValueRep(GPRInfo::regT0));
+            CHECK(params[2] == ValueRep(GPRInfo::regT1));
+            add32(jit, GPRInfo::regT0, GPRInfo::regT1, params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointAny(ValueRep rep)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, rep));
+    patchpoint->append(ConstrainedValue(arg2, rep));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            // We shouldn't have spilled the inputs, so we assert that they're in registers.
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointGPScratch()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(arg1, ValueRep::SomeRegister);
+    patchpoint->append(arg2, ValueRep::SomeRegister);
+    patchpoint->numGPScratchRegisters = 2;
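+    // Requested scratch registers are handed to the generator in addition to the stackmap
+    // params; they must be distinct from the inputs, the result, and each other, and must
+    // not be in unavailableRegisters(), which is what the CHECKs below pin down.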
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            // We shouldn't have spilled the inputs, so we assert that they're in registers.
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            CHECK(params.gpScratch(0) != InvalidGPRReg);
+            CHECK(params.gpScratch(0) != params[0].gpr());
+            CHECK(params.gpScratch(0) != params[1].gpr());
+            CHECK(params.gpScratch(0) != params[2].gpr());
+            CHECK(params.gpScratch(1) != InvalidGPRReg);
+            CHECK(params.gpScratch(1) != params.gpScratch(0));
+            CHECK(params.gpScratch(1) != params[0].gpr());
+            CHECK(params.gpScratch(1) != params[1].gpr());
+            CHECK(params.gpScratch(1) != params[2].gpr());
+            CHECK(!params.unavailableRegisters().get(params.gpScratch(0)));
+            CHECK(!params.unavailableRegisters().get(params.gpScratch(1)));
+            add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointFPScratch()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(arg1, ValueRep::SomeRegister);
+    patchpoint->append(arg2, ValueRep::SomeRegister);
+    patchpoint->numFPScratchRegisters = 2;
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            // We shouldn't have spilled the inputs, so we assert that they're in registers.
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            CHECK(params.fpScratch(0) != InvalidFPRReg);
+            CHECK(params.fpScratch(1) != InvalidFPRReg);
+            CHECK(params.fpScratch(1) != params.fpScratch(0));
+            CHECK(!params.unavailableRegisters().get(params.fpScratch(0)));
+            CHECK(!params.unavailableRegisters().get(params.fpScratch(1)));
+            add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointLotsOfLateAnys()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Vector<int> things;
+    for (unsigned i = 200; i--;)
+        things.append(i);
+
+    Vector<Value*> values;
+    for (int& thing : things) {
+        Value* value = root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &thing));
+        values.append(value);
+    }
+
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    for (Value* value : values)
+        patchpoint->append(ConstrainedValue(value, ValueRep::LateColdAny));
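+    // LateColdAny puts no constraint on where each input lives and marks the use as cold,
+    // so with 200 inputs most of them may end up spilled to the stack.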
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            // The inputs are cold Anys, so they can be in registers or spilled to the
+            // stack; the loop below handles both representations.
+            CHECK(params.size() == things.size() + 1);
+            CHECK(params[0].isGPR());
+            jit.move(CCallHelpers::TrustedImm32(0), params[0].gpr());
+            for (unsigned i = 1; i < params.size(); ++i) {
+                if (params[i].isGPR()) {
+                    CHECK(params[i] != params[0]);
+                    jit.add32(params[i].gpr(), params[0].gpr());
+                } else {
+                    CHECK(params[i].isStack());
+                    jit.add32(CCallHelpers::Address(GPRInfo::callFrameRegister, params[i].offsetFromFP()), params[0].gpr());
+                }
+            }
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc) == (things.size() * (things.size() - 1)) / 2);
+}
+
+void testPatchpointAnyImm(ValueRep rep)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, rep));
+    patchpoint->append(ConstrainedValue(arg2, rep));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isConstant());
+            CHECK(params[2].value() == 42);
+            jit.add32(
+                CCallHelpers::TrustedImm32(static_cast<int32_t>(params[2].value())),
+                params[1].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1) == 43);
+}
+
+void testPatchpointManyImms()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), 42);
+    Value* arg2 = root->appendNew<Const64Value>(proc, Origin(), 43);
+    Value* arg3 = root->appendNew<Const64Value>(proc, Origin(), 43000000000000ll);
+    Value* arg4 = root->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::WarmAny));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::WarmAny));
+    patchpoint->append(ConstrainedValue(arg3, ValueRep::WarmAny));
+    patchpoint->append(ConstrainedValue(arg4, ValueRep::WarmAny));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+            CHECK(params.size() == 4);
+            CHECK(params[0] == ValueRep::constant(42));
+            CHECK(params[1] == ValueRep::constant(43));
+            CHECK(params[2] == ValueRep::constant(43000000000000ll));
+            CHECK(params[3] == ValueRep::constant(bitwise_cast<int64_t>(42.5)));
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc));
+}
+
+void testPatchpointWithRegisterResult()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    patchpoint->resultConstraint = ValueRep::reg(GPRInfo::nonArgGPR0);
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0] == ValueRep::reg(GPRInfo::nonArgGPR0));
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            add32(jit, params[1].gpr(), params[2].gpr(), GPRInfo::nonArgGPR0);
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointWithStackArgumentResult()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    patchpoint->resultConstraint = ValueRep::stackArgument(0);
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
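+    // The result is constrained to the stack slot at offset 0 from the stack pointer, so the
+    // generator stores through stackPointerRegister and B3 reloads the slot for the Return;
+    // params[0] reports the same slot as a frame-pointer-relative offset.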
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0] == ValueRep::stack(-static_cast<intptr_t>(proc.frameSize())));
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            jit.add32(params[1].gpr(), params[2].gpr(), jit.scratchRegister());
+            jit.store32(jit.scratchRegister(), CCallHelpers::Address(CCallHelpers::stackPointerRegister, 0));
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointWithAnyResult()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Double, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    patchpoint->resultConstraint = ValueRep::WarmAny;
+    patchpoint->clobberLate(RegisterSet::allFPRs());
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    patchpoint->clobber(RegisterSet(GPRInfo::regT0));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isStack());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            add32(jit, params[1].gpr(), params[2].gpr(), GPRInfo::regT0);
+            jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
+            jit.storeDouble(FPRInfo::fpRegT0, CCallHelpers::Address(GPRInfo::callFrameRegister, params[0].offsetFromFP()));
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun<double>(proc, 1, 2) == 3);
+}
+
+void testSimpleCheck()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    CheckValue* check = root->appendNew<CheckValue>(proc, Check, Origin(), arg);
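+    // A Check branches to its generator's slow path when the predicate is non-zero; this
+    // one returns 42 straight from the slow path instead of falling through to the Return
+    // of 0 below.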
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code, 0) == 0);
+    CHECK(invoke<int>(*code, 1) == 42);
+}
+
+void testCheckFalse()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    CheckValue* check = root->appendNew<CheckValue>(
+        proc, Check, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+    check->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"This should not have executed");
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == 0);
+}
+
+void testCheckTrue()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    CheckValue* check = root->appendNew<CheckValue>(
+        proc, Check, Origin(), root->appendNew<Const32Value>(proc, Origin(), 1));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.value()->opcode() == Patchpoint);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == 42);
+}
+
+void testCheckLessThan()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    CheckValue* check = root->appendNew<CheckValue>(
+        proc, Check, Origin(),
+        root->appendNew<Value>(
+            proc, LessThan, Origin(), arg,
+            root->appendNew<Const32Value>(proc, Origin(), 42)));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code, 42) == 0);
+    CHECK(invoke<int>(*code, 1000) == 0);
+    CHECK(invoke<int>(*code, 41) == 42);
+    CHECK(invoke<int>(*code, 0) == 42);
+    CHECK(invoke<int>(*code, -1) == 42);
+}
+
+void testCheckMegaCombo()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* index = root->appendNew<Value>(
+        proc, ZExt32, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    Value* ptr = root->appendNew<Value>(
+        proc, Add, Origin(), base,
+        root->appendNew<Value>(
+            proc, Shl, Origin(), index,
+            root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+    CheckValue* check = root->appendNew<CheckValue>(
+        proc, Check, Origin(),
+        root->appendNew<Value>(
+            proc, LessThan, Origin(),
+            root->appendNew<MemoryValue>(proc, Load8S, Origin(), ptr),
+            root->appendNew<Const32Value>(proc, Origin(), 42)));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    int8_t value;
+    value = 42;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 0);
+    value = 127;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 0);
+    value = 41;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+    value = 0;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+    value = -1;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+}
+
+void testCheckTrickyMegaCombo()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* index = root->appendNew<Value>(
+        proc, ZExt32, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)),
+            root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+    Value* ptr = root->appendNew<Value>(
+        proc, Add, Origin(), base,
+        root->appendNew<Value>(
+            proc, Shl, Origin(), index,
+            root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+    CheckValue* check = root->appendNew<CheckValue>(
+        proc, Check, Origin(),
+        root->appendNew<Value>(
+            proc, LessThan, Origin(),
+            root->appendNew<MemoryValue>(proc, Load8S, Origin(), ptr),
+            root->appendNew<Const32Value>(proc, Origin(), 42)));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    int8_t value;
+    value = 42;
+    CHECK(invoke<int>(*code, &value - 2, 0) == 0);
+    value = 127;
+    CHECK(invoke<int>(*code, &value - 2, 0) == 0);
+    value = 41;
+    CHECK(invoke<int>(*code, &value - 2, 0) == 42);
+    value = 0;
+    CHECK(invoke<int>(*code, &value - 2, 0) == 42);
+    value = -1;
+    CHECK(invoke<int>(*code, &value - 2, 0) == 42);
+}
+
+void testCheckTwoMegaCombos()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* index = root->appendNew<Value>(
+        proc, ZExt32, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    Value* ptr = root->appendNew<Value>(
+        proc, Add, Origin(), base,
+        root->appendNew<Value>(
+            proc, Shl, Origin(), index,
+            root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+    Value* predicate = root->appendNew<Value>(
+        proc, LessThan, Origin(),
+        root->appendNew<MemoryValue>(proc, Load8S, Origin(), ptr),
+        root->appendNew<Const32Value>(proc, Origin(), 42));
+    
+    CheckValue* check = root->appendNew<CheckValue>(proc, Check, Origin(), predicate);
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    CheckValue* check2 = root->appendNew<CheckValue>(proc, Check, Origin(), predicate);
+    check2->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(43), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    int8_t value;
+    value = 42;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 0);
+    value = 127;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 0);
+    value = 41;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+    value = 0;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+    value = -1;
+    CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+}
+
+void testCheckTwoNonRedundantMegaCombos()
+{
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    
+    Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* index = root->appendNew<Value>(
+        proc, ZExt32, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    Value* branchPredicate = root->appendNew<Value>(
+        proc, BitAnd, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)),
+        root->appendNew<Const32Value>(proc, Origin(), 0xff));
+
+    Value* ptr = root->appendNew<Value>(
+        proc, Add, Origin(), base,
+        root->appendNew<Value>(
+            proc, Shl, Origin(), index,
+            root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+    Value* checkPredicate = root->appendNew<Value>(
+        proc, LessThan, Origin(),
+        root->appendNew<MemoryValue>(proc, Load8S, Origin(), ptr),
+        root->appendNew<Const32Value>(proc, Origin(), 42));
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(), branchPredicate,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+    
+    CheckValue* check = thenCase->appendNew<CheckValue>(proc, Check, Origin(), checkPredicate);
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(), thenCase->appendNew<Const32Value>(proc, Origin(), 43));
+
+    CheckValue* check2 = elseCase->appendNew<CheckValue>(proc, Check, Origin(), checkPredicate);
+    check2->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(44), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(), elseCase->appendNew<Const32Value>(proc, Origin(), 45));
+
+    auto code = compile(proc);
+
+    int8_t value;
+
+    value = 42;
+    CHECK(invoke<int>(*code, &value - 2, 1, true) == 43);
+    value = 127;
+    CHECK(invoke<int>(*code, &value - 2, 1, true) == 43);
+    value = 41;
+    CHECK(invoke<int>(*code, &value - 2, 1, true) == 42);
+    value = 0;
+    CHECK(invoke<int>(*code, &value - 2, 1, true) == 42);
+    value = -1;
+    CHECK(invoke<int>(*code, &value - 2, 1, true) == 42);
+
+    value = 42;
+    CHECK(invoke<int>(*code, &value - 2, 1, false) == 45);
+    value = 127;
+    CHECK(invoke<int>(*code, &value - 2, 1, false) == 45);
+    value = 41;
+    CHECK(invoke<int>(*code, &value - 2, 1, false) == 44);
+    value = 0;
+    CHECK(invoke<int>(*code, &value - 2, 1, false) == 44);
+    value = -1;
+    CHECK(invoke<int>(*code, &value - 2, 1, false) == 44);
+}
+
+void testCheckAddImm()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->append(arg1);
+    checkAdd->append(arg2);
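+    // The appended values become stackmap params, so on overflow the slow path still sees
+    // the original operands and can redo the addition in double arithmetic.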
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isConstant());
+            CHECK(params[1].value() == 42);
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0) == 42.0);
+    CHECK(invoke<double>(*code, 1) == 43.0);
+    CHECK(invoke<double>(*code, 42) == 84.0);
+    CHECK(invoke<double>(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAddImmCommute()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg2, arg1);
+    checkAdd->append(arg1);
+    checkAdd->append(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isConstant());
+            CHECK(params[1].value() == 42);
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0) == 42.0);
+    CHECK(invoke<double>(*code, 1) == 43.0);
+    CHECK(invoke<double>(*code, 42) == 84.0);
+    CHECK(invoke<double>(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAddImmSomeRegister()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->appendSomeRegister(arg1);
+    checkAdd->appendSomeRegister(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0) == 42.0);
+    CHECK(invoke<double>(*code, 1) == 43.0);
+    CHECK(invoke<double>(*code, 42) == 84.0);
+    CHECK(invoke<double>(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAdd()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->appendSomeRegister(arg1);
+    checkAdd->appendSomeRegister(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0, 42) == 42.0);
+    CHECK(invoke<double>(*code, 1, 42) == 43.0);
+    CHECK(invoke<double>(*code, 42, 42) == 84.0);
+    CHECK(invoke<double>(*code, 2147483647, 42) == 2147483689.0);
+}
+
+void testCheckAdd64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->appendSomeRegister(arg1);
+    checkAdd->appendSomeRegister(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0ll, 42ll) == 42.0);
+    CHECK(invoke<double>(*code, 1ll, 42ll) == 43.0);
+    CHECK(invoke<double>(*code, 42ll, 42ll) == 84.0);
+    CHECK(invoke<double>(*code, 9223372036854775807ll, 42ll) == static_cast<double>(9223372036854775807ll) + 42.0);
+}
+
+void testCheckAddFold(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"Should have been folded");
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == a + b);
+}
+
+void testCheckAddFoldFail(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == 42);
+}
+
+void testCheckAddArgumentAliasing64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+
+    // Pretend to use all the args.
+    PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+    useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Last use of first arg (here, arg1).
+    CheckValue* checkAdd1 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Last use of second arg (here, arg2).
+    CheckValue* checkAdd2 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg3, arg2);
+    checkAdd2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Keep arg2 live past the adds.
+    PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Only use of checkAdd1 and checkAdd2.
+    CheckValue* checkAdd3 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), checkAdd1, checkAdd2);
+    checkAdd3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd3);
+
+    CHECK(compileAndRun<int64_t>(proc, 1, 2, 3) == 8);
+}
+
+void testCheckAddArgumentAliasing32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg3 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+
+    // Pretend to use all the args.
+    PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+    useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Last use of first arg (here, arg1).
+    CheckValue* checkAdd1 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Last use of second arg (here, arg3).
+    CheckValue* checkAdd2 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg2, arg3);
+    checkAdd2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Keep arg2 live past the adds.
+    PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Only use of checkAdd1 and checkAdd2.
+    CheckValue* checkAdd3 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), checkAdd1, checkAdd2);
+    checkAdd3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd3);
+
+    CHECK(compileAndRun<int32_t>(proc, 1, 2, 3) == 8);
+}
+
+void testCheckAddSelfOverflow64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg, arg);
+    checkAdd->append(arg);
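+    // Only arg is appended, so on overflow the slow path must see arg's original value;
+    // that only works if the add does not destroy arg's register, which the opaque use
+    // below is there to force.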
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(params[0].gpr(), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+
+    // Make sure the arg is not the destination of the operation.
+    PatchpointValue* opaqueUse = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    opaqueUse->append(ConstrainedValue(arg, ValueRep::SomeRegister));
+    opaqueUse->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int64_t>(*code, 0ll) == 0);
+    CHECK(invoke<int64_t>(*code, 1ll) == 2);
+    CHECK(invoke<int64_t>(*code, std::numeric_limits<int64_t>::max()) == std::numeric_limits<int64_t>::max());
+}
+
+void testCheckAddSelfOverflow32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg, arg);
+    checkAdd->append(arg);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(params[0].gpr(), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+
+    // Make sure the arg is not the destination of the operation.
+    PatchpointValue* opaqueUse = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    opaqueUse->append(ConstrainedValue(arg, ValueRep::SomeRegister));
+    opaqueUse->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int32_t>(*code, 0ll) == 0);
+    CHECK(invoke<int32_t>(*code, 1ll) == 2);
+    CHECK(invoke<int32_t>(*code, std::numeric_limits<int32_t>::max()) == std::numeric_limits<int32_t>::max());
+}
+
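+// Editorial aside, not part of the original test suite: in the CheckSub
+// tests below the check's generator does not trap; it redoes the subtraction
+// in double arithmetic and returns early, so an overflowing input (e.g.
+// -2147483647 - 42) yields the exact double -2147483689.0. That is how the
+// CHECKs observe that the slow path actually ran. A host-side sketch of that
+// slow path (modelCheckSubSlowPath is a hypothetical name):
+template<typename IntType>
+double modelCheckSubSlowPath(IntType a, IntType b)
+{
+    // Mirrors convertInt32ToDouble/convertInt64ToDouble + subDouble below.
+    return static_cast<double>(a) - static_cast<double>(b);
+}
+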
+void testCheckSubImm()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+    CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->append(arg1);
+    checkSub->append(arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isConstant());
+            CHECK(params[1].value() == 42);
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+            jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkSub));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0) == -42.0);
+    CHECK(invoke<double>(*code, 1) == -41.0);
+    CHECK(invoke<double>(*code, 42) == 0.0);
+    CHECK(invoke<double>(*code, -2147483647) == -2147483689.0);
+}
+
+void testCheckSubBadImm()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    int32_t badImm = std::numeric_limits<int32_t>::min();
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), badImm);
+    CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->append(arg1);
+    checkSub->append(arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+
+            if (params[1].isConstant()) {
+                CHECK(params[1].value() == badImm);
+                jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(badImm), FPRInfo::fpRegT1);
+            } else {
+                CHECK(params[1].isGPR());
+                jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            }
+            jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkSub));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0) == -static_cast<double>(badImm));
+    CHECK(invoke<double>(*code, -1) == -static_cast<double>(badImm) - 1);
+    CHECK(invoke<double>(*code, 1) == -static_cast<double>(badImm) + 1);
+    CHECK(invoke<double>(*code, 42) == -static_cast<double>(badImm) + 42);
+}
+
+void testCheckSub()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->append(arg1);
+    checkSub->append(arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkSub));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0, 42) == -42.0);
+    CHECK(invoke<double>(*code, 1, 42) == -41.0);
+    CHECK(invoke<double>(*code, 42, 42) == 0.0);
+    CHECK(invoke<double>(*code, -2147483647, 42) == -2147483689.0);
+}
+
+NEVER_INLINE double doubleSub(double a, double b)
+{
+    return a - b;
+}
+
+void testCheckSub64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->append(arg1);
+    checkSub->append(arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkSub));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0ll, 42ll) == -42.0);
+    CHECK(invoke<double>(*code, 1ll, 42ll) == -41.0);
+    CHECK(invoke<double>(*code, 42ll, 42ll) == 0.0);
+    CHECK(invoke<double>(*code, -9223372036854775807ll, 42ll) == doubleSub(static_cast<double>(-9223372036854775807ll), 42.0));
+}
+
+void testCheckSubFold(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+    CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"Should have been folded");
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkSub);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == a - b);
+}
+
+void testCheckSubFoldFail(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+    CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkSub);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == 42);
+}
+
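+// Editorial aside, not part of the original test suite: B3 has no dedicated
+// checked-negate opcode; the two tests below express it as CheckSub(0, x).
+// Exactly one input overflows two's-complement negation (sketch;
+// modelNegOverflows is a hypothetical helper):
+template<typename IntType>
+bool modelNegOverflows(IntType x)
+{
+    return x == std::numeric_limits<IntType>::min();
+}
+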
+void testCheckNeg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), 0);
+    Value* arg2 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    CheckValue* checkNeg = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkNeg->append(arg2);
+    checkNeg->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 1);
+            CHECK(params[0].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT1);
+            jit.negateDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkNeg));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0) == 0.0);
+    CHECK(invoke<double>(*code, 1) == -1.0);
+    CHECK(invoke<double>(*code, 42) == -42.0);
+    CHECK(invoke<double>(*code, -2147483647 - 1) == 2147483648.0);
+}
+
+void testCheckNeg64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const64Value>(proc, Origin(), 0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    CheckValue* checkNeg = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkNeg->append(arg2);
+    checkNeg->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 1);
+            CHECK(params[0].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT1);
+            jit.negateDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkNeg));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0ll) == 0.0);
+    CHECK(invoke<double>(*code, 1ll) == -1.0);
+    CHECK(invoke<double>(*code, 42ll) == -42.0);
+    CHECK(invoke<double>(*code, -9223372036854775807ll - 1) == 9223372036854775808.0);
+}
+
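+// Editorial aside, not part of the original test suite: the CheckMul tests
+// follow the same recipe, with slow paths that form the full product in
+// double arithmetic. The guarded predicate, modeled with the GCC/Clang
+// builtin (modelMulOverflows is a hypothetical helper):
+template<typename T>
+bool modelMulOverflows(T a, T b)
+{
+    T product;
+    return __builtin_mul_overflow(a, b, &product);
+}
+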
+void testCheckMul()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0, 42) == 0.0);
+    CHECK(invoke<double>(*code, 1, 42) == 42.0);
+    CHECK(invoke<double>(*code, 42, 42) == 42.0 * 42.0);
+    CHECK(invoke<double>(*code, 2147483647, 42) == 2147483647.0 * 42.0);
+}
+
+void testCheckMulMemory()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    int left;
+    int right;
+    
+    Value* arg1 = root->appendNew<MemoryValue>(
+        proc, Load, Int32, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &left));
+    Value* arg2 = root->appendNew<MemoryValue>(
+        proc, Load, Int32, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &right));
+    CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    left = 0;
+    right = 42;
+    CHECK(invoke<double>(*code) == 0.0);
+
+    left = 1;
+    right = 42;
+    CHECK(invoke<double>(*code) == 42.0);
+
+    left = 42;
+    right = 42;
+    CHECK(invoke<double>(*code) == 42.0 * 42.0);
+
+    left = 2147483647;
+    right = 42;
+    CHECK(invoke<double>(*code) == 2147483647.0 * 42.0);
+}
+
+void testCheckMul2()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 2);
+    CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isConstant());
+            CHECK(params[1].value() == 2);
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(2), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0) == 0.0);
+    CHECK(invoke<double>(*code, 1) == 2.0);
+    CHECK(invoke<double>(*code, 42) == 42.0 * 2.0);
+    CHECK(invoke<double>(*code, 2147483647) == 2147483647.0 * 2.0);
+}
+
+void testCheckMul64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0, 42) == 0.0);
+    CHECK(invoke<double>(*code, 1, 42) == 42.0);
+    CHECK(invoke<double>(*code, 42, 42) == 42.0 * 42.0);
+    CHECK(invoke<double>(*code, 9223372036854775807ll, 42) == static_cast<double>(9223372036854775807ll) * 42.0);
+}
+
+void testCheckMulFold(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+    CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"Should have been folded");
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkMul);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == a * b);
+}
+
+void testCheckMulFoldFail(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+    CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkMul);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == 42);
+}
+
+void testCheckMulArgumentAliasing64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+
+    // Pretend to use all the args.
+    PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+    useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Last use of arg1.
+    CheckValue* checkMul1 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Last use of arg3.
+    CheckValue* checkMul2 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg3, arg2);
+    checkMul2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Keep arg2 live past the checks.
+    PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Only use of checkMul1 and checkMul2.
+    CheckValue* checkMul3 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), checkMul1, checkMul2);
+    checkMul3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkMul3);
+
+    CHECK(compileAndRun<int64_t>(proc, 2, 3, 4) == 72);
+}
+
+void testCheckMulArgumentAliasing32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg3 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+
+    // Pretend to use all the args.
+    PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+    useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Last use of arg1.
+    CheckValue* checkMul1 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Last use of arg3.
+    CheckValue* checkMul2 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg2, arg3);
+    checkMul2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Keep arg2 live past the checks.
+    PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Only use of checkMul1 and checkMul2.
+    CheckValue* checkMul3 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), checkMul1, checkMul2);
+    checkMul3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkMul3);
+
+    CHECK(compileAndRun<int32_t>(proc, 2, 3, 4) == 72);
+}
+
+void testCheckMul64SShr()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, SShr, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Const32Value>(proc, Origin(), 1));
+    Value* arg2 = root->appendNew<Value>(
+        proc, SShr, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+        root->appendNew<Const32Value>(proc, Origin(), 1));
+    CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0ll, 42ll) == 0.0);
+    CHECK(invoke<double>(*code, 1ll, 42ll) == 0.0);
+    CHECK(invoke<double>(*code, 42ll, 42ll) == (42.0 / 2.0) * (42.0 / 2.0));
+    CHECK(invoke<double>(*code, 10000000000ll, 10000000000ll) == 25000000000000000000.0);
+}
+
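+// Editorial aside, not part of the original test suite: genericTestCompare
+// runs each comparison twice -- once materialized as a value (compare, then
+// NotEqual against zero) and once as a Branch -- because B3 lowers the two
+// shapes differently. The patchpoint in the then-case blocks if-conversion,
+// so the branch form really is compiled as a branch. An illustrative call,
+// assuming the functor shape defined below:
+//
+//     genericTestCompare(
+//         LessThan,
+//         [] (BasicBlock* block, Procedure& proc) -> Value* {
+//             return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+//         },
+//         [] (BasicBlock* block, Procedure& proc) -> Value* {
+//             return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+//         },
+//         int64_t(1), int64_t(2), 1);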
+template<typename LeftFunctor, typename RightFunctor, typename InputType>
+void genericTestCompare(
+    B3::Opcode opcode, const LeftFunctor& leftFunctor, const RightFunctor& rightFunctor,
+    InputType left, InputType right, int result)
+{
+    // Using a compare.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* leftValue = leftFunctor(root, proc);
+        Value* rightValue = rightFunctor(root, proc);
+        Value* comparisonResult = root->appendNew<Value>(proc, opcode, Origin(), leftValue, rightValue);
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, NotEqual, Origin(),
+                comparisonResult,
+                root->appendIntConstant(proc, Origin(), comparisonResult->type(), 0)));
+
+        CHECK(compileAndRun<int>(proc, left, right) == result);
+    }
+    
+    // Using a branch.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        BasicBlock* thenCase = proc.addBlock();
+        BasicBlock* elseCase = proc.addBlock();
+
+        Value* leftValue = leftFunctor(root, proc);
+        Value* rightValue = rightFunctor(root, proc);
+
+        root->appendNewControlValue(
+            proc, Branch, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), leftValue, rightValue),
+            FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+        // We use a patchpoint on the then case to ensure that this doesn't get if-converted.
+        PatchpointValue* patchpoint = thenCase->appendNew<PatchpointValue>(proc, Int32, Origin());
+        patchpoint->setGenerator(
+            [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                AllowMacroScratchRegisterUsage allowScratch(jit);
+                CHECK(params.size() == 1);
+                CHECK(params[0].isGPR());
+                jit.move(CCallHelpers::TrustedImm32(1), params[0].gpr());
+            });
+        thenCase->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+        elseCase->appendNewControlValue(
+            proc, Return, Origin(),
+            elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+        CHECK(compileAndRun<int>(proc, left, right) == result);
+    }
+}
+
+template<typename InputType>
+InputType modelCompare(B3::Opcode opcode, InputType left, InputType right)
+{
+    switch (opcode) {
+    case Equal:
+        return left == right;
+    case NotEqual:
+        return left != right;
+    case LessThan:
+        return left < right;
+    case GreaterThan:
+        return left > right;
+    case LessEqual:
+        return left <= right;
+    case GreaterEqual:
+        return left >= right;
+    case Above:
+        return static_cast<typename std::make_unsigned<InputType>::type>(left) >
+            static_cast<typename std::make_unsigned<InputType>::type>(right);
+    case Below:
+        return static_cast<typename std::make_unsigned<InputType>::type>(left) <
+            static_cast<typename std::make_unsigned<InputType>::type>(right);
+    case AboveEqual:
+        return static_cast<typename std::make_unsigned<InputType>::type>(left) >=
+            static_cast<typename std::make_unsigned<InputType>::type>(right);
+    case BelowEqual:
+        return static_cast<typename std::make_unsigned<InputType>::type>(left) <=
+            static_cast<typename std::make_unsigned<InputType>::type>(right);
+    case BitAnd:
+        return !!(left & right);
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return 0;
+    }
+}
+
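+// Editorial aside, not part of the original test suite: testCompareLoad
+// below feeds one comparison operand from memory so instruction selection
+// can fuse the load into the compare. Expected values come from
+// modelLoad<T>, defined earlier in this file; its effect is roughly this
+// narrowing-then-widening conversion (sketch; modelSizedLoad is a
+// hypothetical name):
+template<typename T>
+int32_t modelSizedLoad(int value)
+{
+    // E.g. T = int8_t sign-extends the low byte; T = uint16_t zero-extends
+    // the low half-word.
+    return static_cast<int32_t>(static_cast<T>(value));
+}
+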
+template<typename T>
+void testCompareLoad(B3::Opcode opcode, B3::Opcode loadOpcode, int left, int right)
+{
+    int result = modelCompare(opcode, modelLoad<T>(left), right);
+    
+    // Test addr-to-tmp
+    int slot = left;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Value>(
+                proc, Trunc, Origin(),
+                block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+        },
+        left, right, result);
+
+    // Test addr-to-imm
+    slot = left;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const32Value>(proc, Origin(), right);
+        },
+        left, right, result);
+
+    result = modelCompare(opcode, left, modelLoad<T>(right));
+    
+    // Test tmp-to-addr
+    slot = right;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Value>(
+                proc, Trunc, Origin(),
+                block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+        },
+        left, right, result);
+
+    // Test imm-to-addr
+    slot = right;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const32Value>(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+        },
+        left, right, result);
+
+    // Test addr-to-addr, with the same addr.
+    slot = left;
+    Value* value;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            value = block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+            return value;
+        },
+        [&] (BasicBlock*, Procedure&) {
+            return value;
+        },
+        left, left, modelCompare(opcode, modelLoad<T>(left), modelLoad<T>(left)));
+}
+
+void testCompareImpl(B3::Opcode opcode, int64_t left, int64_t right)
+{
+    int64_t result = modelCompare(opcode, left, right);
+    int32_t int32Result = modelCompare(opcode, static_cast<int32_t>(left), static_cast<int32_t>(right));
+    
+    // Test tmp-to-tmp.
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        },
+        left, right, result);
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Value>(
+                proc, Trunc, Origin(),
+                block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Value>(
+                proc, Trunc, Origin(),
+                block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+        },
+        left, right, int32Result);
+
+    // Test imm-to-tmp.
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const64Value>(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        },
+        left, right, result);
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const32Value>(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Value>(
+                proc, Trunc, Origin(),
+                block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+        },
+        left, right, int32Result);
+
+    // Test tmp-to-imm.
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const64Value>(proc, Origin(), right);
+        },
+        left, right, result);
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Value>(
+                proc, Trunc, Origin(),
+                block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const32Value>(proc, Origin(), right);
+        },
+        left, right, int32Result);
+
+    // Test imm-to-imm.
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const64Value>(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const64Value>(proc, Origin(), right);
+        },
+        left, right, result);
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const32Value>(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const32Value>(proc, Origin(), right);
+        },
+        left, right, int32Result);
+
+    testCompareLoad<int32_t>(opcode, Load, left, right);
+    testCompareLoad<int8_t>(opcode, Load8S, left, right);
+    testCompareLoad<uint8_t>(opcode, Load8Z, left, right);
+    testCompareLoad<int16_t>(opcode, Load16S, left, right);
+    testCompareLoad<uint16_t>(opcode, Load16Z, left, right);
+}
+
+void testCompare(B3::Opcode opcode, int64_t left, int64_t right)
+{
+    testCompareImpl(opcode, left, right);
+    testCompareImpl(opcode, left, right + 1);
+    testCompareImpl(opcode, left, right - 1);
+}
+
+void testEqualDouble(double left, double right, bool result)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Equal, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    CHECK(compileAndRun<bool>(proc, left, right) == result);
+}
+
+int simpleFunction(int a, int b)
+{
+    return a + b;
+}
+
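+// Editorial aside, not part of the original test suite: the testCall*
+// functions exercise CCallValue, B3's opcode for calling into host C code
+// through an opaque function-pointer constant. The first test compiles the
+// moral equivalent of:
+//
+//     int compiled(int a, int b) { return simpleFunction(a, b); }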
+void testCallSimple(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<CCallValue>(
+            proc, Int32, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
+void testCallRare(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* common = proc.addBlock();
+    BasicBlock* rare = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        FrequentedBlock(rare, FrequencyClass::Rare),
+        FrequentedBlock(common));
+
+    common->appendNewControlValue(
+        proc, Return, Origin(), common->appendNew<Const32Value>(proc, Origin(), 0));
+    
+    rare->appendNewControlValue(
+        proc, Return, Origin(),
+        rare->appendNew<CCallValue>(
+            proc, Int32, Origin(),
+            rare->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+            rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+    CHECK(compileAndRun<int>(proc, true, a, b) == a + b);
+}
+
+void testCallRareLive(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* common = proc.addBlock();
+    BasicBlock* rare = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        FrequentedBlock(rare, FrequencyClass::Rare),
+        FrequentedBlock(common));
+
+    common->appendNewControlValue(
+        proc, Return, Origin(), common->appendNew<Const32Value>(proc, Origin(), 0));
+    
+    rare->appendNewControlValue(
+        proc, Return, Origin(),
+        rare->appendNew<Value>(
+            proc, Add, Origin(),
+            rare->appendNew<CCallValue>(
+                proc, Int32, Origin(),
+                rare->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+                rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+                rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)),
+            rare->appendNew<Value>(
+                proc, Trunc, Origin(),
+                rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3))));
+
+    CHECK(compileAndRun<int>(proc, true, a, b, c) == a + b + c);
+}
+
+void testCallSimplePure(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<CCallValue>(
+            proc, Int32, Origin(), Effects::none(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
+int functionWithHellaArguments(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m, int n, int o, int p, int q, int r, int s, int t, int u, int v, int w, int x, int y, int z)
+{
+    return (a << 0) + (b << 1) + (c << 2) + (d << 3) + (e << 4) + (f << 5) + (g << 6) + (h << 7) + (i << 8) + (j << 9) + (k << 10) + (l << 11) + (m << 12) + (n << 13) + (o << 14) + (p << 15) + (q << 16) + (r << 17) + (s << 18) + (t << 19) + (u << 20) + (v << 21) + (w << 22) + (x << 23) + (y << 24) + (z << 25);
+}
+
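+// Editorial aside, not part of the original test suite: 26 arguments exceed
+// every supported ABI's register budget (System V x86-64, for instance,
+// passes at most 6 integer and 8 vector arguments in registers), so these
+// tests force CCall lowering to route the tail of the argument list through
+// the stack. Each argument carries a distinct power-of-two weight, so any
+// misrouted argument changes the checked sum.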
+void testCallFunctionWithHellaArguments()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> args;
+    for (unsigned i = 0; i < 26; ++i)
+        args.append(root->appendNew<Const32Value>(proc, Origin(), i + 1));
+
+    CCallValue* call = root->appendNew<CCallValue>(
+        proc, Int32, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaArguments)));
+    call->children().appendVector(args);
+    
+    root->appendNewControlValue(proc, Return, Origin(), call);
+
+    CHECK(compileAndRun<int>(proc) == functionWithHellaArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
+void testReturnDouble(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<ConstDoubleValue>(proc, Origin(), value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), value));
+}
+
+void testReturnFloat(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<ConstFloatValue>(proc, Origin(), value));
+
+    CHECK(isIdentical(compileAndRun<float>(proc), value));
+}
+
+double simpleFunctionDouble(double a, double b)
+{
+    return a + b;
+}
+
+void testCallSimpleDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<CCallValue>(
+            proc, Double, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunctionDouble)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    CHECK(compileAndRun<double>(proc, a, b) == a + b);
+}
+
+float simpleFunctionFloat(float a, float b)
+{
+    return a + b;
+}
+
+void testCallSimpleFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<CCallValue>(
+            proc, Float, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunctionFloat)),
+            floatValue1,
+            floatValue2));
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), a + b));
+}
+
+double functionWithHellaDoubleArguments(double a, double b, double c, double d, double e, double f, double g, double h, double i, double j, double k, double l, double m, double n, double o, double p, double q, double r, double s, double t, double u, double v, double w, double x, double y, double z)
+{
+    return a * pow(2, 0) + b * pow(2, 1) + c * pow(2, 2) + d * pow(2, 3) + e * pow(2, 4) + f * pow(2, 5) + g * pow(2, 6) + h * pow(2, 7) + i * pow(2, 8) + j * pow(2, 9) + k * pow(2, 10) + l * pow(2, 11) + m * pow(2, 12) + n * pow(2, 13) + o * pow(2, 14) + p * pow(2, 15) + q * pow(2, 16) + r * pow(2, 17) + s * pow(2, 18) + t * pow(2, 19) + u * pow(2, 20) + v * pow(2, 21) + w * pow(2, 22) + x * pow(2, 23) + y * pow(2, 24) + z * pow(2, 25);
+}
+
+void testCallFunctionWithHellaDoubleArguments()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> args;
+    for (unsigned i = 0; i < 26; ++i)
+        args.append(root->appendNew<ConstDoubleValue>(proc, Origin(), i + 1));
+
+    CCallValue* call = root->appendNew<CCallValue>(
+        proc, Double, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaDoubleArguments)));
+    call->children().appendVector(args);
+    
+    root->appendNewControlValue(proc, Return, Origin(), call);
+
+    CHECK(compileAndRun<double>(proc) == functionWithHellaDoubleArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
+float functionWithHellaFloatArguments(float a, float b, float c, float d, float e, float f, float g, float h, float i, float j, float k, float l, float m, float n, float o, float p, float q, float r, float s, float t, float u, float v, float w, float x, float y, float z)
+{
+    return a * pow(2, 0) + b * pow(2, 1) + c * pow(2, 2) + d * pow(2, 3) + e * pow(2, 4) + f * pow(2, 5) + g * pow(2, 6) + h * pow(2, 7) + i * pow(2, 8) + j * pow(2, 9) + k * pow(2, 10) + l * pow(2, 11) + m * pow(2, 12) + n * pow(2, 13) + o * pow(2, 14) + p * pow(2, 15) + q * pow(2, 16) + r * pow(2, 17) + s * pow(2, 18) + t * pow(2, 19) + u * pow(2, 20) + v * pow(2, 21) + w * pow(2, 22) + x * pow(2, 23) + y * pow(2, 24) + z * pow(2, 25);
+}
+
+void testCallFunctionWithHellaFloatArguments()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> args;
+    for (unsigned i = 0; i < 26; ++i)
+        args.append(root->appendNew<ConstFloatValue>(proc, Origin(), i + 1));
+
+    CCallValue* call = root->appendNew<CCallValue>(
+        proc, Float, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaFloatArguments)));
+    call->children().appendVector(args);
+    
+    root->appendNewControlValue(proc, Return, Origin(), call);
+
+    CHECK(compileAndRun<float>(proc) == functionWithHellaFloatArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
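+// Editorial aside, not part of the original test suite: chill(Div) is B3's
+// non-trapping division. Its semantics, as these tests rely on them, modeled
+// host-side (modelChillDiv is a hypothetical helper):
+template<typename T>
+T modelChillDiv(T numerator, T denominator)
+{
+    if (!denominator)
+        return 0; // Division by zero yields 0 rather than trapping.
+    if (numerator == std::numeric_limits<T>::min() && denominator == -1)
+        return numerator; // The lone signed-overflow case wraps to itself.
+    return numerator / denominator;
+}
+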
+void testChillDiv(int num, int den, int res)
+{
+    // Test non-constant.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+        CHECK(compileAndRun<int>(proc, num, den) == res);
+    }
+
+    // Test constant.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<Const32Value>(proc, Origin(), num),
+                root->appendNew<Const32Value>(proc, Origin(), den)));
+
+        CHECK(compileAndRun<int>(proc) == res);
+    }
+}
+
+void testChillDivTwice(int num1, int den1, int num2, int den2, int res)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)))));
+
+    CHECK(compileAndRun<int>(proc, num1, den1, num2, den2) == res);
+}
+
+void testChillDiv64(int64_t num, int64_t den, int64_t res)
+{
+    if (!is64Bit())
+        return;
+
+    // Test non-constant.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+        CHECK(compileAndRun<int64_t>(proc, num, den) == res);
+    }
+
+    // Test constant.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<Const64Value>(proc, Origin(), num),
+                root->appendNew<Const64Value>(proc, Origin(), den)));
+
+        CHECK(compileAndRun<int64_t>(proc) == res);
+    }
+}
+
+void testModArg(int64_t value)
+{
+    if (!value)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(!compileAndRun<int64_t>(proc, value));
+}
+
+void testModArgs(int64_t numerator, int64_t denominator)
+{
+    if (!denominator)
+        return;
+    if (numerator == std::numeric_limits<int64_t>::min() && denominator == -1)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModImms(int64_t numerator, int64_t denominator)
+{
+    if (!denominator)
+        return;
+    if (numerator == std::numeric_limits<int64_t>::min() && denominator == -1)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Const64Value>(proc, Origin(), numerator);
+    Value* argument2 = root->appendNew<Const64Value>(proc, Origin(), denominator);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModArg32(int32_t value)
+{
+    if (!value)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(!compileAndRun<int32_t>(proc, value));
+}
+
+void testModArgs32(int32_t numerator, int32_t denominator)
+{
+    if (!denominator)
+        return;
+    if (numerator == std::numeric_limits<int32_t>::min() && denominator == -1)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModImms32(int32_t numerator, int32_t denominator)
+{
+    if (!denominator)
+        return;
+    if (numerator == std::numeric_limits<int32_t>::min() && denominator == -1)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Const32Value>(proc, Origin(), numerator);
+    Value* argument2 = root->appendNew<Const32Value>(proc, Origin(), denominator);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
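+// Editorial aside, not part of the original test suite: the chillMod oracle
+// used by the CHECKs below is defined earlier in this file. For reference,
+// its behavior matches this sketch (modelChillMod is a hypothetical name;
+// treating any denominator of -1 as yielding 0 also sidesteps the
+// INT_MIN % -1 trap):
+template<typename T>
+T modelChillMod(T numerator, T denominator)
+{
+    if (!denominator)
+        return 0;
+    if (denominator == -1)
+        return 0;
+    return numerator % denominator;
+}
+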
+void testChillModArg(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(!compileAndRun<int64_t>(proc, value));
+}
+
+void testChillModArgs(int64_t numerator, int64_t denominator)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModImms(int64_t numerator, int64_t denominator)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Const64Value>(proc, Origin(), numerator);
+    Value* argument2 = root->appendNew<Const64Value>(proc, Origin(), denominator);
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModArg32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(!compileAndRun<int32_t>(proc, value));
+}
+
+void testChillModArgs32(int32_t numerator, int32_t denominator)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModImms32(int32_t numerator, int32_t denominator)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Const32Value>(proc, Origin(), numerator);
+    Value* argument2 = root->appendNew<Const32Value>(proc, Origin(), denominator);
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
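+// Editorial aside, not part of the original test suite: testSwitch builds a
+// switch over `degree` cases spaced `gap` apart. Dense case sets (gap == 1)
+// and sparse ones exercise different lowerings -- jump table versus
+// compare-and-branch tree -- and when gap > 1 the probes at i * gap +/- 1
+// land in the holes between cases, checking that they fall through. A
+// driver would call, e.g., testSwitch(10, 1) for a dense table and
+// testSwitch(10, 100) for a sparse one.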
+void testSwitch(unsigned degree, unsigned gap = 1)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    BasicBlock* terminate = proc.addBlock();
+    terminate->appendNewControlValue(
+        proc, Return, Origin(),
+        terminate->appendNew<Const32Value>(proc, Origin(), 0));
+
+    SwitchValue* switchValue = root->appendNew<SwitchValue>(
+        proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    switchValue->setFallThrough(FrequentedBlock(terminate));
+
+    for (unsigned i = 0; i < degree; ++i) {
+        BasicBlock* newBlock = proc.addBlock();
+        newBlock->appendNewControlValue(
+            proc, Return, Origin(),
+            newBlock->appendNew<ArgumentRegValue>(
+                proc, Origin(), (i & 1) ? GPRInfo::argumentGPR2 : GPRInfo::argumentGPR1));
+        switchValue->appendCase(SwitchCase(gap * i, FrequentedBlock(newBlock)));
+    }
+
+    auto code = compile(proc);
+
+    for (unsigned i = 0; i < degree; ++i) {
+        CHECK(invoke<int32_t>(*code, i * gap, 42, 11) == ((i & 1) ? 11 : 42));
+        if (gap > 1) {
+            CHECK(!invoke<int32_t>(*code, i * gap + 1, 42, 11));
+            CHECK(!invoke<int32_t>(*code, i * gap - 1, 42, 11));
+        }
+    }
+
+    CHECK(!invoke<int32_t>(*code, -1, 42, 11));
+    CHECK(!invoke<int32_t>(*code, degree * gap, 42, 11));
+    CHECK(!invoke<int32_t>(*code, degree * gap + 1, 42, 11));
+}
+
+void testSwitchChillDiv(unsigned degree, unsigned gap = 1)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* right = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+
+    BasicBlock* terminate = proc.addBlock();
+    terminate->appendNewControlValue(
+        proc, Return, Origin(),
+        terminate->appendNew<Const32Value>(proc, Origin(), 0));
+
+    SwitchValue* switchValue = root->appendNew<SwitchValue>(
+        proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    switchValue->setFallThrough(FrequentedBlock(terminate));
+
+    for (unsigned i = 0; i < degree; ++i) {
+        BasicBlock* newBlock = proc.addBlock();
+
+        newBlock->appendNewControlValue(
+            proc, Return, Origin(),
+            newBlock->appendNew<Value>(
+                proc, chill(Div), Origin(), (i & 1) ? right : left, (i & 1) ? left : right));
+        
+        switchValue->appendCase(SwitchCase(gap * i, FrequentedBlock(newBlock)));
+    }
+
+    auto code = compile(proc);
+
+    for (unsigned i = 0; i < degree; ++i) {
+        dataLog("i = ", i, "\n");
+        int32_t result = invoke<int32_t>(*code, i * gap, 42, 11);
+        dataLog("result = ", result, "\n");
+        CHECK(result == ((i & 1) ? 11/42 : 42/11));
+        if (gap > 1) {
+            CHECK(!invoke<int32_t>(*code, i * gap + 1, 42, 11));
+            CHECK(!invoke<int32_t>(*code, i * gap - 1, 42, 11));
+        }
+    }
+
+    CHECK(!invoke<int32_t>(*code, -1, 42, 11));
+    CHECK(!invoke<int32_t>(*code, degree * gap, 42, 11));
+    CHECK(!invoke<int32_t>(*code, degree * gap + 1, 42, 11));
+}
+
+void testSwitchTargettingSameBlock()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    BasicBlock* terminate = proc.addBlock();
+    terminate->appendNewControlValue(
+        proc, Return, Origin(),
+        terminate->appendNew<Const32Value>(proc, Origin(), 5));
+
+    SwitchValue* switchValue = root->appendNew<SwitchValue>(
+        proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    switchValue->setFallThrough(FrequentedBlock(terminate));
+
+    BasicBlock* otherTarget = proc.addBlock();
+    otherTarget->appendNewControlValue(
+        proc, Return, Origin(),
+        otherTarget->appendNew<Const32Value>(proc, Origin(), 42));
+    switchValue->appendCase(SwitchCase(3, FrequentedBlock(otherTarget)));
+    switchValue->appendCase(SwitchCase(13, FrequentedBlock(otherTarget)));
+
+    auto code = compile(proc);
+
+    for (unsigned i = 0; i < 20; ++i) {
+        int32_t expected = (i == 3 || i == 13) ? 42 : 5;
+        CHECK(invoke<int32_t>(*code, i) == expected);
+    }
+}
+
+void testSwitchTargettingSameBlockFoldPathConstant()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    BasicBlock* terminate = proc.addBlock();
+    terminate->appendNewControlValue(
+        proc, Return, Origin(),
+        terminate->appendNew<Const32Value>(proc, Origin(), 42));
+
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    SwitchValue* switchValue = root->appendNew<SwitchValue>(proc, Origin(), argument);
+    switchValue->setFallThrough(FrequentedBlock(terminate));
+
+    BasicBlock* otherTarget = proc.addBlock();
+    otherTarget->appendNewControlValue(
+        proc, Return, Origin(), argument);
+    switchValue->appendCase(SwitchCase(3, FrequentedBlock(otherTarget)));
+    switchValue->appendCase(SwitchCase(13, FrequentedBlock(otherTarget)));
+
+    auto code = compile(proc);
+
+    for (unsigned i = 0; i < 20; ++i) {
+        int32_t expected = (i == 3 || i == 13) ? i : 42;
+        CHECK(invoke<int32_t>(*code, i) == expected);
+    }
+}
+
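+// The Trunc/ZExt/SExt tests below each come in two flavors: one that runs the
+// operation on a live argument register, and a *Fold variant whose operand is
+// a constant, exercising the constant-folding path.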
+void testTruncFold(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<int32_t>(proc) == static_cast<int32_t>(value));
+}
+
+void testZExt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZExt32, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<uint64_t>(proc, value) == static_cast<uint64_t>(static_cast<uint32_t>(value)));
+}
+
+void testZExt32Fold(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZExt32, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<uint64_t>(proc, value) == static_cast<uint64_t>(static_cast<uint32_t>(value)));
+}
+
+void testSExt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt32, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value));
+}
+
+void testSExt32Fold(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt32, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value));
+}
+
+void testTruncZExt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<Value>(
+                proc, ZExt32, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == value);
+}
+
+void testTruncSExt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<Value>(
+                proc, SExt32, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == value);
+}
+
+void testSExt8(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8Fold(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<int32_t>(proc) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8SExt8(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Value>(
+                proc, SExt8, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8SExt16(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Value>(
+                proc, SExt16, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8BitAnd(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Value>(
+                proc, BitAnd, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value & mask)));
+}
+
+void testBitAndSExt8(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, SExt8, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+            root->appendNew<Const32Value>(proc, Origin(), mask)));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == (static_cast<int32_t>(static_cast<int8_t>(value)) & mask));
+}
+
+void testSExt16(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16Fold(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<int32_t>(proc) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16SExt16(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Value>(
+                proc, SExt16, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16SExt8(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Value>(
+                proc, SExt8, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt16BitAnd(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Value>(
+                proc, BitAnd, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value & mask)));
+}
+
+void testBitAndSExt16(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, SExt16, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+            root->appendNew<Const32Value>(proc, Origin(), mask)));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == (static_cast<int32_t>(static_cast<int16_t>(value)) & mask));
+}
+
+void testSExt32BitAnd(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt32, Origin(),
+            root->appendNew<Value>(
+                proc, BitAnd, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+    CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value & mask));
+}
+
+void testBitAndSExt32(int32_t value, int64_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, SExt32, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+            root->appendNew<Const64Value>(proc, Origin(), mask)));
+
+    CHECK(compileAndRun<int64_t>(proc, value) == (static_cast<int64_t>(value) & mask));
+}
+
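+// Select(predicate, thenCase, elseCase) returns thenCase when the predicate is
+// non-zero and elseCase otherwise; here the predicate is (argument0 == 42).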
+void testBasicSelect()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, Equal, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+    auto code = compile(proc);
+    CHECK(invoke<intptr_t>(*code, 42, 1, 2) == 1);
+    CHECK(invoke<intptr_t>(*code, 42, 642462, 32533) == 642462);
+    CHECK(invoke<intptr_t>(*code, 43, 1, 2) == 2);
+    CHECK(invoke<intptr_t>(*code, 43, 642462, 32533) == 32533);
+}
+
+void testSelectTest()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+    auto code = compile(proc);
+    CHECK(invoke<intptr_t>(*code, 42, 1, 2) == 1);
+    CHECK(invoke<intptr_t>(*code, 42, 642462, 32533) == 642462);
+    CHECK(invoke<intptr_t>(*code, 0, 1, 2) == 2);
+    CHECK(invoke<intptr_t>(*code, 0, 642462, 32533) == 32533);
+}
+
+void testSelectCompareDouble()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, LessThan, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    auto code = compile(proc);
+    CHECK(invoke<intptr_t>(*code, -1.0, 1.0, 1, 2) == 1);
+    CHECK(invoke<intptr_t>(*code, 42.5, 42.51, 642462, 32533) == 642462);
+    CHECK(invoke<intptr_t>(*code, PNaN, 0.0, 1, 2) == 2);
+    CHECK(invoke<intptr_t>(*code, 42.51, 42.5, 642462, 32533) == 32533);
+    CHECK(invoke<intptr_t>(*code, 42.52, 42.52, 524978245, 352) == 352);
+}
+
+template<B3::Opcode opcode>
+void testSelectCompareFloat(float a, float b, bool (*operation)(float, float))
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, opcode, Origin(),
+                floatValue1,
+                floatValue2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), 42, -5), operation(a, b) ? 42 : -5));
+}
+
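+// Drives the template above once per floating-point comparison opcode. The
+// last lambda is the oracle for EqualOrUnordered: true when either operand is
+// NaN or when the operands are equal.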
+void testSelectCompareFloat(float a, float b)
+{
+    testSelectCompareFloat<Equal>(a, b, [](float a, float b) -> bool { return a == b; });
+    testSelectCompareFloat<NotEqual>(a, b, [](float a, float b) -> bool { return a != b; });
+    testSelectCompareFloat<LessThan>(a, b, [](float a, float b) -> bool { return a < b; });
+    testSelectCompareFloat<GreaterThan>(a, b, [](float a, float b) -> bool { return a > b; });
+    testSelectCompareFloat<LessEqual>(a, b, [](float a, float b) -> bool { return a <= b; });
+    testSelectCompareFloat<GreaterEqual>(a, b, [](float a, float b) -> bool { return a >= b; });
+    testSelectCompareFloat<EqualOrUnordered>(a, b, [](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+template<B3::Opcode opcode>
+void testSelectCompareFloatToDouble(float a, float b, bool (*operation)(float, float))
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* doubleValue1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* doubleValue2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, opcode, Origin(),
+                doubleValue1,
+                doubleValue2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), 42, -5), operation(a, b) ? 42 : -5));
+}
+
+void testSelectCompareFloatToDouble(float a, float b)
+{
+    testSelectCompareFloatToDouble<Equal>(a, b, [](float a, float b) -> bool { return a == b; });
+    testSelectCompareFloatToDouble<NotEqual>(a, b, [](float a, float b) -> bool { return a != b; });
+    testSelectCompareFloatToDouble<LessThan>(a, b, [](float a, float b) -> bool { return a < b; });
+    testSelectCompareFloatToDouble<GreaterThan>(a, b, [](float a, float b) -> bool { return a > b; });
+    testSelectCompareFloatToDouble<LessEqual>(a, b, [](float a, float b) -> bool { return a <= b; });
+    testSelectCompareFloatToDouble<GreaterEqual>(a, b, [](float a, float b) -> bool { return a >= b; });
+    testSelectCompareFloatToDouble<EqualOrUnordered>(a, b, [](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+void testSelectDouble()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, Equal, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    auto code = compile(proc);
+    CHECK(invoke<double>(*code, 42, 1.5, 2.6) == 1.5);
+    CHECK(invoke<double>(*code, 42, 642462.7, 32533.8) == 642462.7);
+    CHECK(invoke<double>(*code, 43, 1.9, 2.0) == 2.0);
+    CHECK(invoke<double>(*code, 43, 642462.1, 32533.2) == 32533.2);
+}
+
+void testSelectDoubleTest()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    auto code = compile(proc);
+    CHECK(invoke<double>(*code, 42, 1.5, 2.6) == 1.5);
+    CHECK(invoke<double>(*code, 42, 642462.7, 32533.8) == 642462.7);
+    CHECK(invoke<double>(*code, 0, 1.9, 2.0) == 2.0);
+    CHECK(invoke<double>(*code, 0, 642462.1, 32533.2) == 32533.2);
+}
+
+void testSelectDoubleCompareDouble()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, LessThan, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3)));
+
+    auto code = compile(proc);
+    CHECK(invoke<double>(*code, -1.0, 1.0, 1.1, 2.2) == 1.1);
+    CHECK(invoke<double>(*code, 42.5, 42.51, 642462.3, 32533.4) == 642462.3);
+    CHECK(invoke<double>(*code, PNaN, 0.0, 1.5, 2.6) == 2.6);
+    CHECK(invoke<double>(*code, 42.51, 42.5, 642462.7, 32533.8) == 32533.8);
+    CHECK(invoke<double>(*code, 42.52, 42.52, 524978245.9, 352.0) == 352.0);
+}
+
+void testSelectDoubleCompareFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, LessThan, Origin(),
+                floatValue1,
+                floatValue2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), 42.1, -M_PI), a < b ? 42.1 : -M_PI));
+}
+
+void testSelectFloatCompareFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* argument3int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* argument4int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* floatValue3 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument3int32);
+    Value* floatValue4 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument4int32);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, LessThan, Origin(),
+                floatValue1,
+                floatValue2),
+            floatValue3,
+            floatValue4));
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), bitwise_cast<int32_t>(1.1f), bitwise_cast<int32_t>(-42.f)), a < b ? 1.1f : -42.f));
+}
+
+
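+// Exercises Select over doubles under different aliasing and liveness
+// patterns. The empty patchpoints pin a case value to a register
+// (ValueRep::SomeRegister) and keep it live past the Select, so the lowering
+// cannot simply overwrite that register with the result.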
+template<B3::Opcode opcode>
+void testSelectDoubleCompareDouble(bool (*operation)(double, double))
+{
+    { // Compare arguments and selected arguments are all different.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+        Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, Select, Origin(),
+                root->appendNew<Value>(
+                    proc, opcode, Origin(),
+                    arg0,
+                    arg1),
+                arg2,
+                arg3));
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<double>()) {
+            for (auto& right : floatingPointOperands<double>()) {
+                double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, -66.5), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. "thenCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+        Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3);
+
+        Value* result = root->appendNew<Value>(proc, Select, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<double>()) {
+            for (auto& right : floatingPointOperands<double>()) {
+                double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, -66.5), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. "elseCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+        Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3);
+
+        Value* result = root->appendNew<Value>(proc, Select, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<double>()) {
+            for (auto& right : floatingPointOperands<double>()) {
+                double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, -66.5), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. Both cases are live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+        Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3);
+
+        Value* result = root->appendNew<Value>(proc, Select, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<double>()) {
+            for (auto& right : floatingPointOperands<double>()) {
+                double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, -66.5), expected));
+            }
+        }
+    }
+    { // The left argument is the same as the "elseCase" argument.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, Select, Origin(),
+                root->appendNew<Value>(
+                    proc, opcode, Origin(),
+                    arg0,
+                    arg1),
+                arg2,
+                arg0));
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<double>()) {
+            for (auto& right : floatingPointOperands<double>()) {
+                double expected = operation(left.value, right.value) ? 42.5 : left.value;
+                CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, left.value), expected));
+            }
+        }
+    }
+    { // The left argument is the same as the "elseCase" argument. "thenCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+
+        Value* result = root->appendNew<Value>(proc, Select, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg0);
+
+        PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<double>()) {
+            for (auto& right : floatingPointOperands<double>()) {
+                double expected = operation(left.value, right.value) ? 42.5 : left.value;
+                CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, left.value), expected));
+            }
+        }
+    }
+}
+
+void testSelectDoubleCompareDoubleWithAliasing()
+{
+    testSelectDoubleCompareDouble<Equal>([](double a, double b) -> bool { return a == b; });
+    testSelectDoubleCompareDouble<NotEqual>([](double a, double b) -> bool { return a != b; });
+    testSelectDoubleCompareDouble<LessThan>([](double a, double b) -> bool { return a < b; });
+    testSelectDoubleCompareDouble<GreaterThan>([](double a, double b) -> bool { return a > b; });
+    testSelectDoubleCompareDouble<LessEqual>([](double a, double b) -> bool { return a <= b; });
+    testSelectDoubleCompareDouble<GreaterEqual>([](double a, double b) -> bool { return a >= b; });
+    testSelectDoubleCompareDouble<EqualOrUnordered>([](double a, double b) -> bool { return a != a || b != b || a == b; });
+}
+
+template<B3::Opcode opcode>
+void testSelectFloatCompareFloat(bool (*operation)(float, float))
+{
+    { // Compare arguments and selected arguments are all different.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+        Value* arg3 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, Select, Origin(),
+                root->appendNew<Value>(
+                    proc, opcode, Origin(),
+                    arg0,
+                    arg1),
+                arg2,
+                arg3));
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<float>()) {
+            for (auto& right : floatingPointOperands<float>()) {
+                float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(-66.5f)), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. "thenCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+        Value* arg3 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+
+        Value* result = root->appendNew<Value>(proc, Select, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<float>()) {
+            for (auto& right : floatingPointOperands<float>()) {
+                float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(-66.5f)), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. "elseCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+        Value* arg3 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+
+        Value* result = root->appendNew<Value>(proc, Select, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<float>()) {
+            for (auto& right : floatingPointOperands<float>()) {
+                float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(-66.5f)), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. Both cases are live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+        Value* arg3 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+
+        Value* result = root->appendNew<Value>(proc, Select, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<float>()) {
+            for (auto& right : floatingPointOperands<float>()) {
+                float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(-66.5f)), expected));
+            }
+        }
+    }
+    { // The left argument is the same as the "elseCase" argument.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, Select, Origin(),
+                root->appendNew<Value>(
+                    proc, opcode, Origin(),
+                    arg0,
+                    arg1),
+                arg2,
+                arg0));
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<float>()) {
+            for (auto& right : floatingPointOperands<float>()) {
+                float expected = operation(left.value, right.value) ? 42.5 : left.value;
+                CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(left.value)), expected));
+            }
+        }
+    }
+    { // The left argument is the same as the "elseCase" argument. "thenCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+            root->appendNew<Value>(proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+        Value* result = root->appendNew<Value>(proc, Select, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg0);
+
+        PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands<float>()) {
+            for (auto& right : floatingPointOperands<float>()) {
+                float expected = operation(left.value, right.value) ? 42.5 : left.value;
+                CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(left.value)), expected));
+            }
+        }
+    }
+}
+
+void testSelectFloatCompareFloatWithAliasing()
+{
+    testSelectFloatCompareFloat<Equal>([](float a, float b) -> bool { return a == b; });
+    testSelectFloatCompareFloat<NotEqual>([](float a, float b) -> bool { return a != b; });
+    testSelectFloatCompareFloat<LessThan>([](float a, float b) -> bool { return a < b; });
+    testSelectFloatCompareFloat<GreaterThan>([](float a, float b) -> bool { return a > b; });
+    testSelectFloatCompareFloat<LessEqual>([](float a, float b) -> bool { return a <= b; });
+    testSelectFloatCompareFloat<GreaterEqual>([](float a, float b) -> bool { return a >= b; });
+    testSelectFloatCompareFloat<EqualOrUnordered>([](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+void testSelectFold(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, Equal, Origin(),
+                root->appendNew<ConstPtrValue>(proc, Origin(), value),
+                root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    auto code = compile(proc);
+    CHECK(invoke<intptr_t>(*code, 1, 2) == (value == 42 ? 1 : 2));
+    CHECK(invoke<intptr_t>(*code, 642462, 32533) == (value == 42 ? 642462 : 32533));
+}
+
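+// The predicate here is Equal(NotEqual(x, 42), 0), a double negation that the
+// optimizer is expected to fold back into a plain (x == 42) test.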
+void testSelectInvert()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, Equal, Origin(),
+                root->appendNew<Value>(
+                    proc, NotEqual, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                    root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+                root->appendNew<Const32Value>(proc, Origin(), 0)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+    auto code = compile(proc);
+    CHECK(invoke<intptr_t>(*code, 42, 1, 2) == 1);
+    CHECK(invoke<intptr_t>(*code, 42, 642462, 32533) == 642462);
+    CHECK(invoke<intptr_t>(*code, 43, 1, 2) == 2);
+    CHECK(invoke<intptr_t>(*code, 43, 642462, 32533) == 32533);
+}
+
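+// A Check fires its stackmap generator when its operand is non-zero. With a
+// true argument the Select yields -42, the checked Add is -42 + 42 == 0, and
+// the procedure returns 0; with false it is 35 + 42 == 77, so the check fires
+// and the generator's hand-rolled epilogue returns 666 instead.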
+void testCheckSelect()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    CheckValue* check = root->appendNew<CheckValue>(
+        proc, Check, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(
+                proc, Select, Origin(),
+                root->appendNew<Value>(
+                    proc, BitAnd, Origin(),
+                    root->appendNew<Value>(
+                        proc, Trunc, Origin(),
+                        root->appendNew<ArgumentRegValue>(
+                            proc, Origin(), GPRInfo::argumentGPR0)),
+                    root->appendNew<Const32Value>(proc, Origin(), 0xff)),
+                root->appendNew<Const32Value>(proc, Origin(), -42),
+                root->appendNew<Const32Value>(proc, Origin(), 35)),
+            root->appendNew<Const32Value>(proc, Origin(), 42)));
+    unsigned generationCount = 0;
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            generationCount++;
+            jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(generationCount == 1);
+    CHECK(invoke<int>(*code, true) == 0);
+    CHECK(invoke<int>(*code, false) == 666);
+}
+
+void testCheckSelectCheckSelect()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    CheckValue* check = root->appendNew<CheckValue>(
+        proc, Check, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(
+                proc, Select, Origin(),
+                root->appendNew<Value>(
+                    proc, BitAnd, Origin(),
+                    root->appendNew<Value>(
+                        proc, Trunc, Origin(),
+                        root->appendNew<ArgumentRegValue>(
+                            proc, Origin(), GPRInfo::argumentGPR0)),
+                    root->appendNew<Const32Value>(proc, Origin(), 0xff)),
+                root->appendNew<Const32Value>(proc, Origin(), -42),
+                root->appendNew<Const32Value>(proc, Origin(), 35)),
+            root->appendNew<Const32Value>(proc, Origin(), 42)));
+
+    unsigned generationCount = 0;
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            generationCount++;
+            jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    
+    CheckValue* check2 = root->appendNew<CheckValue>(
+        proc, Check, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(
+                proc, Select, Origin(),
+                root->appendNew<Value>(
+                    proc, BitAnd, Origin(),
+                    root->appendNew<Value>(
+                        proc, Trunc, Origin(),
+                        root->appendNew<ArgumentRegValue>(
+                            proc, Origin(), GPRInfo::argumentGPR1)),
+                    root->appendNew<Const32Value>(proc, Origin(), 0xff)),
+                root->appendNew<Const32Value>(proc, Origin(), -43),
+                root->appendNew<Const32Value>(proc, Origin(), 36)),
+            root->appendNew<Const32Value>(proc, Origin(), 43)));
+
+    unsigned generationCount2 = 0;
+    check2->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            generationCount2++;
+            jit.move(CCallHelpers::TrustedImm32(667), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(generationCount == 1);
+    CHECK(generationCount2 == 1);
+    CHECK(invoke<int>(*code, true, true) == 0);
+    CHECK(invoke<int>(*code, false, true) == 666);
+    CHECK(invoke<int>(*code, true, false) == 667);
+}
+
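+// Builds the same Add(select, 42) twice, once feeding the Check and once
+// feeding the returned sum. CSE should merge the two Adds without breaking
+// the check, and generationCount confirms the check code is emitted once.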
+void testCheckSelectAndCSE()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    auto* selectValue = root->appendNew<Value>(
+        proc, Select, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(
+                    proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), 0xff)),
+        root->appendNew<Const32Value>(proc, Origin(), -42),
+        root->appendNew<Const32Value>(proc, Origin(), 35));
+
+    auto* constant = root->appendNew<Const32Value>(proc, Origin(), 42);
+    auto* addValue = root->appendNew<Value>(proc, Add, Origin(), selectValue, constant);
+
+    CheckValue* check = root->appendNew<CheckValue>(proc, Check, Origin(), addValue);
+    unsigned generationCount = 0;
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            generationCount++;
+            jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+
+    auto* addValue2 = root->appendNew<Value>(proc, Add, Origin(), selectValue, constant);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Add, Origin(), addValue, addValue2));
+
+    auto code = compile(proc);
+    CHECK(generationCount == 1);
+    CHECK(invoke<int>(*code, true) == 0);
+    CHECK(invoke<int>(*code, false) == 666);
+}
+
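+// Reference oracle for the pow test below: square-and-multiply exponentiation
+// for exponents in [0, 1000], deferring to libm's pow() otherwise.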
+double b3Pow(double x, int y)
+{
+    if (y < 0 || y > 1000)
+        return pow(x, y);
+    double result = 1;
+    while (y) {
+        if (y & 1)
+            result *= x;
+        x *= x;
+        y >>= 1;
+    }
+    return result;
+}
+
+void testPowDoubleByIntegerLoop(double xOperand, int32_t yOperand)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* x = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* y = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    auto result = powDoubleInt32(proc, root, Origin(), x, y);
+    BasicBlock* continuation = result.first;
+    continuation->appendNewControlValue(proc, Return, Origin(), result.second);
+
+    CHECK(isIdentical(compileAndRun<double>(proc, xOperand, yOperand), b3Pow(xOperand, yOperand)));
+}
+
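+// In testTruncOrHigh the OR-ed constant only sets bit 32, which the enclosing
+// Trunc throws away, so the BitOr may legally be optimized out entirely;
+// testTruncOrLow's constant sits inside the low 32 bits and must survive.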
+void testTruncOrHigh()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<Value>(
+                proc, BitOr, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Const64Value>(proc, Origin(), 0x100000000))));
+
+    int64_t value = 0x123456781234;
+    CHECK(compileAndRun<int32_t>(proc, value) == 0x56781234);
+}
+
+void testTruncOrLow()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<Value>(
+                proc, BitOr, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Const64Value>(proc, Origin(), 0x1000000))));
+
+    int64_t value = 0x123456781234;
+    CHECK(compileAndRun<int32_t>(proc, value) == 0x57781234);
+}
+
+void testBitAndOrHigh()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, BitOr, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Const64Value>(proc, Origin(), 0x8)),
+            root->appendNew<Const64Value>(proc, Origin(), 0x777777777777)));
+
+    int64_t value = 0x123456781234;
+    CHECK(compileAndRun<int64_t>(proc, value) == 0x123456701234ll);
+}
+
+void testBitAndOrLow()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, BitOr, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Const64Value>(proc, Origin(), 0x1)),
+            root->appendNew<Const64Value>(proc, Origin(), 0x777777777777)));
+
+    int64_t value = 0x123456781234;
+    CHECK(compileAndRun<int64_t>(proc, value) == 0x123456701235ll);
+}
+
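+// The Branch tests below compare two Int64s, with the operands variously
+// supplied as registers, an immediate, or a load, and return a bool fetched
+// from memory (Load8Z) in whichever block is taken.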
+void testBranch64Equal(int64_t left, int64_t right)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    bool trueResult = true;
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            thenCase->appendNew<ConstPtrValue>(proc, Origin(), &trueResult)));
+
+    bool elseResult = false;
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            elseCase->appendNew<ConstPtrValue>(proc, Origin(), &elseResult)));
+
+    CHECK(compileAndRun<bool>(proc, left, right) == (left == right));
+}
+
+void testBranch64EqualImm(int64_t left, int64_t right)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<Const64Value>(proc, Origin(), right);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    bool trueResult = true;
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            thenCase->appendNew<ConstPtrValue>(proc, Origin(), &trueResult)));
+
+    bool elseResult = false;
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            elseCase->appendNew<ConstPtrValue>(proc, Origin(), &elseResult)));
+
+    CHECK(compileAndRun<bool>(proc, left) == (left == right));
+}
+
+void testBranch64EqualMem(int64_t left, int64_t right)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew<MemoryValue>(
+        proc, Load, pointerType(), Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    bool trueResult = true;
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            thenCase->appendNew<ConstPtrValue>(proc, Origin(), &trueResult)));
+
+    bool elseResult = false;
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            elseCase->appendNew<ConstPtrValue>(proc, Origin(), &elseResult)));
+
+    CHECK(compileAndRun<bool>(proc, &left, right) == (left == right));
+}
+
+void testBranch64EqualMemImm(int64_t left, int64_t right)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew<MemoryValue>(
+        proc, Load, pointerType(), Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew<Const64Value>(proc, Origin(), right);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    bool trueResult = true;
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            thenCase->appendNew<ConstPtrValue>(proc, Origin(), &trueResult)));
+
+    bool elseResult = false;
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<MemoryValue>(
+            proc, Load8Z, Origin(),
+            elseCase->appendNew<ConstPtrValue>(proc, Origin(), &elseResult)));
+
+    CHECK(compileAndRun<bool>(proc, &left) == (left == right));
+}
+
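+// Store8 writes only the low byte of the truncated argument and Load8Z reloads
+// it with zero extension, so the round trip must produce value & 0xff (and
+// value & 0xffff for the 16-bit variant).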
+void testStore8Load8Z(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    
+    int8_t byte;
+    Value* ptr = root->appendNew<ConstPtrValue>(proc, Origin(), &byte);
+
+    root->appendNew<MemoryValue>(
+        proc, Store8, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        ptr);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(proc, Load8Z, Origin(), ptr));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<uint8_t>(value));
+}
+
+void testStore16Load16Z(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    
+    int16_t byte;
+    Value* ptr = root->appendNew<ConstPtrValue>(proc, Origin(), &byte);
+    
+    root->appendNew<MemoryValue>(
+        proc, Store16, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        ptr);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(proc, Load16Z, Origin(), ptr));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<uint16_t>(value));
+}
+
+void testSShrShl32(int32_t value, int32_t sshrAmount, int32_t shlAmount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(),
+            root->appendNew<Value>(
+                proc, Shl, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), shlAmount)),
+            root->appendNew<Const32Value>(proc, Origin(), sshrAmount)));
+
+    CHECK(
+        compileAndRun<int32_t>(proc, value)
+        == ((value << (shlAmount & 31)) >> (sshrAmount & 31)));
+}
+
+void testSShrShl64(int64_t value, int32_t sshrAmount, int32_t shlAmount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(),
+            root->appendNew<Value>(
+                proc, Shl, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Const32Value>(proc, Origin(), shlAmount)),
+            root->appendNew<Const32Value>(proc, Origin(), sshrAmount)));
+
+    CHECK(
+        compileAndRun<int64_t>(proc, value)
+        == ((value << (shlAmount & 63)) >> (sshrAmount & 63)));
+}
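+
+// A worked example of the Shl/SShr pairs above (illustrative numbers, not part
+// of the original test): with value = 0x123456 and shlAmount = sshrAmount = 24,
+// the 32-bit variant computes (0x123456 << 24) == 0x56000000, and the arithmetic
+// shift right by 24 then yields 0x56 -- the pair sign-extends the low byte. The
+// (amount & 31) and (amount & 63) masks in the CHECKs mirror how x86 and ARM64
+// shifters consume only the low bits of the shift amount.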
+
+template<typename T>
+void testRotR(T valueInt, int32_t shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    if (sizeof(T) == 4)
+        value = root->appendNew<Value>(proc, Trunc, Origin(), value);
+
+    Value* amount = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNewControlValue(proc, Return, Origin(),
+        root->appendNew<Value>(proc, RotR, Origin(), value, amount));
+
+    CHECK_EQ(compileAndRun<T>(proc, valueInt, shift), rotateRight(valueInt, shift));
+}
+
+template<typename T>
+void testRotL(T valueInt, int32_t shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    if (sizeof(T) == 4)
+        value = root->appendNew<Value>(proc, Trunc, Origin(), value);
+
+    Value* amount = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNewControlValue(proc, Return, Origin(),
+        root->appendNew<Value>(proc, RotL, Origin(), value, amount));
+
+    CHECK_EQ(compileAndRun<T>(proc, valueInt, shift), rotateLeft(valueInt, shift));
+}
+
+template<typename T>
+void testRotRWithImmShift(T valueInt, int32_t shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    if (sizeof(T) == 4)
+        value = root->appendNew<Value>(proc, Trunc, Origin(), value);
+
+    Value* amount = root->appendIntConstant(proc, Origin(), Int32, shift);
+    root->appendNewControlValue(proc, Return, Origin(),
+        root->appendNew<Value>(proc, RotR, Origin(), value, amount));
+
+    CHECK_EQ(compileAndRun<T>(proc, valueInt, shift), rotateRight(valueInt, shift));
+}
+
+template<typename T>
+void testRotLWithImmShift(T valueInt, int32_t shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    if (sizeof(T) == 4)
+        value = root->appendNew<Value>(proc, Trunc, Origin(), value);
+
+    Value* amount = root->appendIntConstant(proc, Origin(), Int32, shift);
+    root->appendNewControlValue(proc, Return, Origin(),
+        root->appendNew<Value>(proc, RotL, Origin(), value, amount));
+
+    CHECK_EQ(compileAndRun<T>(proc, valueInt, shift), rotateLeft(valueInt, shift));
+}
+
+template<typename T>
+void testComputeDivisionMagic(T value, T magicMultiplier, unsigned shift)
+{
+    DivisionMagic<T> magic = computeDivisionMagic(value);
+    CHECK(magic.magicMultiplier == magicMultiplier);
+    CHECK(magic.shift == shift);
+}
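+
+// Example of the magic numbers this checks (assumed values, following the usual
+// Hacker's Delight construction): for int32_t division by 7, computeDivisionMagic(7)
+// yields magicMultiplier = 0x92492493 and shift = 2, so x / 7 can be computed as
+// the high 32 bits of x * magicMultiplier, shifted right by 2, plus a correction
+// for the sign of x -- no divide instruction needed.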
+
+void testTrivialInfiniteLoop()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* loop = proc.addBlock();
+    root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+    loop->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
+    compile(proc);
+}
+
+void testFoldPathEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenBlock = proc.addBlock();
+    BasicBlock* elseBlock = proc.addBlock();
+
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(), arg, FrequentedBlock(thenBlock), FrequentedBlock(elseBlock));
+
+    thenBlock->appendNewControlValue(
+        proc, Return, Origin(),
+        thenBlock->appendNew<Value>(
+            proc, Equal, Origin(), arg, thenBlock->appendNew<ConstPtrValue>(proc, Origin(), 0)));
+
+    elseBlock->appendNewControlValue(
+        proc, Return, Origin(),
+        elseBlock->appendNew<Value>(
+            proc, Equal, Origin(), arg, elseBlock->appendNew<ConstPtrValue>(proc, Origin(), 0)));
+
+    auto code = compile(proc);
+    CHECK(invoke<intptr_t>(*code, 0) == 1);
+    CHECK(invoke<intptr_t>(*code, 1) == 0);
+    CHECK(invoke<intptr_t>(*code, 42) == 0);
+}
+
+void testLShiftSelf32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Shl, Origin(), arg, arg));
+
+    auto code = compile(proc);
+
+    auto check = [&] (int32_t value) {
+        CHECK(invoke<int32_t>(*code, value) == value << (value & 31));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+}
+
+void testRShiftSelf32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, SShr, Origin(), arg, arg));
+
+    auto code = compile(proc);
+
+    auto check = [&] (int32_t value) {
+        CHECK(invoke<int32_t>(*code, value) == value >> (value & 31));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+}
+
+void testURShiftSelf32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, ZShr, Origin(), arg, arg));
+
+    auto code = compile(proc);
+
+    auto check = [&] (uint32_t value) {
+        CHECK(invoke<uint32_t>(*code, value) == value >> (value & 31));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+}
+
+void testLShiftSelf64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Shl, Origin(), arg, root->appendNew<Value>(proc, Trunc, Origin(), arg)));
+
+    auto code = compile(proc);
+
+    auto check = [&] (int64_t value) {
+        CHECK(invoke<int64_t>(*code, value) == value << (value & 63));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+    check(63);
+    check(64);
+}
+
+void testRShiftSelf64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SShr, Origin(), arg, root->appendNew<Value>(proc, Trunc, Origin(), arg)));
+
+    auto code = compile(proc);
+
+    auto check = [&] (int64_t value) {
+        CHECK(invoke<int64_t>(*code, value) == value >> (value & 63));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+    check(63);
+    check(64);
+}
+
+void testURShiftSelf64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZShr, Origin(), arg, root->appendNew<Value>(proc, Trunc, Origin(), arg)));
+
+    auto code = compile(proc);
+
+    auto check = [&] (uint64_t value) {
+        CHECK(invoke<uint64_t>(*code, value) == value >> (value & 63));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+    check(63);
+    check(64);
+}
+
+void testPatchpointDoubleRegs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Double, Origin());
+    patchpoint->append(arg, ValueRep(FPRInfo::fpRegT0));
+    patchpoint->resultConstraint = ValueRep(FPRInfo::fpRegT0);
+
+    unsigned numCalls = 0;
+    patchpoint->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            numCalls++;
+        });
+
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    auto code = compile(proc);
+    CHECK(numCalls == 1);
+    CHECK(invoke<double>(*code, 42.5) == 42.5);
+}
+
+void testSpillDefSmallerThanUse()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    // Move32.
+    Value* arg32 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg64 = root->appendNew<Value>(proc, ZExt32, Origin(), arg32);
+
+    // Make sure arg64 is on the stack.
+    PatchpointValue* forceSpill = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+    RegisterSet clobberSet = RegisterSet::allGPRs();
+    clobberSet.exclude(RegisterSet::stackRegisters());
+    clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+    clobberSet.clear(GPRInfo::returnValueGPR); // Force the return value for aliasing below.
+    forceSpill->clobberLate(clobberSet);
+    forceSpill->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.xor64(params[0].gpr(), params[0].gpr());
+        });
+
+    // On x86, Sub admits an address for any operand. If it uses the stack, the top bits must be zero.
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), forceSpill, arg64);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    auto code = compile(proc);
+    CHECK(invoke<int64_t>(*code, 0xffffffff00000000) == 0);
+}
+
+void testSpillUseLargerThanDef()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    RegisterSet clobberSet = RegisterSet::allGPRs();
+    clobberSet.exclude(RegisterSet::stackRegisters());
+    clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+    Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            condition),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* truncated = thenCase->appendNew<Value>(proc, ZExt32, Origin(),
+        thenCase->appendNew<Value>(proc, Trunc, Origin(), argument));
+    UpsilonValue* thenResult = thenCase->appendNew<UpsilonValue>(proc, Origin(), truncated);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    UpsilonValue* elseResult = elseCase->appendNew<UpsilonValue>(proc, Origin(), argument);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    for (unsigned i = 0; i < 100; ++i) {
+        PatchpointValue* preventTailDuplication = tail->appendNew<PatchpointValue>(proc, Void, Origin());
+        preventTailDuplication->clobberLate(clobberSet);
+        preventTailDuplication->setGenerator([] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    PatchpointValue* forceSpill = tail->appendNew<PatchpointValue>(proc, Void, Origin());
+    forceSpill->clobberLate(clobberSet);
+    forceSpill->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            clobberSet.forEach([&] (Reg reg) {
+                jit.move(CCallHelpers::TrustedImm64(0xffffffffffffffff), reg.gpr());
+            });
+        });
+
+    Value* phi = tail->appendNew<Value>(proc, Phi, Int64, Origin());
+    thenResult->setPhi(phi);
+    elseResult->setPhi(phi);
+    tail->appendNewControlValue(proc, Return, Origin(), phi);
+
+    auto code = compile(proc);
+    CHECK(invoke<uint64_t>(*code, 1, 0xffffffff00000000) == 0);
+    CHECK(invoke<uint64_t>(*code, 0, 0xffffffff00000000) == 0xffffffff00000000);
+
+    // Check a second time, since the previous run's value is still on the stack.
+    CHECK(invoke<uint64_t>(*code, 1, 0xffffffff00000000) == 0);
+}
+
+void testLateRegister()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    // This works by making all but one register be an input to the first patchpoint as LateRegister.
+    // The remaining register is just a regular Register input. We assert that our result is the
+    // regular register input; there is no other way for the register allocator to arrange things,
+    // because LateRegister interferes with the result.
+    // Then, the second patchpoint takes the result of the first as an argument and asks for
+    // it in a register that was a LateRegister. This is to incentivize the register allocator
+    // to use that LateRegister as the result for the first patchpoint. But of course it cannot do
+    // that, so it must issue a mov after the first patchpoint, from the first's result into the
+    // second's input.
+
+    RegisterSet regs = RegisterSet::allGPRs();
+    regs.exclude(RegisterSet::stackRegisters());
+    regs.exclude(RegisterSet::reservedHardwareRegisters());
+    Vector<Value*> lateUseArgs;
+    unsigned result = 0;
+    for (GPRReg reg = CCallHelpers::firstRegister(); reg <= CCallHelpers::lastRegister(); reg = CCallHelpers::nextRegister(reg)) {
+        if (!regs.get(reg))
+            continue;
+        result++;
+        if (reg == GPRInfo::regT0)
+            continue;
+        Value* value = root->appendNew<Const64Value>(proc, Origin(), 1);
+        lateUseArgs.append(value);
+    }
+    Value* regularUse = root->appendNew<Const64Value>(proc, Origin(), 1);
+    PatchpointValue* firstPatchpoint = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+    {
+        unsigned i = 0;
+        for (GPRReg reg = CCallHelpers::firstRegister(); reg <= CCallHelpers::lastRegister(); reg = CCallHelpers::nextRegister(reg)) {
+            if (!regs.get(reg))
+                continue;
+            if (reg == GPRInfo::regT0)
+                continue;
+            Value* value = lateUseArgs[i++];
+            firstPatchpoint->append(value, ValueRep::lateReg(reg));
+        }
+        firstPatchpoint->append(regularUse, ValueRep::reg(GPRInfo::regT0));
+    }
+
+    firstPatchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params[0].gpr() == GPRInfo::regT0);
+            // Note that regT0 should also start off as 1, so we're implicitly starting our add with 1, which is also an argument.
+            unsigned skipped = 0;
+            for (unsigned i = 1; i < params.size(); i++) {
+                if (params[i].gpr() == params[0].gpr()) {
+                    skipped = i;
+                    continue;
+                }
+                jit.add64(params[i].gpr(), params[0].gpr());
+            }
+            CHECK(!!skipped);
+        });
+
+    PatchpointValue* secondPatchpoint = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+    secondPatchpoint->append(firstPatchpoint, ValueRep::reg(GPRInfo::regT1));
+    secondPatchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params[1].gpr() == GPRInfo::regT1);
+            jit.nop();
+            jit.nop();
+            jit.move(params[1].gpr(), params[0].gpr());
+            jit.nop();
+            jit.nop();
+        });
+    root->appendNewControlValue(proc, Return, Origin(), secondPatchpoint);
+    
+    auto code = compile(proc);
+    CHECK(invoke<uint64_t>(*code) == result);
+}
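+
+// A sketch of the code shape this test expects (assumed, beyond what the CHECKs
+// assert): the first patchpoint produces its result in regT0, and since every
+// LateRegister input interferes with that result, the allocator must emit a
+// "move regT0 -> regT1" between the two patchpoints to satisfy the second
+// patchpoint's request for the value in regT1.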
+
+void interpreterPrint(Vector<intptr_t>* stream, intptr_t value)
+{
+    stream->append(value);
+}
+
+void testInterpreter()
+{
+    // This implements a silly interpreter to test building custom switch statements using
+    // Patchpoint.
+    
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* dispatch = proc.addBlock();
+    BasicBlock* addToDataPointer = proc.addBlock();
+    BasicBlock* addToCodePointer = proc.addBlock();
+    BasicBlock* addToCodePointerTaken = proc.addBlock();
+    BasicBlock* addToCodePointerNotTaken = proc.addBlock();
+    BasicBlock* addToData = proc.addBlock();
+    BasicBlock* print = proc.addBlock();
+    BasicBlock* stop = proc.addBlock();
+    
+    Variable* dataPointer = proc.addVariable(pointerType());
+    Variable* codePointer = proc.addVariable(pointerType());
+    
+    root->appendNew<VariableValue>(
+        proc, Set, Origin(), dataPointer,
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNew<VariableValue>(
+        proc, Set, Origin(), codePointer,
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* context = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+    // NOTE: It's totally valid for this patchpoint to be tail-duplicated.
+    Value* codePointerValue =
+        dispatch->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+    Value* opcode = dispatch->appendNew<MemoryValue>(
+        proc, Load, pointerType(), Origin(), codePointerValue);
+    PatchpointValue* polyJump = dispatch->appendNew<PatchpointValue>(proc, Void, Origin());
+    polyJump->effects = Effects();
+    polyJump->effects.terminal = true;
+    polyJump->appendSomeRegister(opcode);
+    polyJump->clobber(RegisterSet::macroScratchRegisters());
+    polyJump->numGPScratchRegisters++;
+    dispatch->appendSuccessor(FrequentedBlock(addToDataPointer));
+    dispatch->appendSuccessor(FrequentedBlock(addToCodePointer));
+    dispatch->appendSuccessor(FrequentedBlock(addToData));
+    dispatch->appendSuccessor(FrequentedBlock(print));
+    dispatch->appendSuccessor(FrequentedBlock(stop));
+    
+    // Our "opcodes".
+    static const intptr_t AddDP = 0;
+    static const intptr_t AddCP = 1;
+    static const intptr_t Add = 2;
+    static const intptr_t Print = 3;
+    static const intptr_t Stop = 4;
+    
+    polyJump->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+
+            MacroAssemblerCodePtr* jumpTable = bitwise_cast<MacroAssemblerCodePtr*>(
+                params.proc().addDataSection(sizeof(MacroAssemblerCodePtr) * labels.size()));
+
+            jit.move(CCallHelpers::TrustedImmPtr(jumpTable), params.gpScratch(0));
+            jit.jump(CCallHelpers::BaseIndex(params.gpScratch(0), params[0].gpr(), CCallHelpers::timesPtr()));
+            
+            jit.addLinkTask(
+                [&, jumpTable, labels] (LinkBuffer& linkBuffer) {
+                    for (unsigned i = labels.size(); i--;)
+                        jumpTable[i] = linkBuffer.locationOf(*labels[i]);
+                });
+        });
+    
+    // AddDP <operand>: adds <operand> to DP.
+    codePointerValue =
+        addToDataPointer->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+    addToDataPointer->appendNew<VariableValue>(
+        proc, Set, Origin(), dataPointer,
+        addToDataPointer->appendNew<Value>(
+            proc, B3::Add, Origin(),
+            addToDataPointer->appendNew<VariableValue>(proc, B3::Get, Origin(), dataPointer),
+            addToDataPointer->appendNew<Value>(
+                proc, Mul, Origin(),
+                addToDataPointer->appendNew<MemoryValue>(
+                    proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t)),
+                addToDataPointer->appendIntConstant(
+                    proc, Origin(), pointerType(), sizeof(intptr_t)))));
+    addToDataPointer->appendNew<VariableValue>(
+        proc, Set, Origin(), codePointer,
+        addToDataPointer->appendNew<Value>(
+            proc, B3::Add, Origin(), codePointerValue,
+            addToDataPointer->appendIntConstant(
+                proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+    addToDataPointer->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+    
+    // AddCP <operand>: adds <operand> to CP if the current value at DP is non-zero, otherwise
+    // falls through normally.
+    codePointerValue =
+        addToCodePointer->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+    Value* dataPointerValue =
+        addToCodePointer->appendNew<VariableValue>(proc, B3::Get, Origin(), dataPointer);
+    addToCodePointer->appendNewControlValue(
+        proc, Branch, Origin(),
+        addToCodePointer->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(), dataPointerValue),
+        FrequentedBlock(addToCodePointerTaken), FrequentedBlock(addToCodePointerNotTaken));
+    addToCodePointerTaken->appendNew<VariableValue>(
+        proc, Set, Origin(), codePointer,
+        addToCodePointerTaken->appendNew<Value>(
+            proc, B3::Add, Origin(), codePointerValue,
+            addToCodePointerTaken->appendNew<Value>(
+                proc, Mul, Origin(),
+                addToCodePointerTaken->appendNew<MemoryValue>(
+                    proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t)),
+                addToCodePointerTaken->appendIntConstant(
+                    proc, Origin(), pointerType(), sizeof(intptr_t)))));
+    addToCodePointerTaken->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+    addToCodePointerNotTaken->appendNew<VariableValue>(
+        proc, Set, Origin(), codePointer,
+        addToCodePointerNotTaken->appendNew<Value>(
+            proc, B3::Add, Origin(), codePointerValue,
+            addToCodePointerNotTaken->appendIntConstant(
+                proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+    addToCodePointerNotTaken->appendNewControlValue(
+        proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+    // Add <operand>: adds <operand> to the slot pointed to by DP.
+    codePointerValue = addToData->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+    dataPointerValue = addToData->appendNew<VariableValue>(proc, B3::Get, Origin(), dataPointer);
+    addToData->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        addToData->appendNew<Value>(
+            proc, B3::Add, Origin(),
+            addToData->appendNew<MemoryValue>(
+                proc, Load, pointerType(), Origin(), dataPointerValue),
+            addToData->appendNew<MemoryValue>(
+                proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t))),
+        dataPointerValue);
+    addToData->appendNew<VariableValue>(
+        proc, Set, Origin(), codePointer,
+        addToData->appendNew<Value>(
+            proc, B3::Add, Origin(), codePointerValue,
+            addToData->appendIntConstant(proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+    addToData->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+    
+    // Print: "prints" the value pointed to by DP. What this actually means is that the value is
+    // appended to the stream vector by the interpreterPrint function.
+    codePointerValue = print->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+    dataPointerValue = print->appendNew<VariableValue>(proc, B3::Get, Origin(), dataPointer);
+    print->appendNew<CCallValue>(
+        proc, Void, Origin(),
+        print->appendNew<ConstPtrValue>(
+            proc, Origin(), bitwise_cast<void*>(interpreterPrint)),
+        context,
+        print->appendNew<MemoryValue>(proc, Load, pointerType(), Origin(), dataPointerValue));
+    print->appendNew<VariableValue>(
+        proc, Set, Origin(), codePointer,
+        print->appendNew<Value>(
+            proc, B3::Add, Origin(), codePointerValue,
+            print->appendIntConstant(proc, Origin(), pointerType(), sizeof(intptr_t))));
+    print->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+    
+    // Stop: returns.
+    stop->appendNewControlValue(
+        proc, Return, Origin(),
+        stop->appendIntConstant(proc, Origin(), pointerType(), 0));
+    
+    auto interpreter = compile(proc);
+    
+    Vector<intptr_t> data;
+    Vector<intptr_t> code;
+    Vector<intptr_t> stream;
+    
+    data.append(1);
+    data.append(0);
+    
+    if (shouldBeVerbose())
+        dataLog("data = ", listDump(data), "\n");
+    
+    // We'll write a program that prints the numbers 1..100.
+    // We expect DP to point at #0.
+    code.append(AddCP);
+    code.append(6); // go to loop body
+    
+    // Loop re-entry:
+    // We expect DP to point at #1 and for #1 to be offset by -100.
+    code.append(Add);
+    code.append(100);
+    
+    code.append(AddDP);
+    code.append(-1);
+    
+    // Loop header:
+    // We expect DP to point at #0.
+    code.append(AddDP);
+    code.append(1);
+    
+    code.append(Add);
+    code.append(1);
+    
+    code.append(Print);
+    
+    code.append(Add);
+    code.append(-100);
+    
+    // We want to stop if it's zero and continue if it's non-zero. AddCP takes the branch if it's
+    // non-zero.
+    code.append(AddCP);
+    code.append(-11); // go to loop re-entry.
+    
+    code.append(Stop);
+    
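+    // Layout of the opcode stream built above (indices in intptr_t slots;
+    // offsets are relative to the opcode's own slot, as implemented by the
+    // handlers): 0: AddCP 6 -> jump to 6 if *DP != 0; 2: Add 100 (loop
+    // re-entry); 4: AddDP -1; 6: AddDP 1 (loop header); 8: Add 1; 10: Print;
+    // 11: Add -100; 13: AddCP -11 -> back to 2 while *DP != 0; 15: Stop.
+    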
+    if (shouldBeVerbose())
+        dataLog("code = ", listDump(code), "\n");
+    
+    CHECK(!invoke<intptr_t>(*interpreter, data.data(), code.data(), &stream));
+    
+    CHECK(stream.size() == 100);
+    for (unsigned i = 0; i < 100; ++i)
+        CHECK(stream[i] == i + 1);
+    
+    if (shouldBeVerbose())
+        dataLog("stream = ", listDump(stream), "\n");
+}
+
+void testReduceStrengthCheckBottomUseInAnotherBlock()
+{
+    Procedure proc;
+    
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    
+    CheckValue* check = one->appendNew<CheckValue>(
+        proc, Check, Origin(), one->appendNew<Const32Value>(proc, Origin(), 1));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    Value* arg = one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    one->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(two));
+    
+    check = two->appendNew<CheckValue>(
+        proc, CheckAdd, Origin(), arg,
+        two->appendNew<ConstPtrValue>(proc, Origin(), 1));
+    check->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"Should not execute");
+        });
+    two->appendNewControlValue(proc, Return, Origin(), check);
+    
+    proc.resetReachability();
+    reduceStrength(proc);
+}
+
+void testResetReachabilityDanglingReference()
+{
+    Procedure proc;
+    
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    
+    UpsilonValue* upsilon = one->appendNew<UpsilonValue>(
+        proc, Origin(), one->appendNew<Const32Value>(proc, Origin(), 42));
+    one->appendNewControlValue(proc, Oops, Origin());
+    
+    Value* phi = two->appendNew<Value>(proc, Phi, Int32, Origin());
+    upsilon->setPhi(phi);
+    two->appendNewControlValue(proc, Oops, Origin());
+    
+    proc.resetReachability();
+    validate(proc);
+}
+
+void testEntrySwitchSimple()
+{
+    Procedure proc;
+    proc.setNumEntrypoints(3);
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    BasicBlock* three = proc.addBlock();
+    
+    root->appendNew<Value>(proc, EntrySwitch, Origin());
+    root->appendSuccessor(FrequentedBlock(one));
+    root->appendSuccessor(FrequentedBlock(two));
+    root->appendSuccessor(FrequentedBlock(three));
+    
+    one->appendNew<Value>(
+        proc, Return, Origin(),
+        one->appendNew<Value>(
+            proc, Add, Origin(),
+            one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    
+    two->appendNew<Value>(
+        proc, Return, Origin(),
+        two->appendNew<Value>(
+            proc, Sub, Origin(),
+            two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    
+    three->appendNew<Value>(
+        proc, Return, Origin(),
+        three->appendNew<Value>(
+            proc, Mul, Origin(),
+            three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+    
+    CHECK(invoke<int>(labelOne, 1, 2) == 3);
+    CHECK(invoke<int>(labelTwo, 1, 2) == -1);
+    CHECK(invoke<int>(labelThree, 1, 2) == 2);
+    CHECK(invoke<int>(labelOne, -1, 2) == 1);
+    CHECK(invoke<int>(labelTwo, -1, 2) == -3);
+    CHECK(invoke<int>(labelThree, -1, 2) == -2);
+}
+
+void testEntrySwitchNoEntrySwitch()
+{
+    Procedure proc;
+    proc.setNumEntrypoints(3);
+    
+    BasicBlock* root = proc.addBlock();
+    
+    root->appendNew<Value>(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+    
+    CHECK_EQ(invoke<int>(labelOne, 1, 2), 3);
+    CHECK_EQ(invoke<int>(labelTwo, 1, 2), 3);
+    CHECK_EQ(invoke<int>(labelThree, 1, 2), 3);
+    CHECK_EQ(invoke<int>(labelOne, -1, 2), 1);
+    CHECK_EQ(invoke<int>(labelTwo, -1, 2), 1);
+    CHECK_EQ(invoke<int>(labelThree, -1, 2), 1);
+}
+
+void testEntrySwitchWithCommonPaths()
+{
+    Procedure proc;
+    proc.setNumEntrypoints(3);
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    BasicBlock* three = proc.addBlock();
+    BasicBlock* end = proc.addBlock();
+    
+    root->appendNew<Value>(proc, EntrySwitch, Origin());
+    root->appendSuccessor(FrequentedBlock(one));
+    root->appendSuccessor(FrequentedBlock(two));
+    root->appendSuccessor(FrequentedBlock(three));
+    
+    UpsilonValue* upsilonOne = one->appendNew<UpsilonValue>(
+        proc, Origin(),
+        one->appendNew<Value>(
+            proc, Add, Origin(),
+            one->appendNew<Value>(
+                proc, Trunc, Origin(),
+                one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            one->appendNew<Value>(
+                proc, Trunc, Origin(),
+                one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+    one->appendNew<Value>(proc, Jump, Origin());
+    one->setSuccessors(FrequentedBlock(end));
+    
+    UpsilonValue* upsilonTwo = two->appendNew<UpsilonValue>(
+        proc, Origin(),
+        two->appendNew<Value>(
+            proc, Sub, Origin(),
+            two->appendNew<Value>(
+                proc, Trunc, Origin(),
+                two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            two->appendNew<Value>(
+                proc, Trunc, Origin(),
+                two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+    two->appendNew<Value>(proc, Jump, Origin());
+    two->setSuccessors(FrequentedBlock(end));
+    
+    UpsilonValue* upsilonThree = three->appendNew<UpsilonValue>(
+        proc, Origin(),
+        three->appendNew<Value>(
+            proc, Mul, Origin(),
+            three->appendNew<Value>(
+                proc, Trunc, Origin(),
+                three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            three->appendNew<Value>(
+                proc, Trunc, Origin(),
+                three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+    three->appendNew<Value>(proc, Jump, Origin());
+    three->setSuccessors(FrequentedBlock(end));
+    
+    Value* phi = end->appendNew<Value>(proc, Phi, Int32, Origin());
+    upsilonOne->setPhi(phi);
+    upsilonTwo->setPhi(phi);
+    upsilonThree->setPhi(phi);
+    
+    end->appendNew<Value>(
+        proc, Return, Origin(),
+        end->appendNew<Value>(
+            proc, chill(Mod), Origin(),
+            phi, end->appendNew<Value>(
+                proc, Trunc, Origin(),
+                end->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2))));
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+    
+    CHECK_EQ(invoke<int>(labelOne, 1, 2, 10), 3);
+    CHECK_EQ(invoke<int>(labelTwo, 1, 2, 10), -1);
+    CHECK_EQ(invoke<int>(labelThree, 1, 2, 10), 2);
+    CHECK_EQ(invoke<int>(labelOne, -1, 2, 10), 1);
+    CHECK_EQ(invoke<int>(labelTwo, -1, 2, 10), -3);
+    CHECK_EQ(invoke<int>(labelThree, -1, 2, 10), -2);
+    CHECK_EQ(invoke<int>(labelOne, 1, 2, 2), 1);
+    CHECK_EQ(invoke<int>(labelTwo, 1, 2, 2), -1);
+    CHECK_EQ(invoke<int>(labelThree, 1, 2, 2), 0);
+    CHECK_EQ(invoke<int>(labelOne, -1, 2, 2), 1);
+    CHECK_EQ(invoke<int>(labelTwo, -1, 2, 2), -1);
+    CHECK_EQ(invoke<int>(labelThree, -1, 2, 2), 0);
+    CHECK_EQ(invoke<int>(labelOne, 1, 2, 0), 0);
+    CHECK_EQ(invoke<int>(labelTwo, 1, 2, 0), 0);
+    CHECK_EQ(invoke<int>(labelThree, 1, 2, 0), 0);
+    CHECK_EQ(invoke<int>(labelOne, -1, 2, 0), 0);
+    CHECK_EQ(invoke<int>(labelTwo, -1, 2, 0), 0);
+    CHECK_EQ(invoke<int>(labelThree, -1, 2, 0), 0);
+}
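+
+// Note on the modulus-zero CHECKs above: chill(Mod) is B3's non-trapping
+// modulo. It is defined to produce 0 for a zero divisor (and it likewise
+// cannot trap on INT_MIN % -1), which is why every case with a zero third
+// argument expects 0 regardless of the entrypoint taken.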
+
+void testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint()
+{
+    Procedure proc;
+    proc.setNumEntrypoints(3);
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* negate = proc.addBlock();
+    BasicBlock* dispatch = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    BasicBlock* three = proc.addBlock();
+    BasicBlock* end = proc.addBlock();
+
+    UpsilonValue* upsilonBase = root->appendNew<UpsilonValue>(
+        proc, Origin(), root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    root->appendNew<Value>(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3),
+            root->appendNew<Const64Value>(proc, Origin(), 0xff)));
+    root->setSuccessors(FrequentedBlock(negate), FrequentedBlock(dispatch));
+    
+    UpsilonValue* upsilonNegate = negate->appendNew<UpsilonValue>(
+        proc, Origin(),
+        negate->appendNew<Value>(
+            proc, Neg, Origin(),
+            negate->appendNew<Value>(
+                proc, Trunc, Origin(),
+                negate->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+    negate->appendNew<Value>(proc, Jump, Origin());
+    negate->setSuccessors(FrequentedBlock(dispatch));
+    
+    Value* arg0 = dispatch->appendNew<Value>(proc, Phi, Int32, Origin());
+    upsilonBase->setPhi(arg0);
+    upsilonNegate->setPhi(arg0);
+    dispatch->appendNew<Value>(proc, EntrySwitch, Origin());
+    dispatch->appendSuccessor(FrequentedBlock(one));
+    dispatch->appendSuccessor(FrequentedBlock(two));
+    dispatch->appendSuccessor(FrequentedBlock(three));
+    
+    UpsilonValue* upsilonOne = one->appendNew<UpsilonValue>(
+        proc, Origin(),
+        one->appendNew<Value>(
+            proc, Add, Origin(),
+            arg0, one->appendNew<Value>(
+                proc, Trunc, Origin(),
+                one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+    one->appendNew<Value>(proc, Jump, Origin());
+    one->setSuccessors(FrequentedBlock(end));
+    
+    UpsilonValue* upsilonTwo = two->appendNew<UpsilonValue>(
+        proc, Origin(),
+        two->appendNew<Value>(
+            proc, Sub, Origin(),
+            arg0, two->appendNew<Value>(
+                proc, Trunc, Origin(),
+                two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+    two->appendNew<Value>(proc, Jump, Origin());
+    two->setSuccessors(FrequentedBlock(end));
+    
+    UpsilonValue* upsilonThree = three->appendNew<UpsilonValue>(
+        proc, Origin(),
+        three->appendNew<Value>(
+            proc, Mul, Origin(),
+            arg0, three->appendNew<Value>(
+                proc, Trunc, Origin(),
+                three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+    three->appendNew<Value>(proc, Jump, Origin());
+    three->setSuccessors(FrequentedBlock(end));
+    
+    Value* phi = end->appendNew<Value>(proc, Phi, Int32, Origin());
+    upsilonOne->setPhi(phi);
+    upsilonTwo->setPhi(phi);
+    upsilonThree->setPhi(phi);
+    
+    end->appendNew<Value>(
+        proc, Return, Origin(),
+        end->appendNew<Value>(
+            proc, chill(Mod), Origin(),
+            phi, end->appendNew<Value>(
+                proc, Trunc, Origin(),
+                end->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2))));
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+    
+    CHECK_EQ(invoke<int>(labelOne, 1, 2, 10, false), 3);
+    CHECK_EQ(invoke<int>(labelTwo, 1, 2, 10, false), -1);
+    CHECK_EQ(invoke<int>(labelThree, 1, 2, 10, false), 2);
+    CHECK_EQ(invoke<int>(labelOne, -1, 2, 10, false), 1);
+    CHECK_EQ(invoke<int>(labelTwo, -1, 2, 10, false), -3);
+    CHECK_EQ(invoke<int>(labelThree, -1, 2, 10, false), -2);
+    CHECK_EQ(invoke<int>(labelOne, 1, 2, 10, true), 1);
+    CHECK_EQ(invoke<int>(labelTwo, 1, 2, 10, true), -3);
+    CHECK_EQ(invoke<int>(labelThree, 1, 2, 10, true), -2);
+    CHECK_EQ(invoke<int>(labelOne, -1, 2, 10, true), 3);
+    CHECK_EQ(invoke<int>(labelTwo, -1, 2, 10, true), -1);
+    CHECK_EQ(invoke<int>(labelThree, -1, 2, 10, true), 2);
+    CHECK_EQ(invoke<int>(labelOne, 1, 2, 2, false), 1);
+    CHECK_EQ(invoke<int>(labelTwo, 1, 2, 2, false), -1);
+    CHECK_EQ(invoke<int>(labelThree, 1, 2, 2, false), 0);
+    CHECK_EQ(invoke<int>(labelOne, -1, 2, 2, false), 1);
+    CHECK_EQ(invoke<int>(labelTwo, -1, 2, 2, false), -1);
+    CHECK_EQ(invoke<int>(labelThree, -1, 2, 2, false), 0);
+    CHECK_EQ(invoke<int>(labelOne, 1, 2, 0, false), 0);
+    CHECK_EQ(invoke<int>(labelTwo, 1, 2, 0, false), 0);
+    CHECK_EQ(invoke<int>(labelThree, 1, 2, 0, false), 0);
+    CHECK_EQ(invoke<int>(labelOne, -1, 2, 0, false), 0);
+    CHECK_EQ(invoke<int>(labelTwo, -1, 2, 0, false), 0);
+    CHECK_EQ(invoke<int>(labelThree, -1, 2, 0, false), 0);
+}
+
+void testEntrySwitchLoop()
+{
+    // This is a completely absurd use of EntrySwitch, where it impacts the loop condition. This
+    // should cause duplication of nearly the entire Procedure. At the time of writing, we ended
+    // up duplicating all of it, which is fine. It's important to test this case, to make sure that
+    // the duplication algorithm can handle interesting control flow.
+    
+    Procedure proc;
+    proc.setNumEntrypoints(2);
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* loopHeader = proc.addBlock();
+    BasicBlock* loopFooter = proc.addBlock();
+    BasicBlock* end = proc.addBlock();
+
+    UpsilonValue* initialValue = root->appendNew<UpsilonValue>(
+        proc, Origin(), root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+    root->appendNew<Value>(proc, Jump, Origin());
+    root->setSuccessors(loopHeader);
+    
+    Value* valueInLoop = loopHeader->appendNew<Value>(proc, Phi, Int32, Origin());
+    initialValue->setPhi(valueInLoop);
+    Value* newValue = loopHeader->appendNew<Value>(
+        proc, Add, Origin(), valueInLoop,
+        loopHeader->appendNew<Const32Value>(proc, Origin(), 1));
+    loopHeader->appendNew<Value>(proc, EntrySwitch, Origin());
+    loopHeader->appendSuccessor(end);
+    loopHeader->appendSuccessor(loopFooter);
+    
+    loopFooter->appendNew<UpsilonValue>(proc, Origin(), newValue, valueInLoop);
+    loopFooter->appendNew<Value>(
+        proc, Branch, Origin(),
+        loopFooter->appendNew<Value>(
+            proc, LessThan, Origin(), newValue,
+            loopFooter->appendNew<Const32Value>(proc, Origin(), 100)));
+    loopFooter->setSuccessors(loopHeader, end);
+    
+    end->appendNew<Value>(proc, Return, Origin(), newValue);
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+
+    CHECK(invoke<int>(labelOne, 0) == 1);
+    CHECK(invoke<int>(labelOne, 42) == 43);
+    CHECK(invoke<int>(labelOne, 1000) == 1001);
+    
+    CHECK(invoke<int>(labelTwo, 0) == 100);
+    CHECK(invoke<int>(labelTwo, 42) == 100);
+    CHECK(invoke<int>(labelTwo, 1000) == 1001);
+}
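+
+// Per the CHECKs above: entrypoint 0 takes the EntrySwitch edge straight to
+// "end", so it returns its argument plus one; entrypoint 1 takes the loop
+// edge, so it keeps incrementing until the value is at least 100. Both
+// behaviors must survive the entrypoint duplication.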
+
+void testSomeEarlyRegister()
+{
+    auto run = [&] (bool succeed) {
+        Procedure proc;
+        
+        BasicBlock* root = proc.addBlock();
+        
+        PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+        bool ranFirstPatchpoint = false;
+        patchpoint->setGenerator(
+            [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+                CHECK(params[0].gpr() == GPRInfo::returnValueGPR);
+                ranFirstPatchpoint = true;
+            });
+        
+        Value* arg = patchpoint;
+        
+        patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+        patchpoint->appendSomeRegister(arg);
+        if (succeed)
+            patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+        bool ranSecondPatchpoint = false;
+        patchpoint->setGenerator(
+            [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+                if (succeed)
+                    CHECK(params[0].gpr() != params[1].gpr());
+                else
+                    CHECK(params[0].gpr() == params[1].gpr());
+                ranSecondPatchpoint = true;
+            });
+        
+        root->appendNew<Value>(proc, Return, Origin(), patchpoint);
+        
+        compile(proc);
+        CHECK(ranFirstPatchpoint);
+        CHECK(ranSecondPatchpoint);
+    };
+    
+    run(true);
+    run(false);
+}
+
+void testBranchBitAndImmFusion(
+    B3::Opcode valueModifier, Type valueType, int64_t constant,
+    Air::Opcode expectedOpcode, Air::Arg::Kind firstKind)
+{
+    // Currently this test should pass on all CPUs. But some CPUs may not support this fused
+    // instruction. It's OK to skip this test on those CPUs.
+    
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    
+    Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    
+    if (valueModifier != Identity) {
+        if (MemoryValue::accepts(valueModifier))
+            left = root->appendNew<MemoryValue>(proc, valueModifier, valueType, Origin(), left);
+        else
+            left = root->appendNew<Value>(proc, valueModifier, valueType, Origin(), left);
+    }
+    
+    root->appendNew<Value>(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(), left,
+            root->appendIntConstant(proc, Origin(), valueType, constant)));
+    root->setSuccessors(FrequentedBlock(one), FrequentedBlock(two));
+    
+    one->appendNew<Value>(proc, Oops, Origin());
+    two->appendNew<Value>(proc, Oops, Origin());
+
+    lowerToAirForTesting(proc);
+
+    // The first basic block must end in a BranchTest64(resCond, tmp, bitImm).
+    Air::Inst terminal = proc.code()[0]->last();
+    CHECK_EQ(terminal.kind.opcode, expectedOpcode);
+    CHECK_EQ(terminal.args[0].kind(), Air::Arg::ResCond);
+    CHECK_EQ(terminal.args[1].kind(), firstKind);
+    CHECK(terminal.args[2].kind() == Air::Arg::BitImm || terminal.args[2].kind() == Air::Arg::BitImm64);
+}
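+
+// Typical invocations of the fusion test above (illustrative; the RUN(...)
+// list that drives testb3 lives elsewhere in this file):
+//     testBranchBitAndImmFusion(Identity, Int64, 1, Air::BranchTest64, Air::Arg::Tmp);
+//     testBranchBitAndImmFusion(Trunc, Int32, 1, Air::BranchTest32, Air::Arg::Tmp);
+//     testBranchBitAndImmFusion(Load8S, Int32, 1, Air::BranchTest8, Air::Arg::Addr);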
+
+void testTerminalPatchpointThatNeedsToBeSpilled()
+{
+    // This is a unit test for how FTL's heap allocation fast paths behave.
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* success = proc.addBlock();
+    BasicBlock* slowPath = proc.addBlock();
+    
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->effects.terminal = true;
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    
+    root->appendSuccessor(success);
+    root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+    
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(42), params[0].gpr());
+            
+            CCallHelpers::Jump jumpToSuccess;
+            if (!params.fallsThroughToSuccessor(0))
+                jumpToSuccess = jit.jump();
+            
+            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+            
+            params.addLatePath(
+                [=] (CCallHelpers& jit) {
+                    if (jumpToSuccess.isSet())
+                        jumpToSuccess.linkTo(*labels[0], &jit);
+                });
+        });
+    
+    Vector<Value*> args;
+    {
+        RegisterSet fillAllGPRsSet = RegisterSet::allGPRs();
+        fillAllGPRsSet.exclude(RegisterSet::stackRegisters());
+        fillAllGPRsSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+        for (unsigned i = 0; i < fillAllGPRsSet.numberOfSetRegisters(); i++)
+            args.append(success->appendNew<Const32Value>(proc, Origin(), i));
+    }
+
+    {
+        // Now force all values into every available register.
+        PatchpointValue* p = success->appendNew<PatchpointValue>(proc, Void, Origin());
+        for (Value* v : args)
+            p->append(v, ValueRep::SomeRegister);
+        p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    {
+        // Now require the original patchpoint to be materialized into a register.
+        PatchpointValue* p = success->appendNew<PatchpointValue>(proc, Void, Origin());
+        p->append(patchpoint, ValueRep::SomeRegister);
+        p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    success->appendNew<Value>(proc, Return, Origin(), success->appendNew<Const32Value>(proc, Origin(), 10));
+    
+    slowPath->appendNew<Value>(proc, Return, Origin(), slowPath->appendNew<Const32Value>(proc, Origin(), 20));
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke<int>(*code), 10);
+}
+
+void testTerminalPatchpointThatNeedsToBeSpilled2()
+{
+    // This is a unit test for how FTL's heap allocation fast paths behave.
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* success = proc.addBlock();
+    BasicBlock* slowPath = proc.addBlock();
+
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+
+    root->appendNew<Value>(
+        proc, Branch, Origin(), arg);
+    root->appendSuccessor(one);
+    root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+    
+    PatchpointValue* patchpoint = one->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->effects.terminal = true;
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    patchpoint->append(arg, ValueRep::SomeRegister);
+    
+    one->appendSuccessor(success);
+    one->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+    
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(666), params[0].gpr());
+            auto goToFastPath = jit.branch32(CCallHelpers::Equal, params[1].gpr(), CCallHelpers::TrustedImm32(42));
+            auto jumpToSlow = jit.jump();
+            
+            // Make sure the asserts here pass.
+            params.fallsThroughToSuccessor(0);
+            params.fallsThroughToSuccessor(1);
+
+            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+            
+            params.addLatePath(
+                [=] (CCallHelpers& jit) {
+                    goToFastPath.linkTo(*labels[0], &jit);
+                    jumpToSlow.linkTo(*labels[1], &jit);
+                });
+        });
+    
+    Vector<Value*> args;
+    {
+        RegisterSet fillAllGPRsSet = RegisterSet::allGPRs();
+        fillAllGPRsSet.exclude(RegisterSet::stackRegisters());
+        fillAllGPRsSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+        for (unsigned i = 0; i < fillAllGPRsSet.numberOfSetRegisters(); i++)
+            args.append(success->appendNew<Const32Value>(proc, Origin(), i));
+    }
+
+    {
+        // Now force all values into every available register.
+        PatchpointValue* p = success->appendNew<PatchpointValue>(proc, Void, Origin());
+        for (Value* v : args)
+            p->append(v, ValueRep::SomeRegister);
+        p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    {
+        // Now require the original patchpoint to be materialized into a register.
+        PatchpointValue* p = success->appendNew<PatchpointValue>(proc, Void, Origin());
+        p->append(patchpoint, ValueRep::SomeRegister);
+        p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    success->appendNew<Value>(proc, Return, Origin(), patchpoint);
+    
+    slowPath->appendNew<Value>(proc, Return, Origin(), arg);
+    
+    auto original1 = Options::maxB3TailDupBlockSize();
+    auto original2 = Options::maxB3TailDupBlockSuccessors();
+
+    // Tail duplication will break the critical edge we're trying to test because it
+    // will clone the slowPath block for both edges to it!
+    Options::maxB3TailDupBlockSize() = 0;
+    Options::maxB3TailDupBlockSuccessors() = 0;
+
+    auto code = compile(proc);
+    CHECK_EQ(invoke<int>(*code, 1), 1);
+    CHECK_EQ(invoke<int>(*code, 0), 0);
+    CHECK_EQ(invoke<int>(*code, 42), 666);
+
+    Options::maxB3TailDupBlockSize() = original1;
+    Options::maxB3TailDupBlockSuccessors() = original2;
+}
+
+void testPatchpointTerminalReturnValue(bool successIsRare)
+{
+    // This is a unit test for how FTL's heap allocation fast paths behave.
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* success = proc.addBlock();
+    BasicBlock* slowPath = proc.addBlock();
+    BasicBlock* continuation = proc.addBlock();
+    
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+    patchpoint->effects.terminal = true;
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    
+    if (successIsRare) {
+        root->appendSuccessor(FrequentedBlock(success, FrequencyClass::Rare));
+        root->appendSuccessor(slowPath);
+    } else {
+        root->appendSuccessor(success);
+        root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+    }
+    
+    patchpoint->appendSomeRegister(arg);
+    
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            
+            CCallHelpers::Jump jumpToSlow =
+                jit.branch32(CCallHelpers::Above, params[1].gpr(), CCallHelpers::TrustedImm32(42));
+            
+            jit.add32(CCallHelpers::TrustedImm32(31), params[1].gpr(), params[0].gpr());
+            
+            CCallHelpers::Jump jumpToSuccess;
+            if (!params.fallsThroughToSuccessor(0))
+                jumpToSuccess = jit.jump();
+            
+            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+            
+            params.addLatePath(
+                [=] (CCallHelpers& jit) {
+                    jumpToSlow.linkTo(*labels[1], &jit);
+                    if (jumpToSuccess.isSet())
+                        jumpToSuccess.linkTo(*labels[0], &jit);
+                });
+        });
+    
+    UpsilonValue* successUpsilon = success->appendNew<UpsilonValue>(proc, Origin(), patchpoint);
+    success->appendNew<Value>(proc, Jump, Origin());
+    success->setSuccessors(continuation);
+    
+    UpsilonValue* slowPathUpsilon = slowPath->appendNew<UpsilonValue>(
+        proc, Origin(), slowPath->appendNew<Const32Value>(proc, Origin(), 666));
+    slowPath->appendNew<Value>(proc, Jump, Origin());
+    slowPath->setSuccessors(continuation);
+    
+    Value* phi = continuation->appendNew<Value>(proc, Phi, Int32, Origin());
+    successUpsilon->setPhi(phi);
+    slowPathUpsilon->setPhi(phi);
+    continuation->appendNew<Value>(proc, Return, Origin(), phi);
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke<int>(*code, 0), 31);
+    CHECK_EQ(invoke<int>(*code, 1), 32);
+    CHECK_EQ(invoke<int>(*code, 41), 72);
+    CHECK_EQ(invoke<int>(*code, 42), 73);
+    CHECK_EQ(invoke<int>(*code, 43), 666);
+    CHECK_EQ(invoke<int>(*code, -1), 666);
+}
+
+void testMemoryFence()
+{
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    
+    root->appendNew<FenceValue>(proc, Origin());
+    root->appendNew<Value>(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke<int>(*code), 42);
+    if (isX86())
+        checkUsesInstruction(*code, "lock or $0x0, (%rsp)");
+    if (isARM64())
+        checkUsesInstruction(*code, "dmb    ish");
+    checkDoesNotUseInstruction(*code, "mfence");
+    checkDoesNotUseInstruction(*code, "dmb    ishst");
+}
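+
+// Note: the checks above pin down the lowering -- a full fence becomes
+// "lock or $0x0, (%rsp)" on x86 (a locked RMW on the stack is a cheaper full
+// barrier than mfence for ordinary loads and stores) and "dmb ish" on ARM64,
+// never the store-only "dmb ishst".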
+
+void testStoreFence()
+{
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    
+    root->appendNew<FenceValue>(proc, Origin(), HeapRange::top(), HeapRange());
+    root->appendNew<Value>(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke<int>(*code), 42);
+    checkDoesNotUseInstruction(*code, "lock");
+    checkDoesNotUseInstruction(*code, "mfence");
+    if (isARM64())
+        checkUsesInstruction(*code, "dmb    ishst");
+}
+
+void testLoadFence()
+{
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    
+    root->appendNew(proc, Origin(), HeapRange(), HeapRange::top());
+    root->appendNew(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke<int>(*code), 42);
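+    // A load-to-load fence likewise costs no instruction on x86, while ARM64 has no
+    // dedicated load-only barrier weaker than "dmb ish", so that is what must appear.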
+    checkDoesNotUseInstruction(*code, "lock");
+    checkDoesNotUseInstruction(*code, "mfence");
+    if (isARM64())
+        checkUsesInstruction(*code, "dmb    ish");
+    checkDoesNotUseInstruction(*code, "dmb    ishst");
+}
+
+void testTrappingLoad()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    MemoryValue* value = root->appendNew<MemoryValue>(
+        proc, trapping(Load), Int32, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &x));
+    Effects expectedEffects;
+    expectedEffects.exitsSideways = true;
+    expectedEffects.controlDependent = true;
+    expectedEffects.reads = HeapRange::top();
+    CHECK_EQ(value->range(), HeapRange::top());
+    CHECK_EQ(value->effects(), expectedEffects);
+    value->setRange(HeapRange(0));
+    CHECK_EQ(value->range(), HeapRange(0));
+    CHECK_EQ(value->effects(), expectedEffects); // We still read top!
+    root->appendNew<Value>(proc, Return, Origin(), value);
+    CHECK_EQ(compileAndRun<int>(proc), 42);
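+    // Exactly one Air instruction should carry the traps flag: the trapping load
+    // must reach the final code once, neither duplicated nor dropped.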
+    unsigned trapsCount = 0;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.traps)
+                trapsCount++;
+        }
+    }
+    CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingStore()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    MemoryValue* value = root->appendNew<MemoryValue>(
+        proc, trapping(Store), Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 111),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &x));
+    Effects expectedEffects;
+    expectedEffects.exitsSideways = true;
+    expectedEffects.controlDependent = true;
+    expectedEffects.reads = HeapRange::top();
+    expectedEffects.writes = HeapRange::top();
+    CHECK_EQ(value->range(), HeapRange::top());
+    CHECK_EQ(value->effects(), expectedEffects);
+    value->setRange(HeapRange(0));
+    CHECK_EQ(value->range(), HeapRange(0));
+    expectedEffects.writes = HeapRange(0);
+    CHECK_EQ(value->effects(), expectedEffects); // We still read top!
+    root->appendNew<Value>(proc, Return, Origin());
+    compileAndRun(proc);
+    CHECK_EQ(x, 111);
+    unsigned trapsCount = 0;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.traps)
+                trapsCount++;
+        }
+    }
+    CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingLoadAddStore()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    ConstPtrValue* ptr = root->appendNew<ConstPtrValue>(proc, Origin(), &x);
+    root->appendNew<MemoryValue>(
+        proc, trapping(Store), Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, trapping(Load), Int32, Origin(), ptr),
+            root->appendNew<Const32Value>(proc, Origin(), 3)),
+        ptr);
+    root->appendNew<Value>(proc, Return, Origin());
+    compileAndRun(proc);
+    CHECK_EQ(x, 45);
+    bool traps = false;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.traps)
+                traps = true;
+        }
+    }
+    CHECK(traps);
+}
+
+void testTrappingLoadDCE()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    root->appendNew<MemoryValue>(
+        proc, trapping(Load), Int32, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &x));
+    root->appendNew<Value>(proc, Return, Origin());
+    compileAndRun(proc);
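+    // The load's result is never used, but a trapping load exits sideways, so dead
+    // code elimination must not remove it.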
+    unsigned trapsCount = 0;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.traps)
+                trapsCount++;
+        }
+    }
+    CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingStoreElimination()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    Value* ptr = root->appendNew<ConstPtrValue>(proc, Origin(), &x);
+    root->appendNew<MemoryValue>(
+        proc, trapping(Store), Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 43),
+        ptr);
+    root->appendNew<MemoryValue>(
+        proc, trapping(Store), Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 44),
+        ptr);
+    root->appendNew<Value>(proc, Return, Origin());
+    compileAndRun(proc);
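+    // Both stores must survive: eliminating the first one would be wrong because a
+    // trapping store that exits sideways makes the earlier value observable.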
+    unsigned storeCount = 0;
+    for (Value* value : proc.values()) {
+        if (MemoryValue::isStore(value->opcode()))
+            storeCount++;
+    }
+    CHECK_EQ(storeCount, 2u);
+}
+
+void testMoveConstants()
+{
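+    // After moveConstants, at most one of the two nearby 64-bit constants should
+    // stay materialized; check() fails if more than one used Const64 survives.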
+    auto check = [] (Procedure& proc) {
+        proc.resetReachability();
+        
+        if (shouldBeVerbose()) {
+            dataLog("IR before:\n");
+            dataLog(proc);
+        }
+        
+        moveConstants(proc);
+        
+        if (shouldBeVerbose()) {
+            dataLog("IR after:\n");
+            dataLog(proc);
+        }
+        
+        UseCounts useCounts(proc);
+        unsigned count = 0;
+        for (Value* value : proc.values()) {
+            if (useCounts.numUses(value) && value->hasInt64())
+                count++;
+        }
+        
+        if (count == 1)
+            return;
+        
+        crashLock.lock();
+        dataLog("Fail in testMoveConstants: got more than one Const64:\n");
+        dataLog(proc);
+        CRASH();
+    };
+
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* a = root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(), 
+            root->appendNew<ConstPtrValue>(proc, Origin(), 0x123412341234));
+        Value* b = root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), 0x123412341334));
+        root->appendNew<CCallValue>(proc, Void, Origin(), a, b);
+        root->appendNew<Value>(proc, Return, Origin());
+        check(proc);
+    }
+    
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* x = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* a = root->appendNew<Value>(
+            proc, Add, Origin(), x, root->appendNew<ConstPtrValue>(proc, Origin(), 0x123412341234));
+        Value* b = root->appendNew<Value>(
+            proc, Add, Origin(), x, root->appendNew<ConstPtrValue>(proc, Origin(), -0x123412341234));
+        root->appendNew<CCallValue>(proc, Void, Origin(), a, b);
+        root->appendNew<Value>(proc, Return, Origin());
+        check(proc);
+    }
+}
+
+void testPCOriginMapDoesntInsertNops()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    CCallHelpers::Label watchpointLabel;
+
+    PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            watchpointLabel = jit.watchpointLabel();
+        });
+
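+    // If padding were inserted between the two patchpoints (e.g. for the PC-to-Origin
+    // map), the label taken below would no longer match the watchpoint label above.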
+    patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            CCallHelpers::Label labelIgnoringWatchpoints = jit.labelIgnoringWatchpoints();
+
+            CHECK(watchpointLabel == labelIgnoringWatchpoints);
+        });
+
+    root->appendNew<Value>(proc, Return, Origin());
+
+    compile(proc);
+}
+
+void testPinRegisters()
+{
+    auto go = [&] (bool pin) {
+        Procedure proc;
+        RegisterSet csrs;
+        csrs.merge(RegisterSet::calleeSaveRegisters());
+        csrs.exclude(RegisterSet::stackRegisters());
+        if (pin) {
+            csrs.forEach(
+                [&] (Reg reg) {
+                    proc.pinRegister(reg);
+                });
+        }
+        BasicBlock* root = proc.addBlock();
+        Value* a = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* b = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* c = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+        Value* d = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::regCS0);
+        root->appendNew<CCallValue>(
+            proc, Void, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), static_cast<intptr_t>(0x1234)));
+        root->appendNew<CCallValue>(
+            proc, Void, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), static_cast<intptr_t>(0x1235)),
+            a, b, c);
+        PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+        patchpoint->appendSomeRegister(d);
+        patchpoint->setGenerator(
+            [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+                CHECK_EQ(params[0].gpr(), GPRInfo::regCS0);
+            });
+        root->appendNew<Value>(proc, Return, Origin());
+        auto code = compile(proc);
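+        // Scan the generated Air: other than the patchpoint that deliberately reads
+        // regCS0, no pinned callee-save register may show up in any instruction, nor
+        // in the callee-save frame.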
+        bool usesCSRs = false;
+        for (Air::BasicBlock* block : proc.code()) {
+            for (Air::Inst& inst : *block) {
+                if (inst.kind.opcode == Air::Patch && inst.origin == patchpoint)
+                    continue;
+                inst.forEachTmpFast(
+                    [&] (Air::Tmp tmp) {
+                        if (tmp.isReg())
+                            usesCSRs |= csrs.get(tmp.reg());
+                    });
+            }
+        }
+        for (const RegisterAtOffset& regAtOffset : proc.calleeSaveRegisters())
+            usesCSRs |= csrs.get(regAtOffset.reg());
+        CHECK_EQ(usesCSRs, !pin);
+    };
+    
+    go(true);
+    go(false);
+}
+
+void testX86LeaAddAddShlLeft()
+{
+    // Add(Add(Shl(@x, $c), @y), $d)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(
+                proc, Shl, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+                root->appendNew<Const32Value>(proc, Origin(), 2)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<ConstPtrValue>(proc, Origin(), 100));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
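+    // The whole Add(Add(Shl(x, 2), y), 100) tree should fuse into one LEA:
+    // base %rdi, index %rsi scaled by 4, displacement 0x64.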
+    checkUsesInstruction(*code, "lea 0x64(%rdi,%rsi,4), %rax");
+    CHECK_EQ(invoke<intptr_t>(*code, 1, 2), (1 + (2 << 2)) + 100);
+}
+
+void testX86LeaAddAddShlRight()
+{
+    // Add(Add(@x, Shl(@y, $c)), $d)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Value>(
+                proc, Shl, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+                root->appendNew<Const32Value>(proc, Origin(), 2))),
+        root->appendNew<ConstPtrValue>(proc, Origin(), 100));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea 0x64(%rdi,%rsi,4), %rax");
+    CHECK_EQ(invoke<intptr_t>(*code, 1, 2), (1 + (2 << 2)) + 100);
+}
+
+void testX86LeaAddAdd()
+{
+    // Add(Add(@x, @y), $c)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<ConstPtrValue>(proc, Origin(), 100));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkDisassembly(
+        *code,
+        [&] (const char* disassembly) -> bool {
+            return strstr(disassembly, "lea 0x64(%rdi,%rsi), %rax")
+                || strstr(disassembly, "lea 0x64(%rsi,%rdi), %rax");
+        },
+        "Expected to find something like lea 0x64(%rdi,%rsi), %rax but didn't!");
+    CHECK_EQ(invoke<intptr_t>(*code, 1, 2), (1 + 2) + 100);
+}
+
+void testX86LeaAddShlRight()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<Const32Value>(proc, Origin(), 2)));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea (%rdi,%rsi,4), %rax");
+    CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 2));
+}
+
+void testX86LeaAddShlLeftScale1()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<Const32Value>(proc, Origin(), 0)));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkDisassembly(
+        *code,
+        [&] (const char* disassembly) -> bool {
+            return strstr(disassembly, "lea (%rdi,%rsi), %rax")
+                || strstr(disassembly, "lea (%rsi,%rdi), %rax");
+        },
+        "Expected to find something like lea (%rdi,%rsi), %rax but didn't!");
+    CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + 2);
+}
+
+void testX86LeaAddShlLeftScale2()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<Const32Value>(proc, Origin(), 1)));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea (%rdi,%rsi,2), %rax");
+    CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 1));
+}
+
+void testX86LeaAddShlLeftScale4()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<Const32Value>(proc, Origin(), 2)),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea (%rdi,%rsi,4), %rax");
+    CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 2));
+}
+
+void testX86LeaAddShlLeftScale8()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<Const32Value>(proc, Origin(), 3)));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea (%rdi,%rsi,8), %rax");
+    CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 3));
+}
+
+void testAddShl32()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<Const32Value>(proc, Origin(), 32)));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke<int64_t>(*code, 1, 2), 1 + (static_cast<int64_t>(2) << static_cast<int64_t>(32)));
+}
+
+void testAddShl64()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<Const32Value>(proc, Origin(), 64)));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
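+    // Shift amounts are masked to the operand width, so a 64-bit Shl by 64 acts as
+    // a shift by zero and the result is just 1 + 2.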
+    CHECK_EQ(invoke<int64_t>(*code, 1, 2), 1 + 2);
+}
+
+void testAddShl65()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew<Value>(
+        proc, Add, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Value>(
+            proc, Shl, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<Const32Value>(proc, Origin(), 65)));
+    root->appendNew<Value>(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke<int64_t>(*code, 1, 2), 1 + (2 << 1));
+}
+
+void testReduceStrengthReassociation(bool flip)
+{
+    // Add(Add(@x, $c), @y) -> Add(Add(@x, @y), $c)
+    // and
+    // Add(@y, Add(@x, $c)) -> Add(Add(@x, @y), $c)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    
+    Value* innerAdd = root->appendNew<Value>(
+        proc, Add, Origin(), arg1,
+        root->appendNew<ConstPtrValue>(proc, Origin(), 42));
+    
+    Value* outerAdd;
+    if (flip)
+        outerAdd = root->appendNew<Value>(proc, Add, Origin(), arg2, innerAdd);
+    else
+        outerAdd = root->appendNew<Value>(proc, Add, Origin(), innerAdd, arg2);
+    
+    root->appendNew<Value>(proc, Return, Origin(), outerAdd);
+    
+    proc.resetReachability();
+
+    if (shouldBeVerbose()) {
+        dataLog("IR before reduceStrength:\n");
+        dataLog(proc);
+    }
+    
+    reduceStrength(proc);
+    
+    if (shouldBeVerbose()) {
+        dataLog("IR after reduceStrength:\n");
+        dataLog(proc);
+    }
+    
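+    // Either way the constant must end up on the outer Add:
+    // Return(Add(Add(arg1, arg2), 42)), with the two arguments in either order.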
+    CHECK_EQ(root->last()->opcode(), Return);
+    CHECK_EQ(root->last()->child(0)->opcode(), Add);
+    CHECK(root->last()->child(0)->child(1)->isIntPtr(42));
+    CHECK_EQ(root->last()->child(0)->child(0)->opcode(), Add);
+    CHECK(
+        (root->last()->child(0)->child(0)->child(0) == arg1 && root->last()->child(0)->child(0)->child(1) == arg2) ||
+        (root->last()->child(0)->child(0)->child(0) == arg2 && root->last()->child(0)->child(0)->child(1) == arg1));
+}
+
+void testLoadBaseIndexShift2()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNew<Value>(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<Value>(
+                proc, Add, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Value>(
+                    proc, Shl, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+                    root->appendNew<Const32Value>(proc, Origin(), 2)))));
+    auto code = compile(proc);
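+    // A shift by 2 fits an x86 addressing-mode scale, so base + (index << 2) should
+    // fold into a single (%rdi,%rsi,4) operand.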
+    if (isX86())
+        checkUsesInstruction(*code, "(%rdi,%rsi,4)");
+    int32_t value = 12341234;
+    char* ptr = bitwise_cast<char*>(&value);
+    for (unsigned i = 0; i < 10; ++i)
+        CHECK_EQ(invoke<int32_t>(*code, ptr - (static_cast<intptr_t>(1) << static_cast<intptr_t>(2)) * i, i), 12341234);
+}
+
+void testLoadBaseIndexShift32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNew<Value>(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<Value>(
+                proc, Add, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Value>(
+                    proc, Shl, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+                    root->appendNew<Const32Value>(proc, Origin(), 32)))));
+    auto code = compile(proc);
+    int32_t value = 12341234;
+    char* ptr = bitwise_cast<char*>(&value);
+    for (unsigned i = 0; i < 10; ++i)
+        CHECK_EQ(invoke<int32_t>(*code, ptr - (static_cast<intptr_t>(1) << static_cast<intptr_t>(32)) * i, i), 12341234);
+}
+
+void testOptimizeMaterialization()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNew<CCallValue>(
+        proc, Void, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), 0x123423453456llu),
+        root->appendNew<ConstPtrValue>(proc, Origin(), 0x123423453456llu + 35));
+    root->appendNew<Value>(proc, Return, Origin());
+    
+    auto code = compile(proc);
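+    // The two constants differ by 35, so the second one should be rematerialized as
+    // "first + 35" (an Add64 with immediate 35) instead of a second 64-bit move.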
+    bool found = false;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.opcode != Air::Add64)
+                continue;
+            if (inst.args[0] != Air::Arg::imm(35))
+                continue;
+            found = true;
+        }
+    }
+    CHECK(found);
+}
+
+void testWasmBoundsCheck(unsigned offset)
+{
+    Procedure proc;
+    GPRReg pinned = GPRInfo::argumentGPR1;
+    proc.pinRegister(pinned);
+
+    proc.setWasmBoundsCheckGenerator([=] (CCallHelpers& jit, GPRReg pinnedGPR, unsigned actualOffset) {
+        CHECK_EQ(pinnedGPR, pinned);
+        CHECK_EQ(actualOffset, offset);
+
+        // This should always work because a function this simple should never have callee
+        // saves.
+        jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+        jit.emitFunctionEpilogue();
+        jit.ret();
+    });
+
+    BasicBlock* root = proc.addBlock();
+    Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    if (pointerType() != Int32)
+        left = root->appendNew<Value>(proc, Trunc, Origin(), left);
+    root->appendNew<WasmBoundsCheckValue>(proc, Origin(), left, pinned, offset);
+    Value* result = root->appendNew<Const32Value>(proc, Origin(), 0x42);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    auto code = compile(proc);
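+    // The bound lives in the pinned register. A pointer of 1 stays below the bound
+    // of 2 + offset and returns 0x42; pointers of 2 and 3 fail the check and return
+    // 42 from the bounds-check generator above.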
+    CHECK_EQ(invoke<int32_t>(*code, 1, 2 + offset), 0x42);
+    CHECK_EQ(invoke<int32_t>(*code, 3, 2 + offset), 42);
+    CHECK_EQ(invoke<int32_t>(*code, 2, 2 + offset), 42);
+}
+
+void testWasmAddress()
+{
+    Procedure proc;
+    GPRReg pinnedGPR = GPRInfo::argumentGPR2;
+    proc.pinRegister(pinnedGPR);
+
+    unsigned loopCount = 100;
+    Vector<unsigned> values(loopCount);
+    unsigned numToStore = 42;
+
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* header = proc.addBlock();
+    BasicBlock* body = proc.addBlock();
+    BasicBlock* continuation = proc.addBlock();
+
+    // Root
+    Value* loopCountValue = root->appendNew<Value>(proc, Trunc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* valueToStore = root->appendNew<Value>(proc, Trunc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    UpsilonValue* beginUpsilon = root->appendNew<UpsilonValue>(proc, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+    root->appendNewControlValue(proc, Jump, Origin(), header);
+
+    // Header
+    Value* indexPhi = header->appendNew<Value>(proc, Phi, Int32, Origin());
+    header->appendNewControlValue(proc, Branch, Origin(),
+        header->appendNew<Value>(proc, Below, Origin(), indexPhi, loopCountValue),
+        body, continuation);
+
+    // Body
+    Value* pointer = body->appendNew<Value>(proc, Mul, Origin(), indexPhi,
+        body->appendNew<Const32Value>(proc, Origin(), sizeof(unsigned)));
+    pointer = body->appendNew<Value>(proc, ZExt32, Origin(), pointer);
+    body->appendNew<MemoryValue>(proc, Store, Origin(), valueToStore,
+        body->appendNew<WasmAddressValue>(proc, Origin(), pointer, pinnedGPR));
+    UpsilonValue* incUpsilon = body->appendNew<UpsilonValue>(proc, Origin(),
+        body->appendNew<Value>(proc, Add, Origin(), indexPhi,
+            body->appendNew<Const32Value>(proc, Origin(), 1)));
+    body->appendNewControlValue(proc, Jump, Origin(), header);
+
+    // Continuation
+    continuation->appendNewControlValue(proc, Return, Origin());
+
+    beginUpsilon->setPhi(indexPhi);
+    incUpsilon->setPhi(indexPhi);
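+    // The loop stores valueToStore into values[i] through the pinned base register
+    // for i in [0, loopCount), so every element must come back equal to numToStore.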
+
+
+    auto code = compile(proc);
+    invoke<void>(*code, loopCount, numToStore, values.data());
+    for (unsigned value : values)
+        CHECK_EQ(numToStore, value);
+}
+
+// Make sure the compiler does not try to optimize anything out.
+NEVER_INLINE double zero()
+{
+    return 0.;
+}
+
+double negativeZero()
+{
+    return -zero();
+}
+
+#define RUN(test) do {                          \
+        if (!shouldRun(#test))                  \
+            break;                              \
+        tasks.append(                           \
+            createSharedTask<void()>(           \
+                [&] () {                        \
+                    dataLog(#test "...\n");     \
+                    test;                       \
+                    dataLog(#test ": OK!\n");   \
+                }));                            \
+    } while (false);
+
+#define RUN_UNARY(test, values) \
+    for (auto a : values) {                             \
+        CString testStr = toCString(#test, "(", a.name, ")"); \
+        if (!shouldRun(testStr.data()))                 \
+            continue;                                   \
+        tasks.append(createSharedTask<void()>(  \
+            [=] () {                                    \
+                dataLog(toCString(testStr, "...\n"));   \
+                test(a.value);                          \
+                dataLog(toCString(testStr, ": OK!\n")); \
+            }));                                        \
+    }
+
+#define RUN_BINARY(test, valuesA, valuesB) \
+    for (auto a : valuesA) {                                \
+        for (auto b : valuesB) {                            \
+            CString testStr = toCString(#test, "(", a.name, ", ", b.name, ")"); \
+            if (!shouldRun(testStr.data()))                 \
+                continue;                                   \
+            tasks.append(createSharedTask<void()>(  \
+                [=] () {                                    \
+                    dataLog(toCString(testStr, "...\n"));   \
+                    test(a.value, b.value);                 \
+                    dataLog(toCString(testStr, ": OK!\n")); \
+                }));                                        \
+        }                                                   \
+    }
+
+void run(const char* filter)
+{
+    JSC::initializeThreading();
+    vm = &VM::create(LargeHeap).leakRef();
+
+    Deque<RefPtr<SharedTask<void()>>> tasks;
+
+    auto shouldRun = [&] (const char* testName) -> bool {
+        return !filter || !!strcasestr(testName, filter);
+    };
+
+    // We run this test first because it fiddles with some
+    // JSC options.
+    testTerminalPatchpointThatNeedsToBeSpilled2();
+
+    RUN(test42());
+    RUN(testLoad42());
+    RUN(testLoadOffsetImm9Max());
+    RUN(testLoadOffsetImm9MaxPlusOne());
+    RUN(testLoadOffsetImm9MaxPlusTwo());
+    RUN(testLoadOffsetImm9Min());
+    RUN(testLoadOffsetImm9MinMinusOne());
+    RUN(testLoadOffsetScaledUnsignedImm12Max());
+    RUN(testLoadOffsetScaledUnsignedOverImm12Max());
+    RUN(testArg(43));
+    RUN(testReturnConst64(5));
+    RUN(testReturnConst64(-42));
+    RUN(testReturnVoid());
+
+    RUN(testAddArg(111));
+    RUN(testAddArgs(1, 1));
+    RUN(testAddArgs(1, 2));
+    RUN(testAddArgImm(1, 2));
+    RUN(testAddArgImm(0, 2));
+    RUN(testAddArgImm(1, 0));
+    RUN(testAddImmArg(1, 2));
+    RUN(testAddImmArg(0, 2));
+    RUN(testAddImmArg(1, 0));
+    RUN_BINARY(testAddArgMem, int64Operands(), int64Operands());
+    RUN_BINARY(testAddMemArg, int64Operands(), int64Operands());
+    RUN_BINARY(testAddImmMem, int64Operands(), int64Operands());
+    RUN_UNARY(testAddArg32, int32Operands());
+    RUN(testAddArgs32(1, 1));
+    RUN(testAddArgs32(1, 2));
+    RUN_BINARY(testAddArgMem32, int32Operands(), int32Operands());
+    RUN_BINARY(testAddMemArg32, int32Operands(), int32Operands());
+    RUN_BINARY(testAddImmMem32, int32Operands(), int32Operands());
+    RUN(testAddArgZeroImmZDef());
+    RUN(testAddLoadTwice());
+
+    RUN(testAddArgDouble(M_PI));
+    RUN(testAddArgsDouble(M_PI, 1));
+    RUN(testAddArgsDouble(M_PI, -M_PI));
+    RUN(testAddArgImmDouble(M_PI, 1));
+    RUN(testAddArgImmDouble(M_PI, 0));
+    RUN(testAddArgImmDouble(M_PI, negativeZero()));
+    RUN(testAddArgImmDouble(0, 0));
+    RUN(testAddArgImmDouble(0, negativeZero()));
+    RUN(testAddArgImmDouble(negativeZero(), 0));
+    RUN(testAddArgImmDouble(negativeZero(), negativeZero()));
+    RUN(testAddImmArgDouble(M_PI, 1));
+    RUN(testAddImmArgDouble(M_PI, 0));
+    RUN(testAddImmArgDouble(M_PI, negativeZero()));
+    RUN(testAddImmArgDouble(0, 0));
+    RUN(testAddImmArgDouble(0, negativeZero()));
+    RUN(testAddImmArgDouble(negativeZero(), 0));
+    RUN(testAddImmArgDouble(negativeZero(), negativeZero()));
+    RUN(testAddImmsDouble(M_PI, 1));
+    RUN(testAddImmsDouble(M_PI, 0));
+    RUN(testAddImmsDouble(M_PI, negativeZero()));
+    RUN(testAddImmsDouble(0, 0));
+    RUN(testAddImmsDouble(0, negativeZero()));
+    RUN(testAddImmsDouble(negativeZero(), negativeZero()));
+    RUN_UNARY(testAddArgFloat, floatingPointOperands<float>());
+    RUN_BINARY(testAddArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testAddFPRArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testAddArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testAddImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testAddImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_UNARY(testAddArgFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+    RUN_BINARY(testAddArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testAddArgsFloatWithEffectfulDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+    RUN(testMulArg(5));
+    RUN(testMulAddArg(5));
+    RUN(testMulAddArg(85));
+    RUN(testMulArgStore(5));
+    RUN(testMulArgStore(85));
+    RUN(testMulArgs(1, 1));
+    RUN(testMulArgs(1, 2));
+    RUN(testMulArgs(3, 3));
+    RUN(testMulArgImm(1, 2));
+    RUN(testMulArgImm(1, 4));
+    RUN(testMulArgImm(1, 8));
+    RUN(testMulArgImm(1, 16));
+    RUN(testMulArgImm(1, 0x80000000llu));
+    RUN(testMulArgImm(1, 0x800000000000llu));
+    RUN(testMulArgImm(7, 2));
+    RUN(testMulArgImm(7, 4));
+    RUN(testMulArgImm(7, 8));
+    RUN(testMulArgImm(7, 16));
+    RUN(testMulArgImm(7, 0x80000000llu));
+    RUN(testMulArgImm(7, 0x800000000000llu));
+    RUN(testMulArgImm(-42, 2));
+    RUN(testMulArgImm(-42, 4));
+    RUN(testMulArgImm(-42, 8));
+    RUN(testMulArgImm(-42, 16));
+    RUN(testMulArgImm(-42, 0x80000000llu));
+    RUN(testMulArgImm(-42, 0x800000000000llu));
+    RUN(testMulArgImm(0, 2));
+    RUN(testMulArgImm(1, 0));
+    RUN(testMulArgImm(3, 3));
+    RUN(testMulArgImm(3, -1));
+    RUN(testMulArgImm(-3, -1));
+    RUN(testMulArgImm(0, -1));
+    RUN(testMulImmArg(1, 2));
+    RUN(testMulImmArg(0, 2));
+    RUN(testMulImmArg(1, 0));
+    RUN(testMulImmArg(3, 3));
+    RUN(testMulArgs32(1, 1));
+    RUN(testMulArgs32(1, 2));
+    RUN(testMulLoadTwice());
+    RUN(testMulAddArgsLeft());
+    RUN(testMulAddArgsRight());
+    RUN(testMulAddArgsLeft32());
+    RUN(testMulAddArgsRight32());
+    RUN(testMulSubArgsLeft());
+    RUN(testMulSubArgsRight());
+    RUN(testMulSubArgsLeft32());
+    RUN(testMulSubArgsRight32());
+    RUN(testMulNegArgs());
+    RUN(testMulNegArgs32());
+
+    RUN_UNARY(testMulArgDouble, floatingPointOperands<double>());
+    RUN_BINARY(testMulArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testMulArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testMulImmArgDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testMulImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_UNARY(testMulArgFloat, floatingPointOperands<float>());
+    RUN_BINARY(testMulArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testMulArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testMulImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testMulImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_UNARY(testMulArgFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+    RUN_BINARY(testMulArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testMulArgsFloatWithEffectfulDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+    RUN(testDivArgDouble(M_PI));
+    RUN(testDivArgsDouble(M_PI, 1));
+    RUN(testDivArgsDouble(M_PI, -M_PI));
+    RUN(testDivArgImmDouble(M_PI, 1));
+    RUN(testDivArgImmDouble(M_PI, 0));
+    RUN(testDivArgImmDouble(M_PI, negativeZero()));
+    RUN(testDivArgImmDouble(0, 0));
+    RUN(testDivArgImmDouble(0, negativeZero()));
+    RUN(testDivArgImmDouble(negativeZero(), 0));
+    RUN(testDivArgImmDouble(negativeZero(), negativeZero()));
+    RUN(testDivImmArgDouble(M_PI, 1));
+    RUN(testDivImmArgDouble(M_PI, 0));
+    RUN(testDivImmArgDouble(M_PI, negativeZero()));
+    RUN(testDivImmArgDouble(0, 0));
+    RUN(testDivImmArgDouble(0, negativeZero()));
+    RUN(testDivImmArgDouble(negativeZero(), 0));
+    RUN(testDivImmArgDouble(negativeZero(), negativeZero()));
+    RUN(testDivImmsDouble(M_PI, 1));
+    RUN(testDivImmsDouble(M_PI, 0));
+    RUN(testDivImmsDouble(M_PI, negativeZero()));
+    RUN(testDivImmsDouble(0, 0));
+    RUN(testDivImmsDouble(0, negativeZero()));
+    RUN(testDivImmsDouble(negativeZero(), negativeZero()));
+    RUN_UNARY(testDivArgFloat, floatingPointOperands<float>());
+    RUN_BINARY(testDivArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testDivArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testDivImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testDivImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_UNARY(testDivArgFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+    RUN_BINARY(testDivArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testDivArgsFloatWithEffectfulDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+    RUN_BINARY(testUDivArgsInt32, int32Operands(), int32Operands());
+    RUN_BINARY(testUDivArgsInt64, int64Operands(), int64Operands());
+
+    RUN_UNARY(testModArgDouble, floatingPointOperands<double>());
+    RUN_BINARY(testModArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testModArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testModImmArgDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testModImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_UNARY(testModArgFloat, floatingPointOperands<float>());
+    RUN_BINARY(testModArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testModArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testModImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testModImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+    RUN_BINARY(testUModArgsInt32, int32Operands(), int32Operands());
+    RUN_BINARY(testUModArgsInt64, int64Operands(), int64Operands());
+
+    RUN(testSubArg(24));
+    RUN(testSubArgs(1, 1));
+    RUN(testSubArgs(1, 2));
+    RUN(testSubArgs(13, -42));
+    RUN(testSubArgs(-13, 42));
+    RUN(testSubArgImm(1, 1));
+    RUN(testSubArgImm(1, 2));
+    RUN(testSubArgImm(13, -42));
+    RUN(testSubArgImm(-13, 42));
+    RUN(testSubArgImm(42, 0));
+    RUN(testSubImmArg(1, 1));
+    RUN(testSubImmArg(1, 2));
+    RUN(testSubImmArg(13, -42));
+    RUN(testSubImmArg(-13, 42));
+    RUN_BINARY(testSubArgMem, int64Operands(), int64Operands());
+    RUN_BINARY(testSubMemArg, int64Operands(), int64Operands());
+    RUN_BINARY(testSubImmMem, int32Operands(), int32Operands());
+    RUN_BINARY(testSubMemImm, int32Operands(), int32Operands());
+    RUN_UNARY(testNegValueSubOne, int32Operands());
+
+    RUN(testSubArgs32(1, 1));
+    RUN(testSubArgs32(1, 2));
+    RUN(testSubArgs32(13, -42));
+    RUN(testSubArgs32(-13, 42));
+    RUN(testSubArgImm32(1, 1));
+    RUN(testSubArgImm32(1, 2));
+    RUN(testSubArgImm32(13, -42));
+    RUN(testSubArgImm32(-13, 42));
+    RUN(testSubImmArg32(1, 1));
+    RUN(testSubImmArg32(1, 2));
+    RUN(testSubImmArg32(13, -42));
+    RUN(testSubImmArg32(-13, 42));
+    RUN_BINARY(testSubArgMem32, int32Operands(), int32Operands());
+    RUN_BINARY(testSubMemArg32, int32Operands(), int32Operands());
+    RUN_BINARY(testSubImmMem32, int32Operands(), int32Operands());
+    RUN_BINARY(testSubMemImm32, int32Operands(), int32Operands());
+    RUN_UNARY(testNegValueSubOne32, int64Operands());
+
+    RUN_UNARY(testSubArgDouble, floatingPointOperands<double>());
+    RUN_BINARY(testSubArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testSubArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testSubImmArgDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testSubImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_UNARY(testSubArgFloat, floatingPointOperands<float>());
+    RUN_BINARY(testSubArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testSubArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testSubImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testSubImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_UNARY(testSubArgFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+    RUN_BINARY(testSubArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testSubArgsFloatWithEffectfulDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+    RUN_UNARY(testNegDouble, floatingPointOperands<double>());
+    RUN_UNARY(testNegFloat, floatingPointOperands<float>());
+    RUN_UNARY(testNegFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+
+    RUN(testBitAndArgs(43, 43));
+    RUN(testBitAndArgs(43, 0));
+    RUN(testBitAndArgs(10, 3));
+    RUN(testBitAndArgs(42, 0xffffffffffffffff));
+    RUN(testBitAndSameArg(43));
+    RUN(testBitAndSameArg(0));
+    RUN(testBitAndSameArg(3));
+    RUN(testBitAndSameArg(0xffffffffffffffff));
+    RUN(testBitAndImms(43, 43));
+    RUN(testBitAndImms(43, 0));
+    RUN(testBitAndImms(10, 3));
+    RUN(testBitAndImms(42, 0xffffffffffffffff));
+    RUN(testBitAndArgImm(43, 43));
+    RUN(testBitAndArgImm(43, 0));
+    RUN(testBitAndArgImm(10, 3));
+    RUN(testBitAndArgImm(42, 0xffffffffffffffff));
+    RUN(testBitAndArgImm(42, 0xff));
+    RUN(testBitAndArgImm(300, 0xff));
+    RUN(testBitAndArgImm(-300, 0xff));
+    RUN(testBitAndArgImm(42, 0xffff));
+    RUN(testBitAndArgImm(40000, 0xffff));
+    RUN(testBitAndArgImm(-40000, 0xffff));
+    RUN(testBitAndImmArg(43, 43));
+    RUN(testBitAndImmArg(43, 0));
+    RUN(testBitAndImmArg(10, 3));
+    RUN(testBitAndImmArg(42, 0xffffffffffffffff));
+    RUN(testBitAndBitAndArgImmImm(2, 7, 3));
+    RUN(testBitAndBitAndArgImmImm(1, 6, 6));
+    RUN(testBitAndBitAndArgImmImm(0xffff, 24, 7));
+    RUN(testBitAndImmBitAndArgImm(7, 2, 3));
+    RUN(testBitAndImmBitAndArgImm(6, 1, 6));
+    RUN(testBitAndImmBitAndArgImm(24, 0xffff, 7));
+    RUN(testBitAndArgs32(43, 43));
+    RUN(testBitAndArgs32(43, 0));
+    RUN(testBitAndArgs32(10, 3));
+    RUN(testBitAndArgs32(42, 0xffffffff));
+    RUN(testBitAndSameArg32(43));
+    RUN(testBitAndSameArg32(0));
+    RUN(testBitAndSameArg32(3));
+    RUN(testBitAndSameArg32(0xffffffff));
+    RUN(testBitAndImms32(43, 43));
+    RUN(testBitAndImms32(43, 0));
+    RUN(testBitAndImms32(10, 3));
+    RUN(testBitAndImms32(42, 0xffffffff));
+    RUN(testBitAndArgImm32(43, 43));
+    RUN(testBitAndArgImm32(43, 0));
+    RUN(testBitAndArgImm32(10, 3));
+    RUN(testBitAndArgImm32(42, 0xffffffff));
+    RUN(testBitAndImmArg32(43, 43));
+    RUN(testBitAndImmArg32(43, 0));
+    RUN(testBitAndImmArg32(10, 3));
+    RUN(testBitAndImmArg32(42, 0xffffffff));
+    RUN(testBitAndImmArg32(42, 0xff));
+    RUN(testBitAndImmArg32(300, 0xff));
+    RUN(testBitAndImmArg32(-300, 0xff));
+    RUN(testBitAndImmArg32(42, 0xffff));
+    RUN(testBitAndImmArg32(40000, 0xffff));
+    RUN(testBitAndImmArg32(-40000, 0xffff));
+    RUN(testBitAndBitAndArgImmImm32(2, 7, 3));
+    RUN(testBitAndBitAndArgImmImm32(1, 6, 6));
+    RUN(testBitAndBitAndArgImmImm32(0xffff, 24, 7));
+    RUN(testBitAndImmBitAndArgImm32(7, 2, 3));
+    RUN(testBitAndImmBitAndArgImm32(6, 1, 6));
+    RUN(testBitAndImmBitAndArgImm32(24, 0xffff, 7));
+    RUN_BINARY(testBitAndWithMaskReturnsBooleans, int64Operands(), int64Operands());
+    RUN_UNARY(testBitAndArgDouble, floatingPointOperands<double>());
+    RUN_BINARY(testBitAndArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBitAndArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBitAndImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_UNARY(testBitAndArgFloat, floatingPointOperands<float>());
+    RUN_BINARY(testBitAndArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBitAndArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBitAndImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBitAndArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+    RUN(testBitOrArgs(43, 43));
+    RUN(testBitOrArgs(43, 0));
+    RUN(testBitOrArgs(10, 3));
+    RUN(testBitOrArgs(42, 0xffffffffffffffff));
+    RUN(testBitOrSameArg(43));
+    RUN(testBitOrSameArg(0));
+    RUN(testBitOrSameArg(3));
+    RUN(testBitOrSameArg(0xffffffffffffffff));
+    RUN(testBitOrImms(43, 43));
+    RUN(testBitOrImms(43, 0));
+    RUN(testBitOrImms(10, 3));
+    RUN(testBitOrImms(42, 0xffffffffffffffff));
+    RUN(testBitOrArgImm(43, 43));
+    RUN(testBitOrArgImm(43, 0));
+    RUN(testBitOrArgImm(10, 3));
+    RUN(testBitOrArgImm(42, 0xffffffffffffffff));
+    RUN(testBitOrImmArg(43, 43));
+    RUN(testBitOrImmArg(43, 0));
+    RUN(testBitOrImmArg(10, 3));
+    RUN(testBitOrImmArg(42, 0xffffffffffffffff));
+    RUN(testBitOrBitOrArgImmImm(2, 7, 3));
+    RUN(testBitOrBitOrArgImmImm(1, 6, 6));
+    RUN(testBitOrBitOrArgImmImm(0xffff, 24, 7));
+    RUN(testBitOrImmBitOrArgImm(7, 2, 3));
+    RUN(testBitOrImmBitOrArgImm(6, 1, 6));
+    RUN(testBitOrImmBitOrArgImm(24, 0xffff, 7));
+    RUN(testBitOrArgs32(43, 43));
+    RUN(testBitOrArgs32(43, 0));
+    RUN(testBitOrArgs32(10, 3));
+    RUN(testBitOrArgs32(42, 0xffffffff));
+    RUN(testBitOrSameArg32(43));
+    RUN(testBitOrSameArg32(0));
+    RUN(testBitOrSameArg32(3));
+    RUN(testBitOrSameArg32(0xffffffff));
+    RUN(testBitOrImms32(43, 43));
+    RUN(testBitOrImms32(43, 0));
+    RUN(testBitOrImms32(10, 3));
+    RUN(testBitOrImms32(42, 0xffffffff));
+    RUN(testBitOrArgImm32(43, 43));
+    RUN(testBitOrArgImm32(43, 0));
+    RUN(testBitOrArgImm32(10, 3));
+    RUN(testBitOrArgImm32(42, 0xffffffff));
+    RUN(testBitOrImmArg32(43, 43));
+    RUN(testBitOrImmArg32(43, 0));
+    RUN(testBitOrImmArg32(10, 3));
+    RUN(testBitOrImmArg32(42, 0xffffffff));
+    RUN(testBitOrBitOrArgImmImm32(2, 7, 3));
+    RUN(testBitOrBitOrArgImmImm32(1, 6, 6));
+    RUN(testBitOrBitOrArgImmImm32(0xffff, 24, 7));
+    RUN(testBitOrImmBitOrArgImm32(7, 2, 3));
+    RUN(testBitOrImmBitOrArgImm32(6, 1, 6));
+    RUN(testBitOrImmBitOrArgImm32(24, 0xffff, 7));
+    RUN_UNARY(testBitOrArgDouble, floatingPointOperands<double>());
+    RUN_BINARY(testBitOrArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBitOrArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBitOrImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_UNARY(testBitOrArgFloat, floatingPointOperands<float>());
+    RUN_BINARY(testBitOrArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBitOrArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBitOrImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBitOrArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+    RUN_BINARY(testBitXorArgs, int64Operands(), int64Operands());
+    RUN_UNARY(testBitXorSameArg, int64Operands());
+    RUN_BINARY(testBitXorImms, int64Operands(), int64Operands());
+    RUN_BINARY(testBitXorArgImm, int64Operands(), int64Operands());
+    RUN_BINARY(testBitXorImmArg, int64Operands(), int64Operands());
+    RUN(testBitXorBitXorArgImmImm(2, 7, 3));
+    RUN(testBitXorBitXorArgImmImm(1, 6, 6));
+    RUN(testBitXorBitXorArgImmImm(0xffff, 24, 7));
+    RUN(testBitXorImmBitXorArgImm(7, 2, 3));
+    RUN(testBitXorImmBitXorArgImm(6, 1, 6));
+    RUN(testBitXorImmBitXorArgImm(24, 0xffff, 7));
+    RUN(testBitXorArgs32(43, 43));
+    RUN(testBitXorArgs32(43, 0));
+    RUN(testBitXorArgs32(10, 3));
+    RUN(testBitXorArgs32(42, 0xffffffff));
+    RUN(testBitXorSameArg32(43));
+    RUN(testBitXorSameArg32(0));
+    RUN(testBitXorSameArg32(3));
+    RUN(testBitXorSameArg32(0xffffffff));
+    RUN(testBitXorImms32(43, 43));
+    RUN(testBitXorImms32(43, 0));
+    RUN(testBitXorImms32(10, 3));
+    RUN(testBitXorImms32(42, 0xffffffff));
+    RUN(testBitXorArgImm32(43, 43));
+    RUN(testBitXorArgImm32(43, 0));
+    RUN(testBitXorArgImm32(10, 3));
+    RUN(testBitXorArgImm32(42, 0xffffffff));
+    RUN(testBitXorImmArg32(43, 43));
+    RUN(testBitXorImmArg32(43, 0));
+    RUN(testBitXorImmArg32(10, 3));
+    RUN(testBitXorImmArg32(42, 0xffffffff));
+    RUN(testBitXorBitXorArgImmImm32(2, 7, 3));
+    RUN(testBitXorBitXorArgImmImm32(1, 6, 6));
+    RUN(testBitXorBitXorArgImmImm32(0xffff, 24, 7));
+    RUN(testBitXorImmBitXorArgImm32(7, 2, 3));
+    RUN(testBitXorImmBitXorArgImm32(6, 1, 6));
+    RUN(testBitXorImmBitXorArgImm32(24, 0xffff, 7));
+
+    RUN_UNARY(testBitNotArg, int64Operands());
+    RUN_UNARY(testBitNotImm, int64Operands());
+    RUN_UNARY(testBitNotMem, int64Operands());
+    RUN_UNARY(testBitNotArg32, int32Operands());
+    RUN_UNARY(testBitNotImm32, int32Operands());
+    RUN_UNARY(testBitNotMem32, int32Operands());
+    RUN_BINARY(testBitNotOnBooleanAndBranch32, int32Operands(), int32Operands());
+
+    RUN(testShlArgs(1, 0));
+    RUN(testShlArgs(1, 1));
+    RUN(testShlArgs(1, 62));
+    RUN(testShlArgs(0xffffffffffffffff, 0));
+    RUN(testShlArgs(0xffffffffffffffff, 1));
+    RUN(testShlArgs(0xffffffffffffffff, 63));
+    RUN(testShlImms(1, 0));
+    RUN(testShlImms(1, 1));
+    RUN(testShlImms(1, 62));
+    RUN(testShlImms(1, 65));
+    RUN(testShlImms(0xffffffffffffffff, 0));
+    RUN(testShlImms(0xffffffffffffffff, 1));
+    RUN(testShlImms(0xffffffffffffffff, 63));
+    RUN(testShlArgImm(1, 0));
+    RUN(testShlArgImm(1, 1));
+    RUN(testShlArgImm(1, 62));
+    RUN(testShlArgImm(1, 65));
+    RUN(testShlArgImm(0xffffffffffffffff, 0));
+    RUN(testShlArgImm(0xffffffffffffffff, 1));
+    RUN(testShlArgImm(0xffffffffffffffff, 63));
+    RUN(testShlArg32(2));
+    RUN(testShlArgs32(1, 0));
+    RUN(testShlArgs32(1, 1));
+    RUN(testShlArgs32(1, 62));
+    RUN(testShlImms32(1, 33));
+    RUN(testShlArgs32(0xffffffff, 0));
+    RUN(testShlArgs32(0xffffffff, 1));
+    RUN(testShlArgs32(0xffffffff, 63));
+    RUN(testShlImms32(1, 0));
+    RUN(testShlImms32(1, 1));
+    RUN(testShlImms32(1, 62));
+    RUN(testShlImms32(1, 33));
+    RUN(testShlImms32(0xffffffff, 0));
+    RUN(testShlImms32(0xffffffff, 1));
+    RUN(testShlImms32(0xffffffff, 63));
+    RUN(testShlArgImm32(1, 0));
+    RUN(testShlArgImm32(1, 1));
+    RUN(testShlArgImm32(1, 62));
+    RUN(testShlArgImm32(0xffffffff, 0));
+    RUN(testShlArgImm32(0xffffffff, 1));
+    RUN(testShlArgImm32(0xffffffff, 63));
+
+    RUN(testSShrArgs(1, 0));
+    RUN(testSShrArgs(1, 1));
+    RUN(testSShrArgs(1, 62));
+    RUN(testSShrArgs(0xffffffffffffffff, 0));
+    RUN(testSShrArgs(0xffffffffffffffff, 1));
+    RUN(testSShrArgs(0xffffffffffffffff, 63));
+    RUN(testSShrImms(1, 0));
+    RUN(testSShrImms(1, 1));
+    RUN(testSShrImms(1, 62));
+    RUN(testSShrImms(1, 65));
+    RUN(testSShrImms(0xffffffffffffffff, 0));
+    RUN(testSShrImms(0xffffffffffffffff, 1));
+    RUN(testSShrImms(0xffffffffffffffff, 63));
+    RUN(testSShrArgImm(1, 0));
+    RUN(testSShrArgImm(1, 1));
+    RUN(testSShrArgImm(1, 62));
+    RUN(testSShrArgImm(1, 65));
+    RUN(testSShrArgImm(0xffffffffffffffff, 0));
+    RUN(testSShrArgImm(0xffffffffffffffff, 1));
+    RUN(testSShrArgImm(0xffffffffffffffff, 63));
+    RUN(testSShrArg32(32));
+    RUN(testSShrArgs32(1, 0));
+    RUN(testSShrArgs32(1, 1));
+    RUN(testSShrArgs32(1, 62));
+    RUN(testSShrArgs32(1, 33));
+    RUN(testSShrArgs32(0xffffffff, 0));
+    RUN(testSShrArgs32(0xffffffff, 1));
+    RUN(testSShrArgs32(0xffffffff, 63));
+    RUN(testSShrImms32(1, 0));
+    RUN(testSShrImms32(1, 1));
+    RUN(testSShrImms32(1, 62));
+    RUN(testSShrImms32(1, 33));
+    RUN(testSShrImms32(0xffffffff, 0));
+    RUN(testSShrImms32(0xffffffff, 1));
+    RUN(testSShrImms32(0xffffffff, 63));
+    RUN(testSShrArgImm32(1, 0));
+    RUN(testSShrArgImm32(1, 1));
+    RUN(testSShrArgImm32(1, 62));
+    RUN(testSShrArgImm32(0xffffffff, 0));
+    RUN(testSShrArgImm32(0xffffffff, 1));
+    RUN(testSShrArgImm32(0xffffffff, 63));
+
+    RUN(testZShrArgs(1, 0));
+    RUN(testZShrArgs(1, 1));
+    RUN(testZShrArgs(1, 62));
+    RUN(testZShrArgs(0xffffffffffffffff, 0));
+    RUN(testZShrArgs(0xffffffffffffffff, 1));
+    RUN(testZShrArgs(0xffffffffffffffff, 63));
+    RUN(testZShrImms(1, 0));
+    RUN(testZShrImms(1, 1));
+    RUN(testZShrImms(1, 62));
+    RUN(testZShrImms(1, 65));
+    RUN(testZShrImms(0xffffffffffffffff, 0));
+    RUN(testZShrImms(0xffffffffffffffff, 1));
+    RUN(testZShrImms(0xffffffffffffffff, 63));
+    RUN(testZShrArgImm(1, 0));
+    RUN(testZShrArgImm(1, 1));
+    RUN(testZShrArgImm(1, 62));
+    RUN(testZShrArgImm(1, 65));
+    RUN(testZShrArgImm(0xffffffffffffffff, 0));
+    RUN(testZShrArgImm(0xffffffffffffffff, 1));
+    RUN(testZShrArgImm(0xffffffffffffffff, 63));
+    RUN(testZShrArg32(32));
+    RUN(testZShrArgs32(1, 0));
+    RUN(testZShrArgs32(1, 1));
+    RUN(testZShrArgs32(1, 62));
+    RUN(testZShrArgs32(1, 33));
+    RUN(testZShrArgs32(0xffffffff, 0));
+    RUN(testZShrArgs32(0xffffffff, 1));
+    RUN(testZShrArgs32(0xffffffff, 63));
+    RUN(testZShrImms32(1, 0));
+    RUN(testZShrImms32(1, 1));
+    RUN(testZShrImms32(1, 62));
+    RUN(testZShrImms32(1, 33));
+    RUN(testZShrImms32(0xffffffff, 0));
+    RUN(testZShrImms32(0xffffffff, 1));
+    RUN(testZShrImms32(0xffffffff, 63));
+    RUN(testZShrArgImm32(1, 0));
+    RUN(testZShrArgImm32(1, 1));
+    RUN(testZShrArgImm32(1, 62));
+    RUN(testZShrArgImm32(0xffffffff, 0));
+    RUN(testZShrArgImm32(0xffffffff, 1));
+    RUN(testZShrArgImm32(0xffffffff, 63));
+
+    RUN_UNARY(testClzArg64, int64Operands());
+    RUN_UNARY(testClzMem64, int64Operands());
+    RUN_UNARY(testClzArg32, int32Operands());
+    RUN_UNARY(testClzMem32, int64Operands());
+
+    RUN_UNARY(testAbsArg, floatingPointOperands<double>());
+    RUN_UNARY(testAbsImm, floatingPointOperands<double>());
+    RUN_UNARY(testAbsMem, floatingPointOperands<double>());
+    RUN_UNARY(testAbsAbsArg, floatingPointOperands<double>());
+    RUN_UNARY(testAbsBitwiseCastArg, floatingPointOperands<double>());
+    RUN_UNARY(testBitwiseCastAbsBitwiseCastArg, floatingPointOperands<double>());
+    RUN_UNARY(testAbsArg, floatingPointOperands<float>());
+    RUN_UNARY(testAbsImm, floatingPointOperands<float>());
+    RUN_UNARY(testAbsMem, floatingPointOperands<float>());
+    RUN_UNARY(testAbsAbsArg, floatingPointOperands<float>());
+    RUN_UNARY(testAbsBitwiseCastArg, floatingPointOperands<float>());
+    RUN_UNARY(testBitwiseCastAbsBitwiseCastArg, floatingPointOperands<float>());
+    RUN_UNARY(testAbsArgWithUselessDoubleConversion, floatingPointOperands<float>());
+    RUN_UNARY(testAbsArgWithEffectfulDoubleConversion, floatingPointOperands<float>());
+
+    RUN_UNARY(testCeilArg, floatingPointOperands<double>());
+    RUN_UNARY(testCeilImm, floatingPointOperands<double>());
+    RUN_UNARY(testCeilMem, floatingPointOperands<double>());
+    RUN_UNARY(testCeilCeilArg, floatingPointOperands<double>());
+    RUN_UNARY(testFloorCeilArg, floatingPointOperands<double>());
+    RUN_UNARY(testCeilIToD64, int64Operands());
+    RUN_UNARY(testCeilIToD32, int32Operands());
+    RUN_UNARY(testCeilArg, floatingPointOperands<float>());
+    RUN_UNARY(testCeilImm, floatingPointOperands<float>());
+    RUN_UNARY(testCeilMem, floatingPointOperands<float>());
+    RUN_UNARY(testCeilCeilArg, floatingPointOperands<float>());
+    RUN_UNARY(testFloorCeilArg, floatingPointOperands<float>());
+    RUN_UNARY(testCeilArgWithUselessDoubleConversion, floatingPointOperands<float>());
+    RUN_UNARY(testCeilArgWithEffectfulDoubleConversion, floatingPointOperands<float>());
+
+    RUN_UNARY(testFloorArg, floatingPointOperands<double>());
+    RUN_UNARY(testFloorImm, floatingPointOperands<double>());
+    RUN_UNARY(testFloorMem, floatingPointOperands<double>());
+    RUN_UNARY(testFloorFloorArg, floatingPointOperands<double>());
+    RUN_UNARY(testCeilFloorArg, floatingPointOperands<double>());
+    RUN_UNARY(testFloorIToD64, int64Operands());
+    RUN_UNARY(testFloorIToD32, int32Operands());
+    RUN_UNARY(testFloorArg, floatingPointOperands<float>());
+    RUN_UNARY(testFloorImm, floatingPointOperands<float>());
+    RUN_UNARY(testFloorMem, floatingPointOperands<float>());
+    RUN_UNARY(testFloorFloorArg, floatingPointOperands<float>());
+    RUN_UNARY(testCeilFloorArg, floatingPointOperands<float>());
+    RUN_UNARY(testFloorArgWithUselessDoubleConversion, floatingPointOperands<float>());
+    RUN_UNARY(testFloorArgWithEffectfulDoubleConversion, floatingPointOperands<float>());
+
+    RUN_UNARY(testSqrtArg, floatingPointOperands<double>());
+    RUN_UNARY(testSqrtImm, floatingPointOperands<double>());
+    RUN_UNARY(testSqrtMem, floatingPointOperands<double>());
+    RUN_UNARY(testSqrtArg, floatingPointOperands<float>());
+    RUN_UNARY(testSqrtImm, floatingPointOperands<float>());
+    RUN_UNARY(testSqrtMem, floatingPointOperands<float>());
+    RUN_UNARY(testSqrtArgWithUselessDoubleConversion, floatingPointOperands<float>());
+    RUN_UNARY(testSqrtArgWithEffectfulDoubleConversion, floatingPointOperands<float>());
+
+    RUN_BINARY(testCompareTwoFloatToDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testCompareOneFloatToDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testCompareFloatToDoubleThroughPhi, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testDoubleToFloatThroughPhi, floatingPointOperands());
+    RUN(testReduceFloatToDoubleValidates());
+    RUN_UNARY(testDoubleProducerPhiToFloatConversion, floatingPointOperands());
+    RUN_UNARY(testDoubleProducerPhiToFloatConversionWithDoubleConsumer, floatingPointOperands());
+    RUN_BINARY(testDoubleProducerPhiWithNonFloatConst, floatingPointOperands(), floatingPointOperands());
+
+    RUN_UNARY(testDoubleArgToInt64BitwiseCast, floatingPointOperands<double>());
+    RUN_UNARY(testDoubleImmToInt64BitwiseCast, floatingPointOperands<double>());
+    RUN_UNARY(testTwoBitwiseCastOnDouble, floatingPointOperands<double>());
+    RUN_UNARY(testBitwiseCastOnDoubleInMemory, floatingPointOperands<double>());
+    RUN_UNARY(testBitwiseCastOnDoubleInMemoryIndexed, floatingPointOperands<double>());
+    RUN_UNARY(testInt64BArgToDoubleBitwiseCast, int64Operands());
+    RUN_UNARY(testInt64BImmToDoubleBitwiseCast, int64Operands());
+    RUN_UNARY(testTwoBitwiseCastOnInt64, int64Operands());
+    RUN_UNARY(testBitwiseCastOnInt64InMemory, int64Operands());
+    RUN_UNARY(testBitwiseCastOnInt64InMemoryIndexed, int64Operands());
+    RUN_UNARY(testFloatImmToInt32BitwiseCast, floatingPointOperands<float>());
+    RUN_UNARY(testBitwiseCastOnFloatInMemory, floatingPointOperands<float>());
+    RUN_UNARY(testInt32BArgToFloatBitwiseCast, int32Operands());
+    RUN_UNARY(testInt32BImmToFloatBitwiseCast, int32Operands());
+    RUN_UNARY(testTwoBitwiseCastOnInt32, int32Operands());
+    RUN_UNARY(testBitwiseCastOnInt32InMemory, int32Operands());
+
+    RUN_UNARY(testConvertDoubleToFloatArg, floatingPointOperands<double>());
+    RUN_UNARY(testConvertDoubleToFloatImm, floatingPointOperands<double>());
+    RUN_UNARY(testConvertDoubleToFloatMem, floatingPointOperands<double>());
+    RUN_UNARY(testConvertFloatToDoubleArg, floatingPointOperands<float>());
+    RUN_UNARY(testConvertFloatToDoubleImm, floatingPointOperands<float>());
+    RUN_UNARY(testConvertFloatToDoubleMem, floatingPointOperands<float>());
+    RUN_UNARY(testConvertDoubleToFloatToDoubleToFloat, floatingPointOperands<double>());
+    RUN_UNARY(testStoreFloat, floatingPointOperands<double>());
+    RUN_UNARY(testStoreDoubleConstantAsFloat, floatingPointOperands<double>());
+    RUN_UNARY(testLoadFloatConvertDoubleConvertFloatStoreFloat, floatingPointOperands<float>());
+    RUN_UNARY(testFroundArg, floatingPointOperands<double>());
+    RUN_UNARY(testFroundMem, floatingPointOperands<double>());
+
+    RUN(testIToD64Arg());
+    RUN(testIToF64Arg());
+    RUN(testIToD32Arg());
+    RUN(testIToF32Arg());
+    RUN(testIToD64Mem());
+    RUN(testIToF64Mem());
+    RUN(testIToD32Mem());
+    RUN(testIToF32Mem());
+    RUN_UNARY(testIToD64Imm, int64Operands());
+    RUN_UNARY(testIToF64Imm, int64Operands());
+    RUN_UNARY(testIToD32Imm, int32Operands());
+    RUN_UNARY(testIToF32Imm, int32Operands());
+    RUN(testIToDReducedToIToF64Arg());
+    RUN(testIToDReducedToIToF32Arg());
+
+    RUN(testStore32(44));
+    RUN(testStoreConstant(49));
+    RUN(testStoreConstantPtr(49));
+    RUN(testStore8Arg());
+    RUN(testStore8Imm());
+    RUN(testStorePartial8BitRegisterOnX86());
+    RUN(testStore16Arg());
+    RUN(testStore16Imm());
+    RUN(testTrunc((static_cast<int64_t>(1) << 40) + 42));
+    RUN(testAdd1(45));
+    RUN(testAdd1Ptr(51));
+    RUN(testAdd1Ptr(bitwise_cast<intptr_t>(vm)));
+    RUN(testNeg32(52));
+    RUN(testNegPtr(53));
+    RUN(testStoreAddLoad32(46));
+    RUN(testStoreAddLoadImm32(46));
+    RUN(testStoreAddLoad64(4600));
+    RUN(testStoreAddLoadImm64(4600));
+    RUN(testStoreAddLoad8(4, Load8Z));
+    RUN(testStoreAddLoadImm8(4, Load8Z));
+    RUN(testStoreAddLoad8(4, Load8S));
+    RUN(testStoreAddLoadImm8(4, Load8S));
+    RUN(testStoreAddLoad16(6, Load16Z));
+    RUN(testStoreAddLoadImm16(6, Load16Z));
+    RUN(testStoreAddLoad16(6, Load16S));
+    RUN(testStoreAddLoadImm16(6, Load16S));
+    RUN(testStoreAddLoad32Index(46));
+    RUN(testStoreAddLoadImm32Index(46));
+    RUN(testStoreAddLoad64Index(4600));
+    RUN(testStoreAddLoadImm64Index(4600));
+    RUN(testStoreAddLoad8Index(4, Load8Z));
+    RUN(testStoreAddLoadImm8Index(4, Load8Z));
+    RUN(testStoreAddLoad8Index(4, Load8S));
+    RUN(testStoreAddLoadImm8Index(4, Load8S));
+    RUN(testStoreAddLoad16Index(6, Load16Z));
+    RUN(testStoreAddLoadImm16Index(6, Load16Z));
+    RUN(testStoreAddLoad16Index(6, Load16S));
+    RUN(testStoreAddLoadImm16Index(6, Load16S));
+    RUN(testStoreSubLoad(46));
+    RUN(testStoreAddLoadInterference(52));
+    RUN(testStoreAddAndLoad(47, 0xffff));
+    RUN(testStoreAddAndLoad(470000, 0xffff));
+    RUN(testStoreNegLoad32(54));
+    RUN(testStoreNegLoadPtr(55));
+    RUN(testAdd1Uncommuted(48));
+    RUN(testLoadOffset());
+    RUN(testLoadOffsetNotConstant());
+    RUN(testLoadOffsetUsingAdd());
+    RUN(testLoadOffsetUsingAddInterference());
+    RUN(testLoadOffsetUsingAddNotConstant());
+    RUN(testLoadAddrShift(0));
+    RUN(testLoadAddrShift(1));
+    RUN(testLoadAddrShift(2));
+    RUN(testLoadAddrShift(3));
+    RUN(testFramePointer());
+    RUN(testOverrideFramePointer());
+    RUN(testStackSlot());
+    RUN(testLoadFromFramePointer());
+    RUN(testStoreLoadStackSlot(50));
+    
+    RUN(testBranch());
+    RUN(testBranchPtr());
+    RUN(testDiamond());
+    RUN(testBranchNotEqual());
+    RUN(testBranchNotEqualCommute());
+    RUN(testBranchNotEqualNotEqual());
+    RUN(testBranchEqual());
+    RUN(testBranchEqualEqual());
+    RUN(testBranchEqualCommute());
+    RUN(testBranchEqualEqual1());
+    RUN_BINARY(testBranchEqualOrUnorderedArgs, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBranchEqualOrUnorderedArgs, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBranchEqualOrUnorderedDoubleArgImm, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBranchEqualOrUnorderedFloatArgImm, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBranchEqualOrUnorderedDoubleImms, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBranchEqualOrUnorderedFloatImms, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBranchEqualOrUnorderedFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands<double>(), floatingPointOperands<double>());
+    RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN(testBranchFold(42));
+    RUN(testBranchFold(0));
+    RUN(testDiamondFold(42));
+    RUN(testDiamondFold(0));
+    RUN(testBranchNotEqualFoldPtr(42));
+    RUN(testBranchNotEqualFoldPtr(0));
+    RUN(testBranchEqualFoldPtr(42));
+    RUN(testBranchEqualFoldPtr(0));
+    RUN(testBranchLoadPtr());
+    RUN(testBranchLoad32());
+    RUN(testBranchLoad8S());
+    RUN(testBranchLoad8Z());
+    RUN(testBranchLoad16S());
+    RUN(testBranchLoad16Z());
+    RUN(testBranch8WithLoad8ZIndex());
+
+    RUN(testComplex(64, 128));
+    RUN(testComplex(4, 128));
+    RUN(testComplex(4, 256));
+    RUN(testComplex(4, 384));
+
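+    // Patchpoint tests. A Patchpoint is a B3 value that lets the client emit arbitrary
+    // machine code at that point; the cases below exercise result/argument representations,
+    // clobbered register sets, and scratch-register requests.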
+    RUN(testSimplePatchpoint());
+    RUN(testSimplePatchpointWithoutOuputClobbersGPArgs());
+    RUN(testSimplePatchpointWithOuputClobbersGPArgs());
+    RUN(testSimplePatchpointWithoutOuputClobbersFPArgs());
+    RUN(testSimplePatchpointWithOuputClobbersFPArgs());
+    RUN(testPatchpointWithEarlyClobber());
+    RUN(testPatchpointCallArg());
+    RUN(testPatchpointFixedRegister());
+    RUN(testPatchpointAny(ValueRep::WarmAny));
+    RUN(testPatchpointAny(ValueRep::ColdAny));
+    RUN(testPatchpointGPScratch());
+    RUN(testPatchpointFPScratch());
+    RUN(testPatchpointLotsOfLateAnys());
+    RUN(testPatchpointAnyImm(ValueRep::WarmAny));
+    RUN(testPatchpointAnyImm(ValueRep::ColdAny));
+    RUN(testPatchpointAnyImm(ValueRep::LateColdAny));
+    RUN(testPatchpointManyImms());
+    RUN(testPatchpointWithRegisterResult());
+    RUN(testPatchpointWithStackArgumentResult());
+    RUN(testPatchpointWithAnyResult());
+    RUN(testSimpleCheck());
+    RUN(testCheckFalse());
+    RUN(testCheckTrue());
+    RUN(testCheckLessThan());
+    RUN(testCheckMegaCombo());
+    RUN(testCheckTrickyMegaCombo());
+    RUN(testCheckTwoMegaCombos());
+    RUN(testCheckTwoNonRedundantMegaCombos());
+    RUN(testCheckAddImm());
+    RUN(testCheckAddImmCommute());
+    RUN(testCheckAddImmSomeRegister());
+    RUN(testCheckAdd());
+    RUN(testCheckAdd64());
+    RUN(testCheckAddFold(100, 200));
+    RUN(testCheckAddFoldFail(2147483647, 100));
+    RUN(testCheckAddArgumentAliasing64());
+    RUN(testCheckAddArgumentAliasing32());
+    RUN(testCheckAddSelfOverflow64());
+    RUN(testCheckAddSelfOverflow32());
+    RUN(testCheckSubImm());
+    RUN(testCheckSubBadImm());
+    RUN(testCheckSub());
+    RUN(testCheckSub64());
+    RUN(testCheckSubFold(100, 200));
+    RUN(testCheckSubFoldFail(-2147483647, 100));
+    RUN(testCheckNeg());
+    RUN(testCheckNeg64());
+    RUN(testCheckMul());
+    RUN(testCheckMulMemory());
+    RUN(testCheckMul2());
+    RUN(testCheckMul64());
+    RUN(testCheckMulFold(100, 200));
+    RUN(testCheckMulFoldFail(2147483647, 100));
+    RUN(testCheckMulArgumentAliasing64());
+    RUN(testCheckMulArgumentAliasing32());
+
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(Equal, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(NotEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(LessThan, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(GreaterThan, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(LessEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(GreaterEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(Below, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(Above, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(BelowEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(AboveEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(BitAnd, a, b); }, int64Operands(), int64Operands());
+
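+    // Equal on doubles must follow IEEE 754: 0 compares equal to -0, and any comparison
+    // involving a NaN (PNaN is JSC's pure NaN) is false, including NaN == NaN.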
+    RUN(testEqualDouble(42, 42, true));
+    RUN(testEqualDouble(0, -0, true));
+    RUN(testEqualDouble(42, 43, false));
+    RUN(testEqualDouble(PNaN, 42, false));
+    RUN(testEqualDouble(42, PNaN, false));
+    RUN(testEqualDouble(PNaN, PNaN, false));
+
+    RUN(testLoad(60));
+    RUN(testLoad(-60));
+    RUN(testLoad(1000));
+    RUN(testLoad(-1000));
+    RUN(testLoad(1000000));
+    RUN(testLoad(-1000000));
+    RUN(testLoad(1000000000));
+    RUN(testLoad(-1000000000));
+    RUN_UNARY(testLoad, int64Operands());
+    RUN_UNARY(testLoad, floatingPointOperands<float>());
+    RUN_UNARY(testLoad, floatingPointOperands<double>());
+    
+    RUN(testLoad(Load8S, 60));
+    RUN(testLoad(Load8S, -60));
+    RUN(testLoad(Load8S, 1000));
+    RUN(testLoad(Load8S, -1000));
+    RUN(testLoad(Load8S, 1000000));
+    RUN(testLoad(Load8S, -1000000));
+    RUN(testLoad(Load8S, 1000000000));
+    RUN(testLoad(Load8S, -1000000000));
+    
+    RUN(testLoad(Load8Z, 60));
+    RUN(testLoad(Load8Z, -60));
+    RUN(testLoad(Load8Z, 1000));
+    RUN(testLoad(Load8Z, -1000));
+    RUN(testLoad(Load8Z, 1000000));
+    RUN(testLoad(Load8Z, -1000000));
+    RUN(testLoad(Load8Z, 1000000000));
+    RUN(testLoad(Load8Z, -1000000000));
+
+    RUN(testLoad(Load16S, 60));
+    RUN(testLoad(Load16S, -60));
+    RUN(testLoad(Load16S, 1000));
+    RUN(testLoad(Load16S, -1000));
+    RUN(testLoad(Load16S, 1000000));
+    RUN(testLoad(Load16S, -1000000));
+    RUN(testLoad(Load16S, 1000000000));
+    RUN(testLoad(Load16S, -1000000000));
+    
+    RUN(testLoad(Load16Z, 60));
+    RUN(testLoad(Load16Z, -60));
+    RUN(testLoad(Load16Z, 1000));
+    RUN(testLoad(Load16Z, -1000));
+    RUN(testLoad(Load16Z, 1000000));
+    RUN(testLoad(Load16Z, -1000000));
+    RUN(testLoad(Load16Z, 1000000000));
+    RUN(testLoad(Load16Z, -1000000000));
+
+    RUN(testSpillGP());
+    RUN(testSpillFP());
+
+    RUN(testInt32ToDoublePartialRegisterStall());
+    RUN(testInt32ToDoublePartialRegisterWithoutStall());
+
+    RUN(testCallSimple(1, 2));
+    RUN(testCallRare(1, 2));
+    RUN(testCallRareLive(1, 2, 3));
+    RUN(testCallSimplePure(1, 2));
+    RUN(testCallFunctionWithHellaArguments());
+
+    RUN(testReturnDouble(0.0));
+    RUN(testReturnDouble(negativeZero()));
+    RUN(testReturnDouble(42.5));
+    RUN_UNARY(testReturnFloat, floatingPointOperands<float>());
+
+    RUN(testCallSimpleDouble(1, 2));
+    RUN(testCallFunctionWithHellaDoubleArguments());
+    RUN_BINARY(testCallSimpleFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN(testCallFunctionWithHellaFloatArguments());
+
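+    // "Chill" Div/Mod use non-trapping, JavaScript-style semantics: as the expected
+    // results below spell out, x / 0 yields 0 and INT_MIN / -1 yields INT_MIN.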
+    RUN(testChillDiv(4, 2, 2));
+    RUN(testChillDiv(1, 0, 0));
+    RUN(testChillDiv(0, 0, 0));
+    RUN(testChillDiv(1, -1, -1));
+    RUN(testChillDiv(-2147483647 - 1, 0, 0));
+    RUN(testChillDiv(-2147483647 - 1, 1, -2147483647 - 1));
+    RUN(testChillDiv(-2147483647 - 1, -1, -2147483647 - 1));
+    RUN(testChillDiv(-2147483647 - 1, 2, -1073741824));
+    RUN(testChillDiv64(4, 2, 2));
+    RUN(testChillDiv64(1, 0, 0));
+    RUN(testChillDiv64(0, 0, 0));
+    RUN(testChillDiv64(1, -1, -1));
+    RUN(testChillDiv64(-9223372036854775807ll - 1, 0, 0));
+    RUN(testChillDiv64(-9223372036854775807ll - 1, 1, -9223372036854775807ll - 1));
+    RUN(testChillDiv64(-9223372036854775807ll - 1, -1, -9223372036854775807ll - 1));
+    RUN(testChillDiv64(-9223372036854775807ll - 1, 2, -4611686018427387904));
+    RUN(testChillDivTwice(4, 2, 6, 2, 5));
+    RUN(testChillDivTwice(4, 0, 6, 2, 3));
+    RUN(testChillDivTwice(4, 2, 6, 0, 2));
+
+    RUN_UNARY(testModArg, int64Operands());
+    RUN_BINARY(testModArgs, int64Operands(), int64Operands());
+    RUN_BINARY(testModImms, int64Operands(), int64Operands());
+    RUN_UNARY(testModArg32, int32Operands());
+    RUN_BINARY(testModArgs32, int32Operands(), int32Operands());
+    RUN_BINARY(testModImms32, int32Operands(), int32Operands());
+    RUN_UNARY(testChillModArg, int64Operands());
+    RUN_BINARY(testChillModArgs, int64Operands(), int64Operands());
+    RUN_BINARY(testChillModImms, int64Operands(), int64Operands());
+    RUN_UNARY(testChillModArg32, int32Operands());
+    RUN_BINARY(testChillModArgs32, int32Operands(), int32Operands());
+    RUN_BINARY(testChillModImms32, int32Operands(), int32Operands());
+
+    RUN(testSwitch(0, 1));
+    RUN(testSwitch(1, 1));
+    RUN(testSwitch(2, 1));
+    RUN(testSwitch(2, 2));
+    RUN(testSwitch(10, 1));
+    RUN(testSwitch(10, 2));
+    RUN(testSwitch(100, 1));
+    RUN(testSwitch(100, 100));
+
+    RUN(testSwitchChillDiv(0, 1));
+    RUN(testSwitchChillDiv(1, 1));
+    RUN(testSwitchChillDiv(2, 1));
+    RUN(testSwitchChillDiv(2, 2));
+    RUN(testSwitchChillDiv(10, 1));
+    RUN(testSwitchChillDiv(10, 2));
+    RUN(testSwitchChillDiv(100, 1));
+    RUN(testSwitchChillDiv(100, 100));
+
+    RUN(testSwitchTargettingSameBlock());
+    RUN(testSwitchTargettingSameBlockFoldPathConstant());
+
+    RUN(testTrunc(0));
+    RUN(testTrunc(1));
+    RUN(testTrunc(-1));
+    RUN(testTrunc(1000000000000ll));
+    RUN(testTrunc(-1000000000000ll));
+    RUN(testTruncFold(0));
+    RUN(testTruncFold(1));
+    RUN(testTruncFold(-1));
+    RUN(testTruncFold(1000000000000ll));
+    RUN(testTruncFold(-1000000000000ll));
+    
+    RUN(testZExt32(0));
+    RUN(testZExt32(1));
+    RUN(testZExt32(-1));
+    RUN(testZExt32(1000000000ll));
+    RUN(testZExt32(-1000000000ll));
+    RUN(testZExt32Fold(0));
+    RUN(testZExt32Fold(1));
+    RUN(testZExt32Fold(-1));
+    RUN(testZExt32Fold(1000000000ll));
+    RUN(testZExt32Fold(-1000000000ll));
+
+    RUN(testSExt32(0));
+    RUN(testSExt32(1));
+    RUN(testSExt32(-1));
+    RUN(testSExt32(1000000000ll));
+    RUN(testSExt32(-1000000000ll));
+    RUN(testSExt32Fold(0));
+    RUN(testSExt32Fold(1));
+    RUN(testSExt32Fold(-1));
+    RUN(testSExt32Fold(1000000000ll));
+    RUN(testSExt32Fold(-1000000000ll));
+
+    RUN(testTruncZExt32(0));
+    RUN(testTruncZExt32(1));
+    RUN(testTruncZExt32(-1));
+    RUN(testTruncZExt32(1000000000ll));
+    RUN(testTruncZExt32(-1000000000ll));
+    RUN(testTruncSExt32(0));
+    RUN(testTruncSExt32(1));
+    RUN(testTruncSExt32(-1));
+    RUN(testTruncSExt32(1000000000ll));
+    RUN(testTruncSExt32(-1000000000ll));
+
+    RUN(testSExt8(0));
+    RUN(testSExt8(1));
+    RUN(testSExt8(42));
+    RUN(testSExt8(-1));
+    RUN(testSExt8(0xff));
+    RUN(testSExt8(0x100));
+    RUN(testSExt8Fold(0));
+    RUN(testSExt8Fold(1));
+    RUN(testSExt8Fold(42));
+    RUN(testSExt8Fold(-1));
+    RUN(testSExt8Fold(0xff));
+    RUN(testSExt8Fold(0x100));
+    RUN(testSExt8SExt8(0));
+    RUN(testSExt8SExt8(1));
+    RUN(testSExt8SExt8(42));
+    RUN(testSExt8SExt8(-1));
+    RUN(testSExt8SExt8(0xff));
+    RUN(testSExt8SExt8(0x100));
+    RUN(testSExt8SExt16(0));
+    RUN(testSExt8SExt16(1));
+    RUN(testSExt8SExt16(42));
+    RUN(testSExt8SExt16(-1));
+    RUN(testSExt8SExt16(0xff));
+    RUN(testSExt8SExt16(0x100));
+    RUN(testSExt8SExt16(0xffff));
+    RUN(testSExt8SExt16(0x10000));
+    RUN(testSExt8BitAnd(0, 0));
+    RUN(testSExt8BitAnd(1, 0));
+    RUN(testSExt8BitAnd(42, 0));
+    RUN(testSExt8BitAnd(-1, 0));
+    RUN(testSExt8BitAnd(0xff, 0));
+    RUN(testSExt8BitAnd(0x100, 0));
+    RUN(testSExt8BitAnd(0xffff, 0));
+    RUN(testSExt8BitAnd(0x10000, 0));
+    RUN(testSExt8BitAnd(0, 0xf));
+    RUN(testSExt8BitAnd(1, 0xf));
+    RUN(testSExt8BitAnd(42, 0xf));
+    RUN(testSExt8BitAnd(-1, 0xf));
+    RUN(testSExt8BitAnd(0xff, 0xf));
+    RUN(testSExt8BitAnd(0x100, 0xf));
+    RUN(testSExt8BitAnd(0xffff, 0xf));
+    RUN(testSExt8BitAnd(0x10000, 0xf));
+    RUN(testSExt8BitAnd(0, 0xff));
+    RUN(testSExt8BitAnd(1, 0xff));
+    RUN(testSExt8BitAnd(42, 0xff));
+    RUN(testSExt8BitAnd(-1, 0xff));
+    RUN(testSExt8BitAnd(0xff, 0xff));
+    RUN(testSExt8BitAnd(0x100, 0xff));
+    RUN(testSExt8BitAnd(0xffff, 0xff));
+    RUN(testSExt8BitAnd(0x10000, 0xff));
+    RUN(testSExt8BitAnd(0, 0x80));
+    RUN(testSExt8BitAnd(1, 0x80));
+    RUN(testSExt8BitAnd(42, 0x80));
+    RUN(testSExt8BitAnd(-1, 0x80));
+    RUN(testSExt8BitAnd(0xff, 0x80));
+    RUN(testSExt8BitAnd(0x100, 0x80));
+    RUN(testSExt8BitAnd(0xffff, 0x80));
+    RUN(testSExt8BitAnd(0x10000, 0x80));
+    RUN(testBitAndSExt8(0, 0xf));
+    RUN(testBitAndSExt8(1, 0xf));
+    RUN(testBitAndSExt8(42, 0xf));
+    RUN(testBitAndSExt8(-1, 0xf));
+    RUN(testBitAndSExt8(0xff, 0xf));
+    RUN(testBitAndSExt8(0x100, 0xf));
+    RUN(testBitAndSExt8(0xffff, 0xf));
+    RUN(testBitAndSExt8(0x10000, 0xf));
+    RUN(testBitAndSExt8(0, 0xff));
+    RUN(testBitAndSExt8(1, 0xff));
+    RUN(testBitAndSExt8(42, 0xff));
+    RUN(testBitAndSExt8(-1, 0xff));
+    RUN(testBitAndSExt8(0xff, 0xff));
+    RUN(testBitAndSExt8(0x100, 0xff));
+    RUN(testBitAndSExt8(0xffff, 0xff));
+    RUN(testBitAndSExt8(0x10000, 0xff));
+    RUN(testBitAndSExt8(0, 0xfff));
+    RUN(testBitAndSExt8(1, 0xfff));
+    RUN(testBitAndSExt8(42, 0xfff));
+    RUN(testBitAndSExt8(-1, 0xfff));
+    RUN(testBitAndSExt8(0xff, 0xfff));
+    RUN(testBitAndSExt8(0x100, 0xfff));
+    RUN(testBitAndSExt8(0xffff, 0xfff));
+    RUN(testBitAndSExt8(0x10000, 0xfff));
+
+    RUN(testSExt16(0));
+    RUN(testSExt16(1));
+    RUN(testSExt16(42));
+    RUN(testSExt16(-1));
+    RUN(testSExt16(0xffff));
+    RUN(testSExt16(0x10000));
+    RUN(testSExt16Fold(0));
+    RUN(testSExt16Fold(1));
+    RUN(testSExt16Fold(42));
+    RUN(testSExt16Fold(-1));
+    RUN(testSExt16Fold(0xffff));
+    RUN(testSExt16Fold(0x10000));
+    RUN(testSExt16SExt8(0));
+    RUN(testSExt16SExt8(1));
+    RUN(testSExt16SExt8(42));
+    RUN(testSExt16SExt8(-1));
+    RUN(testSExt16SExt8(0xffff));
+    RUN(testSExt16SExt8(0x10000));
+    RUN(testSExt16SExt16(0));
+    RUN(testSExt16SExt16(1));
+    RUN(testSExt16SExt16(42));
+    RUN(testSExt16SExt16(-1));
+    RUN(testSExt16SExt16(0xffff));
+    RUN(testSExt16SExt16(0x10000));
+    RUN(testSExt16SExt16(0xffffff));
+    RUN(testSExt16SExt16(0x1000000));
+    RUN(testSExt16BitAnd(0, 0));
+    RUN(testSExt16BitAnd(1, 0));
+    RUN(testSExt16BitAnd(42, 0));
+    RUN(testSExt16BitAnd(-1, 0));
+    RUN(testSExt16BitAnd(0xffff, 0));
+    RUN(testSExt16BitAnd(0x10000, 0));
+    RUN(testSExt16BitAnd(0xffffff, 0));
+    RUN(testSExt16BitAnd(0x1000000, 0));
+    RUN(testSExt16BitAnd(0, 0xf));
+    RUN(testSExt16BitAnd(1, 0xf));
+    RUN(testSExt16BitAnd(42, 0xf));
+    RUN(testSExt16BitAnd(-1, 0xf));
+    RUN(testSExt16BitAnd(0xffff, 0xf));
+    RUN(testSExt16BitAnd(0x10000, 0xf));
+    RUN(testSExt16BitAnd(0xffffff, 0xf));
+    RUN(testSExt16BitAnd(0x1000000, 0xf));
+    RUN(testSExt16BitAnd(0, 0xffff));
+    RUN(testSExt16BitAnd(1, 0xffff));
+    RUN(testSExt16BitAnd(42, 0xffff));
+    RUN(testSExt16BitAnd(-1, 0xffff));
+    RUN(testSExt16BitAnd(0xffff, 0xffff));
+    RUN(testSExt16BitAnd(0x10000, 0xffff));
+    RUN(testSExt16BitAnd(0xffffff, 0xffff));
+    RUN(testSExt16BitAnd(0x1000000, 0xffff));
+    RUN(testSExt16BitAnd(0, 0x8000));
+    RUN(testSExt16BitAnd(1, 0x8000));
+    RUN(testSExt16BitAnd(42, 0x8000));
+    RUN(testSExt16BitAnd(-1, 0x8000));
+    RUN(testSExt16BitAnd(0xffff, 0x8000));
+    RUN(testSExt16BitAnd(0x10000, 0x8000));
+    RUN(testSExt16BitAnd(0xffffff, 0x8000));
+    RUN(testSExt16BitAnd(0x1000000, 0x8000));
+    RUN(testBitAndSExt16(0, 0xf));
+    RUN(testBitAndSExt16(1, 0xf));
+    RUN(testBitAndSExt16(42, 0xf));
+    RUN(testBitAndSExt16(-1, 0xf));
+    RUN(testBitAndSExt16(0xffff, 0xf));
+    RUN(testBitAndSExt16(0x10000, 0xf));
+    RUN(testBitAndSExt16(0xffffff, 0xf));
+    RUN(testBitAndSExt16(0x1000000, 0xf));
+    RUN(testBitAndSExt16(0, 0xffff));
+    RUN(testBitAndSExt16(1, 0xffff));
+    RUN(testBitAndSExt16(42, 0xffff));
+    RUN(testBitAndSExt16(-1, 0xffff));
+    RUN(testBitAndSExt16(0xffff, 0xffff));
+    RUN(testBitAndSExt16(0x10000, 0xffff));
+    RUN(testBitAndSExt16(0xffffff, 0xffff));
+    RUN(testBitAndSExt16(0x1000000, 0xffff));
+    RUN(testBitAndSExt16(0, 0xfffff));
+    RUN(testBitAndSExt16(1, 0xfffff));
+    RUN(testBitAndSExt16(42, 0xfffff));
+    RUN(testBitAndSExt16(-1, 0xfffff));
+    RUN(testBitAndSExt16(0xffff, 0xfffff));
+    RUN(testBitAndSExt16(0x10000, 0xfffff));
+    RUN(testBitAndSExt16(0xffffff, 0xfffff));
+    RUN(testBitAndSExt16(0x1000000, 0xfffff));
+
+    RUN(testSExt32BitAnd(0, 0));
+    RUN(testSExt32BitAnd(1, 0));
+    RUN(testSExt32BitAnd(42, 0));
+    RUN(testSExt32BitAnd(-1, 0));
+    RUN(testSExt32BitAnd(0x80000000, 0));
+    RUN(testSExt32BitAnd(0, 0xf));
+    RUN(testSExt32BitAnd(1, 0xf));
+    RUN(testSExt32BitAnd(42, 0xf));
+    RUN(testSExt32BitAnd(-1, 0xf));
+    RUN(testSExt32BitAnd(0x80000000, 0xf));
+    RUN(testSExt32BitAnd(0, 0x80000000));
+    RUN(testSExt32BitAnd(1, 0x80000000));
+    RUN(testSExt32BitAnd(42, 0x80000000));
+    RUN(testSExt32BitAnd(-1, 0x80000000));
+    RUN(testSExt32BitAnd(0x80000000, 0x80000000));
+    RUN(testBitAndSExt32(0, 0xf));
+    RUN(testBitAndSExt32(1, 0xf));
+    RUN(testBitAndSExt32(42, 0xf));
+    RUN(testBitAndSExt32(-1, 0xf));
+    RUN(testBitAndSExt32(0xffff, 0xf));
+    RUN(testBitAndSExt32(0x10000, 0xf));
+    RUN(testBitAndSExt32(0xffffff, 0xf));
+    RUN(testBitAndSExt32(0x1000000, 0xf));
+    RUN(testBitAndSExt32(0, 0xffff00000000llu));
+    RUN(testBitAndSExt32(1, 0xffff00000000llu));
+    RUN(testBitAndSExt32(42, 0xffff00000000llu));
+    RUN(testBitAndSExt32(-1, 0xffff00000000llu));
+    RUN(testBitAndSExt32(0x80000000, 0xffff00000000llu));
+
+    RUN(testBasicSelect());
+    RUN(testSelectTest());
+    RUN(testSelectCompareDouble());
+    RUN_BINARY(testSelectCompareFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testSelectCompareFloatToDouble, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN(testSelectDouble());
+    RUN(testSelectDoubleTest());
+    RUN(testSelectDoubleCompareDouble());
+    RUN_BINARY(testSelectDoubleCompareFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN_BINARY(testSelectFloatCompareFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+    RUN(testSelectDoubleCompareDoubleWithAliasing());
+    RUN(testSelectFloatCompareFloatWithAliasing());
+    RUN(testSelectFold(42));
+    RUN(testSelectFold(43));
+    RUN(testSelectInvert());
+    RUN(testCheckSelect());
+    RUN(testCheckSelectCheckSelect());
+    RUN(testCheckSelectAndCSE());
+    RUN_BINARY(testPowDoubleByIntegerLoop, floatingPointOperands<double>(), int64Operands());
+
+    RUN(testTruncOrHigh());
+    RUN(testTruncOrLow());
+    RUN(testBitAndOrHigh());
+    RUN(testBitAndOrLow());
+
+    RUN(testBranch64Equal(0, 0));
+    RUN(testBranch64Equal(1, 1));
+    RUN(testBranch64Equal(-1, -1));
+    RUN(testBranch64Equal(1, -1));
+    RUN(testBranch64Equal(-1, 1));
+    RUN(testBranch64EqualImm(0, 0));
+    RUN(testBranch64EqualImm(1, 1));
+    RUN(testBranch64EqualImm(-1, -1));
+    RUN(testBranch64EqualImm(1, -1));
+    RUN(testBranch64EqualImm(-1, 1));
+    RUN(testBranch64EqualMem(0, 0));
+    RUN(testBranch64EqualMem(1, 1));
+    RUN(testBranch64EqualMem(-1, -1));
+    RUN(testBranch64EqualMem(1, -1));
+    RUN(testBranch64EqualMem(-1, 1));
+    RUN(testBranch64EqualMemImm(0, 0));
+    RUN(testBranch64EqualMemImm(1, 1));
+    RUN(testBranch64EqualMemImm(-1, -1));
+    RUN(testBranch64EqualMemImm(1, -1));
+    RUN(testBranch64EqualMemImm(-1, 1));
+
+    RUN(testStore8Load8Z(0));
+    RUN(testStore8Load8Z(123));
+    RUN(testStore8Load8Z(12345));
+    RUN(testStore8Load8Z(-123));
+
+    RUN(testStore16Load16Z(0));
+    RUN(testStore16Load16Z(123));
+    RUN(testStore16Load16Z(12345));
+    RUN(testStore16Load16Z(12345678));
+    RUN(testStore16Load16Z(-123));
+
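+    // SShr(Shl(x, N), N) is the canonical sign-extension idiom; the triples below are
+    // (value, left-shift amount, right-shift amount), swept across widths so the strength
+    // reduction to SExt8/SExt16/SExt32 can be checked against plain shifting.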
+    RUN(testSShrShl32(42, 24, 24));
+    RUN(testSShrShl32(-42, 24, 24));
+    RUN(testSShrShl32(4200, 24, 24));
+    RUN(testSShrShl32(-4200, 24, 24));
+    RUN(testSShrShl32(4200000, 24, 24));
+    RUN(testSShrShl32(-4200000, 24, 24));
+
+    RUN(testSShrShl32(42, 16, 16));
+    RUN(testSShrShl32(-42, 16, 16));
+    RUN(testSShrShl32(4200, 16, 16));
+    RUN(testSShrShl32(-4200, 16, 16));
+    RUN(testSShrShl32(4200000, 16, 16));
+    RUN(testSShrShl32(-4200000, 16, 16));
+
+    RUN(testSShrShl32(42, 8, 8));
+    RUN(testSShrShl32(-42, 8, 8));
+    RUN(testSShrShl32(4200, 8, 8));
+    RUN(testSShrShl32(-4200, 8, 8));
+    RUN(testSShrShl32(4200000, 8, 8));
+    RUN(testSShrShl32(-4200000, 8, 8));
+    RUN(testSShrShl32(420000000, 8, 8));
+    RUN(testSShrShl32(-420000000, 8, 8));
+
+    RUN(testSShrShl64(42, 56, 56));
+    RUN(testSShrShl64(-42, 56, 56));
+    RUN(testSShrShl64(4200, 56, 56));
+    RUN(testSShrShl64(-4200, 56, 56));
+    RUN(testSShrShl64(4200000, 56, 56));
+    RUN(testSShrShl64(-4200000, 56, 56));
+    RUN(testSShrShl64(420000000, 56, 56));
+    RUN(testSShrShl64(-420000000, 56, 56));
+    RUN(testSShrShl64(42000000000, 56, 56));
+    RUN(testSShrShl64(-42000000000, 56, 56));
+
+    RUN(testSShrShl64(42, 48, 48));
+    RUN(testSShrShl64(-42, 48, 48));
+    RUN(testSShrShl64(4200, 48, 48));
+    RUN(testSShrShl64(-4200, 48, 48));
+    RUN(testSShrShl64(4200000, 48, 48));
+    RUN(testSShrShl64(-4200000, 48, 48));
+    RUN(testSShrShl64(420000000, 48, 48));
+    RUN(testSShrShl64(-420000000, 48, 48));
+    RUN(testSShrShl64(42000000000, 48, 48));
+    RUN(testSShrShl64(-42000000000, 48, 48));
+
+    RUN(testSShrShl64(42, 32, 32));
+    RUN(testSShrShl64(-42, 32, 32));
+    RUN(testSShrShl64(4200, 32, 32));
+    RUN(testSShrShl64(-4200, 32, 32));
+    RUN(testSShrShl64(4200000, 32, 32));
+    RUN(testSShrShl64(-4200000, 32, 32));
+    RUN(testSShrShl64(420000000, 32, 32));
+    RUN(testSShrShl64(-420000000, 32, 32));
+    RUN(testSShrShl64(42000000000, 32, 32));
+    RUN(testSShrShl64(-42000000000, 32, 32));
+
+    RUN(testSShrShl64(42, 24, 24));
+    RUN(testSShrShl64(-42, 24, 24));
+    RUN(testSShrShl64(4200, 24, 24));
+    RUN(testSShrShl64(-4200, 24, 24));
+    RUN(testSShrShl64(4200000, 24, 24));
+    RUN(testSShrShl64(-4200000, 24, 24));
+    RUN(testSShrShl64(420000000, 24, 24));
+    RUN(testSShrShl64(-420000000, 24, 24));
+    RUN(testSShrShl64(42000000000, 24, 24));
+    RUN(testSShrShl64(-42000000000, 24, 24));
+
+    RUN(testSShrShl64(42, 16, 16));
+    RUN(testSShrShl64(-42, 16, 16));
+    RUN(testSShrShl64(4200, 16, 16));
+    RUN(testSShrShl64(-4200, 16, 16));
+    RUN(testSShrShl64(4200000, 16, 16));
+    RUN(testSShrShl64(-4200000, 16, 16));
+    RUN(testSShrShl64(420000000, 16, 16));
+    RUN(testSShrShl64(-420000000, 16, 16));
+    RUN(testSShrShl64(42000000000, 16, 16));
+    RUN(testSShrShl64(-42000000000, 16, 16));
+
+    RUN(testSShrShl64(42, 8, 8));
+    RUN(testSShrShl64(-42, 8, 8));
+    RUN(testSShrShl64(4200, 8, 8));
+    RUN(testSShrShl64(-4200, 8, 8));
+    RUN(testSShrShl64(4200000, 8, 8));
+    RUN(testSShrShl64(-4200000, 8, 8));
+    RUN(testSShrShl64(420000000, 8, 8));
+    RUN(testSShrShl64(-420000000, 8, 8));
+    RUN(testSShrShl64(42000000000, 8, 8));
+    RUN(testSShrShl64(-42000000000, 8, 8));
+
+    RUN(testCheckMul64SShr());
+
+    RUN_BINARY(testRotR, int32Operands(), int32Operands());
+    RUN_BINARY(testRotR, int64Operands(), int32Operands());
+    RUN_BINARY(testRotL, int32Operands(), int32Operands());
+    RUN_BINARY(testRotL, int64Operands(), int32Operands());
+
+    RUN_BINARY(testRotRWithImmShift, int32Operands(), int32Operands());
+    RUN_BINARY(testRotRWithImmShift, int64Operands(), int32Operands());
+    RUN_BINARY(testRotLWithImmShift, int32Operands(), int32Operands());
+    RUN_BINARY(testRotLWithImmShift, int64Operands(), int32Operands());
+
+    RUN(testComputeDivisionMagic<int32_t>(2, -2147483647, 0));
+    RUN(testTrivialInfiniteLoop());
+    RUN(testFoldPathEqual());
+    
+    RUN(testRShiftSelf32());
+    RUN(testURShiftSelf32());
+    RUN(testLShiftSelf32());
+    RUN(testRShiftSelf64());
+    RUN(testURShiftSelf64());
+    RUN(testLShiftSelf64());
+
+    RUN(testPatchpointDoubleRegs());
+    RUN(testSpillDefSmallerThanUse());
+    RUN(testSpillUseLargerThanDef());
+    RUN(testLateRegister());
+    RUN(testInterpreter());
+    RUN(testReduceStrengthCheckBottomUseInAnotherBlock());
+    RUN(testResetReachabilityDanglingReference());
+    
+    RUN(testEntrySwitchSimple());
+    RUN(testEntrySwitchNoEntrySwitch());
+    RUN(testEntrySwitchWithCommonPaths());
+    RUN(testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint());
+    RUN(testEntrySwitchLoop());
+
+    RUN(testSomeEarlyRegister());
+    RUN(testPatchpointTerminalReturnValue(true));
+    RUN(testPatchpointTerminalReturnValue(false));
+    RUN(testTerminalPatchpointThatNeedsToBeSpilled());
+
+    RUN(testMemoryFence());
+    RUN(testStoreFence());
+    RUN(testLoadFence());
+    RUN(testTrappingLoad());
+    RUN(testTrappingStore());
+    RUN(testTrappingLoadAddStore());
+    RUN(testTrappingLoadDCE());
+    RUN(testTrappingStoreElimination());
+    RUN(testMoveConstants());
+    RUN(testPCOriginMapDoesntInsertNops());
+    RUN(testPinRegisters());
+    RUN(testReduceStrengthReassociation(true));
+    RUN(testReduceStrengthReassociation(false));
+    RUN(testAddShl32());
+    RUN(testAddShl64());
+    RUN(testAddShl65());
+    RUN(testLoadBaseIndexShift2());
+    RUN(testLoadBaseIndexShift32());
+    RUN(testOptimizeMaterialization());
+
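+    // WasmBoundsCheck must trap once the access offset passes the memory bound; the last
+    // case probes behavior near the top of the unsigned 32-bit range.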
+    RUN(testWasmBoundsCheck(0));
+    RUN(testWasmBoundsCheck(100));
+    RUN(testWasmBoundsCheck(10000));
+    RUN(testWasmBoundsCheck(std::numeric_limits<unsigned>::max() - 5));
+    RUN(testWasmAddress());
+
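+    // ISA-specific instruction-selection checks: test-and-branch fusion and LEA formation
+    // on x86, ternary sub selection on ARM64.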
+    if (isX86()) {
+        RUN(testBranchBitAndImmFusion(Identity, Int64, 1, Air::BranchTest32, Air::Arg::Tmp));
+        RUN(testBranchBitAndImmFusion(Identity, Int64, 0xff, Air::BranchTest32, Air::Arg::Tmp));
+        RUN(testBranchBitAndImmFusion(Trunc, Int32, 1, Air::BranchTest32, Air::Arg::Tmp));
+        RUN(testBranchBitAndImmFusion(Trunc, Int32, 0xff, Air::BranchTest32, Air::Arg::Tmp));
+        RUN(testBranchBitAndImmFusion(Load8S, Int32, 1, Air::BranchTest8, Air::Arg::Addr));
+        RUN(testBranchBitAndImmFusion(Load8Z, Int32, 1, Air::BranchTest8, Air::Arg::Addr));
+        RUN(testBranchBitAndImmFusion(Load, Int32, 1, Air::BranchTest32, Air::Arg::Addr));
+        RUN(testBranchBitAndImmFusion(Load, Int64, 1, Air::BranchTest32, Air::Arg::Addr));
+        RUN(testX86LeaAddAddShlLeft());
+        RUN(testX86LeaAddAddShlRight());
+        RUN(testX86LeaAddAdd());
+        RUN(testX86LeaAddShlRight());
+        RUN(testX86LeaAddShlLeftScale1());
+        RUN(testX86LeaAddShlLeftScale2());
+        RUN(testX86LeaAddShlLeftScale4());
+        RUN(testX86LeaAddShlLeftScale8());
+    }
+
+    if (isARM64()) {
+        RUN(testTernarySubInstructionSelection(Identity, Int64, Air::Sub64));
+        RUN(testTernarySubInstructionSelection(Trunc, Int32, Air::Sub32));
+    }
+
+    if (tasks.isEmpty())
+        usage();
+
+    Lock lock;
+
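+    // One worker per core (just one when a test filter is given) drains the shared task
+    // deque, popping under the lock and running each test outside it.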
+    Vector<ThreadIdentifier> threads;
+    for (unsigned i = filter ? 1 : WTF::numberOfProcessorCores(); i--;) {
+        threads.append(
+            createThread(
+                "testb3 thread",
+                [&] () {
+                    for (;;) {
+                        RefPtr<SharedTask<void()>> task;
+                        {
+                            LockHolder locker(lock);
+                            if (tasks.isEmpty())
+                                return;
+                            task = tasks.takeFirst();
+                        }
+
+                        task->run();
+                    }
+                }));
+    }
+
+    for (ThreadIdentifier thread : threads)
+        waitForThreadCompletion(thread);
+    crashLock.lock();
+}
+
+} // anonymous namespace
+
+#else // ENABLE(B3_JIT)
+
+static void run(const char*)
+{
+    dataLog("B3 JIT is not enabled.\n");
+}
+
+#endif // ENABLE(B3_JIT)
+
+int main(int argc, char** argv)
+{
+    const char* filter = nullptr;
+    switch (argc) {
+    case 1:
+        break;
+    case 2:
+        filter = argv[1];
+        break;
+    default:
+        usage();
+        break;
+    }
+    
+    run(filter);
+    return 0;
+}
+
diff --git a/Source/JavaScriptCore/bindings/ScriptFunctionCall.cpp b/Source/JavaScriptCore/bindings/ScriptFunctionCall.cpp
index f2647da29..18d07df7e 100644
--- a/Source/JavaScriptCore/bindings/ScriptFunctionCall.cpp
+++ b/Source/JavaScriptCore/bindings/ScriptFunctionCall.cpp
@@ -32,6 +32,7 @@
 #include "config.h"
 #include "ScriptFunctionCall.h"
 
+#include "JSCInlines.h"
 #include "JSLock.h"
 #include "ScriptValue.h"
 #include 
@@ -40,20 +41,6 @@ using namespace JSC;
 
 namespace Deprecated {
 
-void ScriptCallArgumentHandler::appendArgument(const Deprecated::ScriptObject& argument)
-{
-    if (argument.scriptState() != m_exec) {
-        ASSERT_NOT_REACHED();
-        return;
-    }
-    m_arguments.append(argument.jsObject());
-}
-
-void ScriptCallArgumentHandler::appendArgument(const Deprecated::ScriptValue& argument)
-{
-    m_arguments.append(argument.jsValue());
-}
-
 void ScriptCallArgumentHandler::appendArgument(const String& argument)
 {
     JSLockHolder lock(m_exec);
@@ -114,40 +101,44 @@ ScriptFunctionCall::ScriptFunctionCall(const Deprecated::ScriptObject& thisObjec
 {
 }
 
-Deprecated::ScriptValue ScriptFunctionCall::call(bool& hadException)
+JSValue ScriptFunctionCall::call(bool& hadException)
 {
     JSObject* thisObject = m_thisObject.jsObject();
 
-    JSLockHolder lock(m_exec);
+    VM& vm = m_exec->vm();
+    JSLockHolder lock(vm);
+    auto scope = DECLARE_THROW_SCOPE(vm);
 
-    JSValue function = thisObject->get(m_exec, Identifier(m_exec, m_name));
-    if (m_exec->hadException()) {
+    JSValue function = thisObject->get(m_exec, Identifier::fromString(m_exec, m_name));
+    if (UNLIKELY(scope.exception())) {
         hadException = true;
-        return Deprecated::ScriptValue();
+        return { };
     }
 
     CallData callData;
     CallType callType = getCallData(function, callData);
-    if (callType == CallTypeNone)
-        return Deprecated::ScriptValue();
+    if (callType == CallType::None)
+        return { };
 
     JSValue result;
+    NakedPtr<Exception> exception;
     if (m_callHandler)
-        result = m_callHandler(m_exec, function, callType, callData, thisObject, m_arguments);
+        result = m_callHandler(m_exec, function, callType, callData, thisObject, m_arguments, exception);
     else
-        result = JSC::call(m_exec, function, callType, callData, thisObject, m_arguments);
+        result = JSC::call(m_exec, function, callType, callData, thisObject, m_arguments, exception);
 
-    if (m_exec->hadException()) {
-        hadException = true;
-        return Deprecated::ScriptValue();
+    if (exception) {
+        // Do not treat a terminated execution exception as having an exception. Just treat it as an empty result.
+        hadException = !isTerminatedExecutionException(vm, exception);
+        return { };
     }
 
-    return Deprecated::ScriptValue(m_exec->vm(), result);
+    return result;
 }
 
-Deprecated::ScriptValue ScriptFunctionCall::call()
+JSC::JSValue ScriptFunctionCall::call()
 {
-    bool hadException = false;
+    bool hadException;
     return call(hadException);
 }
 
diff --git a/Source/JavaScriptCore/bindings/ScriptFunctionCall.h b/Source/JavaScriptCore/bindings/ScriptFunctionCall.h
index 04b2afe07..6978414e4 100644
--- a/Source/JavaScriptCore/bindings/ScriptFunctionCall.h
+++ b/Source/JavaScriptCore/bindings/ScriptFunctionCall.h
@@ -29,8 +29,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef ScriptFunctionCall_h
-#define ScriptFunctionCall_h
+#pragma once
 
 #include "ArgList.h"
 #include "ScriptObject.h"
@@ -46,8 +45,6 @@ class JS_EXPORT_PRIVATE ScriptCallArgumentHandler {
 public:
     ScriptCallArgumentHandler(JSC::ExecState* state) : m_exec(state) { }
 
-    void appendArgument(const ScriptObject&);
-    void appendArgument(const ScriptValue&);
     void appendArgument(const char*);
     void appendArgument(const String&);
     void appendArgument(JSC::JSValue);
@@ -71,10 +68,10 @@ private:
 
 class JS_EXPORT_PRIVATE ScriptFunctionCall : public ScriptCallArgumentHandler {
 public:
-    typedef JSC::JSValue (*ScriptFunctionCallHandler)(JSC::ExecState* exec, JSC::JSValue functionObject, JSC::CallType callType, const JSC::CallData& callData, JSC::JSValue thisValue, const JSC::ArgList& args);
+    typedef JSC::JSValue (*ScriptFunctionCallHandler)(JSC::ExecState* exec, JSC::JSValue functionObject, JSC::CallType callType, const JSC::CallData& callData, JSC::JSValue thisValue, const JSC::ArgList& args, NakedPtr<JSC::Exception>&);
     ScriptFunctionCall(const ScriptObject& thisObject, const String& name, ScriptFunctionCallHandler handler = nullptr);
-    ScriptValue call(bool& hadException);
-    ScriptValue call();
+    JSC::JSValue call(bool& hadException);
+    JSC::JSValue call();
 
 protected:
     ScriptFunctionCallHandler m_callHandler;
@@ -83,5 +80,3 @@ protected:
 };
 
 } // namespace Deprecated
-
-#endif // ScriptFunctionCall
diff --git a/Source/JavaScriptCore/bindings/ScriptObject.cpp b/Source/JavaScriptCore/bindings/ScriptObject.cpp
index ccf7af28f..70422e282 100644
--- a/Source/JavaScriptCore/bindings/ScriptObject.cpp
+++ b/Source/JavaScriptCore/bindings/ScriptObject.cpp
@@ -32,6 +32,8 @@
 #include "config.h"
 #include "ScriptObject.h"
 
+#include "JSCInlines.h"
+
 using namespace JSC;
 
 namespace Deprecated {
diff --git a/Source/JavaScriptCore/bindings/ScriptObject.h b/Source/JavaScriptCore/bindings/ScriptObject.h
index 8f7b0dcdd..baa1ea8e0 100644
--- a/Source/JavaScriptCore/bindings/ScriptObject.h
+++ b/Source/JavaScriptCore/bindings/ScriptObject.h
@@ -29,8 +29,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef ScriptObject_h
-#define ScriptObject_h
+#pragma once
 
 #include "JSObject.h"
 #include "ScriptValue.h"
@@ -41,15 +40,15 @@ class ScriptObject : public ScriptValue {
 public:
     JS_EXPORT_PRIVATE ScriptObject(JSC::ExecState*, JSC::JSObject*);
     JS_EXPORT_PRIVATE ScriptObject(JSC::ExecState*, const ScriptValue&);
-    ScriptObject() : m_scriptState(nullptr) { }
+    ScriptObject() { }
+
+    operator JSC::JSObject*() const { return jsObject(); }
 
     JSC::JSObject* jsObject() const { return asObject(jsValue()); }
     JSC::ExecState* scriptState() const { return m_scriptState; }
 
-protected:
-    JSC::ExecState* m_scriptState;
+private:
+    JSC::ExecState* m_scriptState { nullptr };
 };
 
 } // namespace Deprecated
-
-#endif // ScriptObject_h
diff --git a/Source/JavaScriptCore/bindings/ScriptValue.cpp b/Source/JavaScriptCore/bindings/ScriptValue.cpp
index c72ab4634..2a94f3b6e 100644
--- a/Source/JavaScriptCore/bindings/ScriptValue.cpp
+++ b/Source/JavaScriptCore/bindings/ScriptValue.cpp
@@ -11,7 +11,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -32,11 +32,77 @@
 
 #include "APICast.h"
 #include "InspectorValues.h"
+#include "JSCInlines.h"
 #include "JSLock.h"
 
 using namespace JSC;
 using namespace Inspector;
 
+namespace Inspector {
+
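+// Recursively converts a JSValue into the Inspector's JSON-like value tree. Returns
+// nullptr, rather than throwing, when a value cannot be represented or when maxDepth
+// levels of nesting are exhausted.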
+static RefPtr<InspectorValue> jsToInspectorValue(ExecState& scriptState, JSValue value, int maxDepth)
+{
+    if (!value) {
+        ASSERT_NOT_REACHED();
+        return nullptr;
+    }
+
+    if (!maxDepth)
+        return nullptr;
+
+    maxDepth--;
+
+    if (value.isUndefinedOrNull())
+        return InspectorValue::null();
+    if (value.isBoolean())
+        return InspectorValue::create(value.asBoolean());
+    if (value.isNumber() && value.isDouble())
+        return InspectorValue::create(value.asNumber());
+    if (value.isNumber() && value.isAnyInt())
+        return InspectorValue::create(static_cast<int>(value.asAnyInt()));
+    if (value.isString())
+        return InspectorValue::create(asString(value)->value(&scriptState));
+
+    if (value.isObject()) {
+        if (isJSArray(value)) {
+            auto inspectorArray = InspectorArray::create();
+            auto& array = *asArray(value);
+            unsigned length = array.length();
+            for (unsigned i = 0; i < length; i++) {
+                auto elementValue = jsToInspectorValue(scriptState, array.getIndex(&scriptState, i), maxDepth);
+                if (!elementValue)
+                    return nullptr;
+                inspectorArray->pushValue(WTFMove(elementValue));
+            }
+            return WTFMove(inspectorArray);
+        }
+        auto inspectorObject = InspectorObject::create();
+        auto& object = *value.getObject();
+        PropertyNameArray propertyNames(&scriptState, PropertyNameMode::Strings);
+        object.methodTable()->getOwnPropertyNames(&object, &scriptState, propertyNames, EnumerationMode());
+        for (auto& name : propertyNames) {
+            auto inspectorValue = jsToInspectorValue(scriptState, object.get(&scriptState, name), maxDepth);
+            if (!inspectorValue)
+                return nullptr;
+            inspectorObject->setValue(name.string(), WTFMove(inspectorValue));
+        }
+        return WTFMove(inspectorObject);
+    }
+
+    ASSERT_NOT_REACHED();
+    return nullptr;
+}
+
+RefPtr<InspectorValue> toInspectorValue(ExecState& state, JSValue value)
+{
+    // FIXME: Maybe we should move the JSLockHolder stuff to the callers since this function takes a JSValue directly.
+    // Doing the locking here made sense when we were trying to abstract the difference between multiple JavaScript engines.
+    JSLockHolder holder(&state);
+    return jsToInspectorValue(state, value, InspectorValue::maxDepth);
+}
+
+} // namespace Inspector
+
 namespace Deprecated {
 
 ScriptValue::~ScriptValue()
@@ -55,10 +121,13 @@ bool ScriptValue::getString(ExecState* scriptState, String& result) const
 
 String ScriptValue::toString(ExecState* scriptState) const
 {
-    String result = m_value.get().toString(scriptState)->value(scriptState);
+    VM& vm = scriptState->vm();
+    auto scope = DECLARE_CATCH_SCOPE(vm);
+
+    String result = m_value.get().toWTFString(scriptState);
     // Handle the case where an exception is thrown as part of invoking toString on the object.
-    if (scriptState->hadException())
-        scriptState->clearException();
+    if (UNLIKELY(scope.exception()))
+        scope.clearException();
     return result;
 }
 
@@ -66,7 +135,7 @@ bool ScriptValue::isEqual(ExecState* scriptState, const ScriptValue& anotherValu
 {
     if (hasNoValue())
         return anotherValue.hasNoValue();
-    return JSValueIsEqual(toRef(scriptState), toRef(scriptState, jsValue()), toRef(scriptState, anotherValue.jsValue()), nullptr);
+    return JSValueIsStrictEqual(toRef(scriptState), toRef(scriptState, jsValue()), toRef(scriptState, anotherValue.jsValue()));
 }
 
 bool ScriptValue::isNull() const
@@ -93,71 +162,13 @@ bool ScriptValue::isObject() const
 bool ScriptValue::isFunction() const
 {
     CallData callData;
-    return getCallData(m_value.get(), callData) != CallTypeNone;
-}
-
-#if ENABLE(INSPECTOR)
-static PassRefPtr<InspectorValue> jsToInspectorValue(ExecState* scriptState, JSValue value, int maxDepth)
-{
-    if (!value) {
-        ASSERT_NOT_REACHED();
-        return nullptr;
-    }
-
-    if (!maxDepth)
-        return nullptr;
-
-    maxDepth--;
-
-    if (value.isNull() || value.isUndefined())
-        return InspectorValue::null();
-    if (value.isBoolean())
-        return InspectorBasicValue::create(value.asBoolean());
-    if (value.isNumber())
-        return InspectorBasicValue::create(value.asNumber());
-    if (value.isString()) {
-        String s = value.getString(scriptState);
-        return InspectorString::create(String(s.deprecatedCharacters(), s.length()));
-    }
-
-    if (value.isObject()) {
-        if (isJSArray(value)) {
-            RefPtr<InspectorArray> inspectorArray = InspectorArray::create();
-            JSArray* array = asArray(value);
-            unsigned length = array->length();
-            for (unsigned i = 0; i < length; i++) {
-                JSValue element = array->getIndex(scriptState, i);
-                RefPtr<InspectorValue> elementValue = jsToInspectorValue(scriptState, element, maxDepth);
-                if (!elementValue)
-                    return nullptr;
-                inspectorArray->pushValue(elementValue);
-            }
-            return inspectorArray;
-        }
-        RefPtr<InspectorObject> inspectorObject = InspectorObject::create();
-        JSObject* object = value.getObject();
-        PropertyNameArray propertyNames(scriptState);
-        object->methodTable()->getOwnPropertyNames(object, scriptState, propertyNames, ExcludeDontEnumProperties);
-        for (size_t i = 0; i < propertyNames.size(); i++) {
-            const Identifier& name =  propertyNames[i];
-            JSValue propertyValue = object->get(scriptState, name);
-            RefPtr<InspectorValue> inspectorValue = jsToInspectorValue(scriptState, propertyValue, maxDepth);
-            if (!inspectorValue)
-                return nullptr;
-            inspectorObject->setValue(String(name.deprecatedCharacters(), name.length()), inspectorValue);
-        }
-        return inspectorObject;
-    }
-
-    ASSERT_NOT_REACHED();
-    return nullptr;
+    return getCallData(m_value.get(), callData) != CallType::None;
 }
 
-PassRefPtr<InspectorValue> ScriptValue::toInspectorValue(ExecState* scriptState) const
+RefPtr<InspectorValue> ScriptValue::toInspectorValue(ExecState* scriptState) const
 {
     JSLockHolder holder(scriptState);
-    return jsToInspectorValue(scriptState, m_value.get(), InspectorValue::maxDepth);
+    return jsToInspectorValue(*scriptState, m_value.get(), InspectorValue::maxDepth);
 }
-#endif // ENABLE(INSPECTOR)
 
 } // namespace Deprecated
diff --git a/Source/JavaScriptCore/bindings/ScriptValue.h b/Source/JavaScriptCore/bindings/ScriptValue.h
index d5fade90b..7eb50725d 100644
--- a/Source/JavaScriptCore/bindings/ScriptValue.h
+++ b/Source/JavaScriptCore/bindings/ScriptValue.h
@@ -29,18 +29,21 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef ScriptValue_h
-#define ScriptValue_h
+#pragma once
 
 #include "JSCJSValue.h"
+#include "JSCJSValueInlines.h"
 #include "Operations.h"
 #include "Strong.h"
 #include "StrongInlines.h"
-#include 
 #include 
 
 namespace Inspector {
+
 class InspectorValue;
+
+JS_EXPORT_PRIVATE RefPtr<InspectorValue> toInspectorValue(JSC::ExecState&, JSC::JSValue);
+
 }
 
 namespace Deprecated {
@@ -51,6 +54,7 @@ public:
     ScriptValue(JSC::VM& vm, JSC::JSValue value) : m_value(vm, value) { }
     virtual ~ScriptValue();
 
+    operator JSC::JSValue() const { return jsValue(); }
     JSC::JSValue jsValue() const { return m_value.get(); }
     bool getString(JSC::ExecState*, String& result) const;
     String toString(JSC::ExecState*) const;
@@ -65,14 +69,10 @@ public:
 
     bool operator==(const ScriptValue& other) const { return m_value == other.m_value; }
 
-#if ENABLE(INSPECTOR)
-    PassRefPtr<InspectorValue> toInspectorValue(JSC::ExecState*) const;
-#endif
+    RefPtr<InspectorValue> toInspectorValue(JSC::ExecState*) const;
 
 private:
     JSC::Strong<JSC::Unknown> m_value;
 };
 
 } // namespace Deprecated
-
-#endif // ScriptValue_h
diff --git a/Source/JavaScriptCore/builtins/ArrayConstructor.js b/Source/JavaScriptCore/builtins/ArrayConstructor.js
new file mode 100644
index 000000000..73add1a70
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/ArrayConstructor.js
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
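+// Array.of(...items): builds an array from the arguments. When |this| is a constructor
+// (for example an Array subclass), it is instantiated instead of a plain array.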
+function of(/* items... */)
+{
+    "use strict";
+
+    var length = arguments.length;
+    var array = @isConstructor(this) ? new this(length) : @newArrayWithSize(length);
+    for (var k = 0; k < length; ++k)
+        @putByValDirect(array, k, arguments[k]);
+    array.length = length;
+    return array;
+}
+
+function from(items /*, mapFn, thisArg */)
+{
+    "use strict";
+
+    var thisObj = this;
+
+    var mapFn = @argument(1);
+
+    var thisArg;
+
+    if (mapFn !== @undefined) {
+        if (typeof mapFn !== "function")
+            @throwTypeError("Array.from requires that the second argument, when provided, be a function");
+
+        thisArg = @argument(2);
+    }
+
+    if (items == null)
+        @throwTypeError("Array.from requires an array-like object - not null or undefined");
+
+    var iteratorMethod = items.@iteratorSymbol;
+    if (iteratorMethod != null) {
+        if (typeof iteratorMethod !== "function")
+            @throwTypeError("Array.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function");
+
+        var result = @isConstructor(thisObj) ? new thisObj() : [];
+
+        var k = 0;
+        var iterator = iteratorMethod.@call(items);
+
+        // Since the for-of loop looks up the @@iterator property of the iterable once
+        // more, that lookup could be observable if the user defines a getter for
+        // @@iterator. To avoid this, we use a wrapper object whose @@iterator method
+        // just returns the given iterator.
+        var wrapper = {};
+        wrapper.@iteratorSymbol = function() { return iterator; };
+
+        for (var value of wrapper) {
+            if (mapFn)
+                @putByValDirect(result, k, thisArg === @undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+            else
+                @putByValDirect(result, k, value);
+            k += 1;
+        }
+
+        result.length = k;
+        return result;
+    }
+
+    var arrayLike = @Object(items);
+    var arrayLikeLength = @toLength(arrayLike.length);
+
+    var result = @isConstructor(thisObj) ? new thisObj(arrayLikeLength) : @newArrayWithSize(arrayLikeLength);
+
+    var k = 0;
+    while (k < arrayLikeLength) {
+        var value = arrayLike[k];
+        if (mapFn)
+            @putByValDirect(result, k, thisArg === @undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+        else
+            @putByValDirect(result, k, value);
+        k += 1;
+    }
+
+    result.length = arrayLikeLength;
+    return result;
+}
+
+function isArray(array)
+{
+    "use strict";
+
+    if (@isJSArray(array) || @isDerivedArray(array))
+        return true;
+    if (!@isProxyObject(array))
+        return false;
+    return @isArraySlow(array);
+}
diff --git a/Source/JavaScriptCore/builtins/ArrayIteratorPrototype.js b/Source/JavaScriptCore/builtins/ArrayIteratorPrototype.js
new file mode 100644
index 000000000..92640f7c8
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/ArrayIteratorPrototype.js
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
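+// %ArrayIteratorPrototype%.next dispatches to whichever @globalPrivate helper below was
+// installed when the iterator was created (values, keys, or entries).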
+function next()
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("%ArrayIteratorPrototype%.next requires that |this| not be null or undefined");
+
+    let next = this.@arrayIteratorNext;
+    if (next === @undefined)
+        @throwTypeError("%ArrayIteratorPrototype%.next requires that |this| be an Array Iterator instance");
+
+    return next.@call(this);
+}
+
+@globalPrivate
+function arrayIteratorValueNext()
+{
+    "use strict";
+    var done = true;
+    var value;
+
+    var array = this.@iteratedObject;
+    if (!this.@arrayIteratorIsDone) {
+        var index = this.@arrayIteratorNextIndex;
+        var length = array.length >>> 0;
+        if (index >= length) {
+            this.@arrayIteratorIsDone = true;
+        } else {
+            this.@arrayIteratorNextIndex = index + 1;
+            done = false;
+            value = array[index];
+        }
+    }
+
+    return { done, value };
+}
+
+@globalPrivate
+function arrayIteratorKeyNext()
+{
+    "use strict";
+    var done = true;
+    var value;
+
+    var array = this.@iteratedObject;
+    if (!this.@arrayIteratorIsDone) {
+        var index = this.@arrayIteratorNextIndex;
+        var length = array.length >>> 0;
+        if (index >= length) {
+            this.@arrayIteratorIsDone = true;
+        } else {
+            this.@arrayIteratorNextIndex = index + 1;
+            done = false;
+            value = index;
+        }
+    }
+
+    return { done, value };
+}
+
+@globalPrivate
+function arrayIteratorKeyValueNext()
+{
+    "use strict";
+    var done = true;
+    var value;
+
+    var array = this.@iteratedObject;
+    if (!this.@arrayIteratorIsDone) {
+        var index = this.@arrayIteratorNextIndex;
+        var length = array.length >>> 0;
+        if (index >= length) {
+            this.@arrayIteratorIsDone = true;
+        } else {
+            this.@arrayIteratorNextIndex = index + 1;
+            done = false;
+            value = [ index, array[index] ];
+        }
+    }
+
+    return { done, value };
+}
diff --git a/Source/JavaScriptCore/builtins/ArrayPrototype.js b/Source/JavaScriptCore/builtins/ArrayPrototype.js
new file mode 100644
index 000000000..b9f835562
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/ArrayPrototype.js
@@ -0,0 +1,782 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@constructor
+@globalPrivate
+function createArrayIterator(iteratedObject, kind, iterationFunction)
+{
+    this.@iteratedObject = iteratedObject;
+    this.@arrayIteratorKind = kind;
+    this.@arrayIteratorNextIndex = 0;
+    this.@arrayIteratorNext = iterationFunction;
+    this.@arrayIteratorIsDone = false;
+}
+
+function values()
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.values requires that |this| not be null or undefined");
+
+    return new @createArrayIterator(@Object(this), "value", @arrayIteratorValueNext);
+}
+
+function keys()
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.keys requires that |this| not be null or undefined");
+
+    return new @createArrayIterator(@Object(this), "key", @arrayIteratorKeyNext);
+}
+
+function entries()
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.entries requires that |this| not be null or undefined");
+
+    return new @createArrayIterator(@Object(this), "key+value", @arrayIteratorKeyValueNext);
+}
+
+function reduce(callback /*, initialValue */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.reduce requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.reduce callback must be a function");
+
+    var argumentCount = @argumentCount();
+    if (length === 0 && argumentCount < 2)
+        @throwTypeError("reduce of empty array with no initial value");
+
+    var accumulator, k = 0;
+    if (argumentCount > 1)
+        accumulator = @argument(1);
+    else {
+        while (k < length && !(k in array))
+            k += 1;
+        if (k >= length)
+            @throwTypeError("reduce of empty array with no initial value");
+        accumulator = array[k++];
+    }
+
+    while (k < length) {
+        if (k in array)
+            accumulator = callback.@call(@undefined, accumulator, array[k], k, array);
+        k += 1;
+    }
+    return accumulator;
+}
+
+function reduceRight(callback /*, initialValue */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.reduceRight requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.reduceRight callback must be a function");
+
+    var argumentCount = @argumentCount();
+    if (length === 0 && argumentCount < 2)
+        @throwTypeError("reduceRight of empty array with no initial value");
+
+    var accumulator, k = length - 1;
+    if (argumentCount > 1)
+        accumulator = @argument(1);
+    else {
+        while (k >= 0 && !(k in array))
+            k -= 1;
+        if (k < 0)
+            @throwTypeError("reduceRight of empty array with no initial value");
+        accumulator = array[k--];
+    }
+
+    while (k >= 0) {
+        if (k in array)
+            accumulator = callback.@call(@undefined, accumulator, array[k], k, array);
+        k -= 1;
+    }
+    return accumulator;
+}
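
Editor's note: both reducers above skip holes via the `in` check and throw only when there is neither an initial value nor any own element. The observable behavior of the public methods these builtins implement:

    [ , 1, , 2].reduce((a, b) => a + b);       // 3 -- holes at 0 and 2 are skipped
    [1, 2, 3].reduceRight((a, b) => a - b);    // 0, i.e. (3 - 2) - 1
    [].reduce((a, b) => a + b, 42);            // 42 -- initial value, callback never runs
    [].reduce((a, b) => a + b);                // throws TypeError: no initial value
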
+
+function every(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.every requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.every callback must be a function");
+    
+    var thisArg = @argument(1);
+    
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        if (!callback.@call(thisArg, array[i], i, array))
+            return false;
+    }
+    
+    return true;
+}
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.forEach requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.forEach callback must be a function");
+    
+    var thisArg = @argument(1);
+    
+    for (var i = 0; i < length; i++) {
+        if (i in array)
+            callback.@call(thisArg, array[i], i, array);
+    }
+}
+
+function filter(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.filter requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.filter callback must be a function");
+    
+    var thisArg = @argument(1);
+
+    // Do 9.4.2.3 ArraySpeciesCreate
+    var result;
+    var constructor;
+    if (@isArray(array)) {
+        constructor = array.constructor;
+        // We have this check so that if an array from a different global object
+        // calls this filter, it doesn't get a result array with the Array.prototype
+        // of the other global object.
+        if (@isArrayConstructor(constructor) && @Array !== constructor)
+            constructor = @undefined;
+        if (@isObject(constructor)) {
+            constructor = constructor.@speciesSymbol;
+            if (constructor === null)
+                constructor = @undefined;
+        }
+    }
+    if (constructor === @Array || constructor === @undefined)
+        result = @newArrayWithSize(0);
+    else
+        result = new constructor(0);
+
+    var nextIndex = 0;
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        var current = array[i];
+        if (callback.@call(thisArg, current, i, array)) {
+            @putByValDirect(result, nextIndex, current);
+            ++nextIndex;
+        }
+    }
+    return result;
+}
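
Editor's note: the species lookup above (and the identical one in map below) is what lets Array subclasses control the type of the result array. Behavior of the corresponding public API:

    class Typed extends Array { }
    Typed.from([1, 2, 3]).filter(x => x > 1) instanceof Typed;   // true

    class Plain extends Array {
        static get [Symbol.species]() { return Array; }          // opt back out
    }
    Plain.from([1, 2, 3]).filter(x => x > 1) instanceof Plain;   // false
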
+
+function map(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.map requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.map callback must be a function");
+    
+    var thisArg = @argument(1);
+
+    // Do 9.4.2.3 ArraySpeciesCreate
+    var result;
+    var constructor;
+    if (@isArray(array)) {
+        constructor = array.constructor;
+        // We have this check so that if an array from a different global object
+        // calls this map, it doesn't get a result array with the Array.prototype
+        // of the other global object.
+        if (@isArrayConstructor(constructor) && @Array !== constructor)
+            constructor = @undefined;
+        if (@isObject(constructor)) {
+            constructor = constructor.@speciesSymbol;
+            if (constructor === null)
+                constructor = @undefined;
+        }
+    }
+    if (constructor === @Array || constructor === @undefined)
+        result = @newArrayWithSize(length);
+    else
+        result = new constructor(length);
+
+    var nextIndex = 0;
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        var mappedValue = callback.@call(thisArg, array[i], i, array);
+        @putByValDirect(result, i, mappedValue);
+    }
+    return result;
+}
+
+function some(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.some requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.some callback must be a function");
+    
+    var thisArg = @argument(1);
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        if (callback.@call(thisArg, array[i], i, array))
+            return true;
+    }
+    return false;
+}
+
+function fill(value /* [, start [, end]] */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.fill requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    var relativeStart = @toInteger(@argument(1));
+    var k = 0;
+    if (relativeStart < 0) {
+        k = length + relativeStart;
+        if (k < 0)
+            k = 0;
+    } else {
+        k = relativeStart;
+        if (k > length)
+            k = length;
+    }
+    var relativeEnd = length;
+    var end = @argument(2);
+    if (end !== @undefined)
+        relativeEnd = @toInteger(end);
+    var final = 0;
+    if (relativeEnd < 0) {
+        final = length + relativeEnd;
+        if (final < 0)
+            final = 0;
+    } else {
+        final = relativeEnd;
+        if (final > length)
+            final = length;
+    }
+    for (; k < final; k++)
+        array[k] = value;
+    return array;
+}
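
Editor's note: negative start and end arguments are taken relative to length and clamped into [0, length], so fill never grows the array:

    [0, 0, 0, 0].fill(7, -2);      // [0, 0, 7, 7]
    [0, 0, 0, 0].fill(7, 1, -1);   // [0, 7, 7, 0]
    [0, 0, 0, 0].fill(7, 10);      // [0, 0, 0, 0] -- start clamped to length
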
+
+function find(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.find requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.find callback must be a function");
+    
+    var thisArg = @argument(1);
+    for (var i = 0; i < length; i++) {
+        var kValue = array[i];
+        if (callback.@call(thisArg, kValue, i, array))
+            return kValue;
+    }
+    return @undefined;
+}
+
+function findIndex(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.findIndex requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.findIndex callback must be a function");
+    
+    var thisArg = @argument(1);
+    for (var i = 0; i < length; i++) {
+        if (callback.@call(thisArg, array[i], i, array))
+            return i;
+    }
+    return -1;
+}
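
Editor's note: unlike every/forEach/filter above, find and findIndex have no `in` guard, so the callback also runs for holes, which read as undefined. Public behavior:

    [ , 5].find(x => x === undefined);        // undefined -- the hole at index 0 matched
    [ , 5].findIndex(x => x === undefined);   // 0
    [ , 5].some(x => x === undefined);        // false -- some skips holes
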
+
+function includes(searchElement /*, fromIndex*/)
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.includes requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (length === 0)
+        return false;
+
+    var fromIndex = 0;
+    var from = @argument(1);
+    if (from !== @undefined)
+        fromIndex = @toInteger(from);
+
+    var index;
+    if (fromIndex >= 0)
+        index = fromIndex;
+    else
+        index = length + fromIndex;
+
+    if (index < 0)
+        index = 0;
+
+    var currentElement;
+    for (; index < length; ++index) {
+        currentElement = array[index];
+        // Use SameValueZero comparison, rather than just StrictEquals.
+        if (searchElement === currentElement || (searchElement !== searchElement && currentElement !== currentElement))
+            return true;
+    }
+    return false;
+}
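
Editor's note: the self-inequality test in the loop is exactly what separates SameValueZero from StrictEquals, and it is observable with NaN:

    [NaN].includes(NaN);   // true  -- SameValueZero treats NaN as equal to itself
    [NaN].indexOf(NaN);    // -1    -- indexOf uses StrictEquals
    [0].includes(-0);      // true  -- +0 and -0 are equal under both
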
+
+function sort(comparator)
+{
+    "use strict";
+
+    function min(a, b)
+    {
+        return a < b ? a : b;
+    }
+
+    function stringComparator(a, b)
+    {
+        var aString = a.string;
+        var bString = b.string;
+
+        var aLength = aString.length;
+        var bLength = bString.length;
+        var length = min(aLength, bLength);
+
+        for (var i = 0; i < length; ++i) {
+            var aCharCode = aString.@charCodeAt(i);
+            var bCharCode = bString.@charCodeAt(i);
+
+            if (aCharCode == bCharCode)
+                continue;
+
+            return aCharCode - bCharCode;
+        }
+
+        return aLength - bLength;
+    }
+
+    // Move undefineds and holes to the end of a sparse array. Result is [values..., undefineds..., holes...].
+    function compactSparse(array, dst, src, length)
+    {
+        var values = [ ];
+        var seen = { };
+        var valueCount = 0;
+        var undefinedCount = 0;
+
+        // Clean up after the in-progress non-sparse compaction that failed.
+        for (var i = dst; i < src; ++i)
+            delete array[i];
+
+        for (var object = array; object; object = @Object.@getPrototypeOf(object)) {
+            var propertyNames = @Object.@getOwnPropertyNames(object);
+            for (var i = 0; i < propertyNames.length; ++i) {
+                var index = propertyNames[i];
+                if (index < length) { // Exclude non-numeric properties and properties past length.
+                    if (seen[index]) // Exclude duplicates.
+                        continue;
+                    seen[index] = 1;
+
+                    var value = array[index];
+                    delete array[index];
+
+                    if (value === @undefined) {
+                        ++undefinedCount;
+                        continue;
+                    }
+
+                    array[valueCount++] = value;
+                }
+            }
+        }
+
+        for (var i = valueCount; i < valueCount + undefinedCount; ++i)
+            array[i] = @undefined;
+
+        return valueCount;
+    }
+
+    function compactSlow(array, length)
+    {
+        var holeCount = 0;
+
+        for (var dst = 0, src = 0; src < length; ++src) {
+            if (!(src in array)) {
+                ++holeCount;
+                if (holeCount < 256)
+                    continue;
+                return compactSparse(array, dst, src, length);
+            }
+
+            var value = array[src];
+            if (value === @undefined)
+                continue;
+
+            array[dst++] = value;
+        }
+
+        var valueCount = dst;
+        var undefinedCount = length - valueCount - holeCount;
+
+        for (var i = valueCount; i < valueCount + undefinedCount; ++i)
+            array[i] = @undefined;
+
+        for (var i = valueCount + undefinedCount; i < length; ++i)
+            delete array[i];
+
+        return valueCount;
+    }
+
+    // Move undefineds and holes to the end of an array. Result is [values..., undefineds..., holes...].
+    function compact(array, length)
+    {
+        for (var i = 0; i < array.length; ++i) {
+            if (array[i] === @undefined)
+                return compactSlow(array, length);
+        }
+
+        return length;
+    }
+
+    function merge(dst, src, srcIndex, srcEnd, width, comparator)
+    {
+        var left = srcIndex;
+        var leftEnd = min(left + width, srcEnd);
+        var right = leftEnd;
+        var rightEnd = min(right + width, srcEnd);
+
+        for (var dstIndex = left; dstIndex < rightEnd; ++dstIndex) {
+            if (right < rightEnd) {
+                if (left >= leftEnd || comparator(src[right], src[left]) < 0) {
+                    dst[dstIndex] = src[right++];
+                    continue;
+                }
+            }
+
+            dst[dstIndex] = src[left++];
+        }
+    }
+
+    function mergeSort(array, valueCount, comparator)
+    {
+        var buffer = [ ];
+        buffer.length = valueCount;
+
+        var dst = buffer;
+        var src = array;
+        for (var width = 1; width < valueCount; width *= 2) {
+            for (var srcIndex = 0; srcIndex < valueCount; srcIndex += 2 * width)
+                merge(dst, src, srcIndex, valueCount, width, comparator);
+
+            var tmp = src;
+            src = dst;
+            dst = tmp;
+        }
+
+        if (src != array) {
+            for (var i = 0; i < valueCount; i++)
+                array[i] = src[i];
+        }
+    }
+
+    function bucketSort(array, dst, bucket, depth)
+    {
+        if (bucket.length < 32 || depth > 32) {
+            mergeSort(bucket, bucket.length, stringComparator);
+            for (var i = 0; i < bucket.length; ++i)
+                array[dst++] = bucket[i].value;
+            return dst;
+        }
+
+        var buckets = [ ];
+        for (var i = 0; i < bucket.length; ++i) {
+            var entry = bucket[i];
+            var string = entry.string;
+            if (string.length == depth) {
+                array[dst++] = entry.value;
+                continue;
+            }
+
+            var c = string.@charCodeAt(depth);
+            if (!buckets[c])
+                buckets[c] = [ ];
+            buckets[c][buckets[c].length] = entry;
+        }
+
+        for (var i = 0; i < buckets.length; ++i) {
+            if (!buckets[i])
+                continue;
+            dst = bucketSort(array, dst, buckets[i], depth + 1);
+        }
+
+        return dst;
+    }
+
+    function comparatorSort(array, length, comparator)
+    {
+        var valueCount = compact(array, length);
+        mergeSort(array, valueCount, comparator);
+    }
+
+    function stringSort(array, length)
+    {
+        var valueCount = compact(array, length);
+
+        var strings = @newArrayWithSize(valueCount);
+        for (var i = 0; i < valueCount; ++i)
+            strings[i] = { string: @toString(array[i]), value: array[i] };
+
+        bucketSort(array, 0, strings, 0);
+    }
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.sort requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+
+    var length = array.length >>> 0;
+
+    // For compatibility with Firefox and Chrome, do nothing observable
+    // to the target array if it has 0 or 1 sortable properties.
+    if (length < 2)
+        return array;
+
+    if (typeof comparator == "function")
+        comparatorSort(array, length, comparator);
+    else if (comparator === @undefined)
+        stringSort(array, length);
+    else
+        @throwTypeError("Array.prototype.sort requires the comparsion function be a function or undefined");
+
+    return array;
+}
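
Editor's note: without a comparator, elements are ordered as strings by the bucket/merge sort above, and compact() guarantees undefineds and holes land after all values:

    [10, 9, 1].sort();                 // [1, 10, 9] -- code-unit order of "1", "10", "9"
    [10, 9, 1].sort((a, b) => a - b);  // [1, 9, 10]
    [3, undefined, 1].sort();          // [1, 3, undefined]
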
+
+function concatSlowPath()
+{
+    "use strict";
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.prototype.concat requires that |this| not be null or undefined");
+
+    var currentElement = @Object(this);
+
+    var constructor;
+    if (@isArray(currentElement)) {
+        constructor = currentElement.constructor;
+        // We have this check so that if an array from a different global object
+        // calls this concat, it doesn't get a result array with the Array.prototype
+        // of the other global object.
+        if (@isArrayConstructor(constructor) && @Array !== constructor)
+            constructor = @undefined;
+        else if (@isObject(constructor)) {
+            constructor = constructor.@speciesSymbol;
+            if (constructor === null)
+                constructor = @Array;
+        }
+    }
+
+    var argCount = arguments.length;
+    var result;
+    if (constructor === @Array || constructor === @undefined)
+        result = @newArrayWithSize(0);
+    else
+        result = new constructor(0);
+    var resultIsArray = @isJSArray(result);
+
+    var resultIndex = 0;
+    var argIndex = 0;
+
+    do {
+        let spreadable = @isObject(currentElement) && currentElement.@isConcatSpreadableSymbol;
+        if ((spreadable === @undefined && @isArray(currentElement)) || spreadable) {
+            let length = @toLength(currentElement.length);
+            if (length + resultIndex > @MAX_ARRAY_INDEX)
+                @throwRangeError("Length exceeded the maximum array length");
+            if (resultIsArray && @isJSArray(currentElement)) {
+                @appendMemcpy(result, currentElement, resultIndex);
+                resultIndex += length;
+            } else {
+                for (var i = 0; i < length; i++) {
+                    if (i in currentElement)
+                        @putByValDirect(result, resultIndex, currentElement[i]);
+                    resultIndex++;
+                }
+            }
+        } else {
+            if (resultIndex >= @MAX_ARRAY_INDEX)
+                @throwRangeError("Length exceeded the maximum array length");
+            @putByValDirect(result, resultIndex++, currentElement);
+        }
+        currentElement = arguments[argIndex];
+    } while (argIndex++ < argCount);
+
+    result.length = resultIndex;
+    return result;
+}
+
+function concat(first)
+{
+    "use strict";
+
+    if (@argumentCount() === 1
+        && @isJSArray(this)
+        && this.@isConcatSpreadableSymbol === @undefined
+        && (!@isObject(first) || first.@isConcatSpreadableSymbol === @undefined)) {
+
+        let result = @concatMemcpy(this, first);
+        if (result !== null)
+            return result;
+    }
+
+    return @tailCallForwardArguments(@concatSlowPath, this);
+}
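
Editor's note: the @isConcatSpreadableSymbol reads correspond to the public Symbol.isConcatSpreadable hook; the @concatMemcpy fast path is only attempted when neither operand defines it. Public behavior:

    [1].concat([2, 3]);                 // [1, 2, 3]

    const arr = [2, 3];
    arr[Symbol.isConcatSpreadable] = false;
    [1].concat(arr);                    // [1, [2, 3]] -- not spread

    const arrayLike = { 0: "a", length: 1, [Symbol.isConcatSpreadable]: true };
    [1].concat(arrayLike);              // [1, "a"] -- spread despite not being an array
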
+
+function copyWithin(target, start /*, end */)
+{
+    "use strict";
+
+    function maxWithPositives(a, b)
+    {
+        return (a < b) ? b : a;
+    }
+
+    function minWithMaybeNegativeZeroAndPositive(maybeNegativeZero, positive)
+    {
+        return (maybeNegativeZero < positive) ? maybeNegativeZero : positive;
+    }
+
+    if (this === null || this === @undefined)
+        @throwTypeError("Array.copyWithin requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    var relativeTarget = @toInteger(target);
+    var to = (relativeTarget < 0) ? maxWithPositives(length + relativeTarget, 0) : minWithMaybeNegativeZeroAndPositive(relativeTarget, length);
+
+    var relativeStart = @toInteger(start);
+    var from = (relativeStart < 0) ? maxWithPositives(length + relativeStart, 0) : minWithMaybeNegativeZeroAndPositive(relativeStart, length);
+
+    var relativeEnd;
+    var end = @argument(2);
+    if (end === @undefined)
+        relativeEnd = length;
+    else
+        relativeEnd = @toInteger(end);
+
+    var finalValue = (relativeEnd < 0) ? maxWithPositives(length + relativeEnd, 0) : minWithMaybeNegativeZeroAndPositive(relativeEnd, length);
+
+    var count = minWithMaybeNegativeZeroAndPositive(finalValue - from, length - to);
+
+    var direction = 1;
+    if (from < to && to < from + count) {
+        direction = -1;
+        from = from + count - 1;
+        to = to + count - 1;
+    }
+
+    for (var i = 0; i < count; ++i, from += direction, to += direction) {
+        if (from in array)
+            array[to] = array[from];
+        else
+            delete array[to];
+    }
+
+    return array;
+}
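
Editor's note: the direction flip above handles overlapping source and destination ranges; when the destination starts inside the source range, copying runs back-to-front so no element is overwritten before it is read:

    [1, 2, 3, 4, 5].copyWithin(1, 0, 3);   // [1, 1, 2, 3, 5] -- overlap, copied in reverse
    [1, 2, 3, 4, 5].copyWithin(0, 3);      // [4, 5, 3, 4, 5]
    [1, 2, 3, 4, 5].copyWithin(-2, 0);     // [1, 2, 3, 1, 2]
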
diff --git a/Source/JavaScriptCore/builtins/AsyncFunctionPrototype.js b/Source/JavaScriptCore/builtins/AsyncFunctionPrototype.js
new file mode 100644
index 000000000..88cfb01d1
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/AsyncFunctionPrototype.js
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Caitlin Potter.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@globalPrivate
+function asyncFunctionResume(generator, promiseCapability, sentValue, resumeMode)
+{
+    "use strict";
+    let state = generator.@generatorState;
+    let value = @undefined;
+
+    if (state === @GeneratorStateCompleted || (resumeMode !== @GeneratorResumeModeNormal && resumeMode !== @GeneratorResumeModeThrow))
+        @throwTypeError("Async function illegally resumed");
+
+    try {
+        generator.@generatorState = @GeneratorStateExecuting;
+        value = generator.@generatorNext.@call(generator.@generatorThis, generator, state, sentValue, resumeMode, generator.@generatorFrame);
+        if (generator.@generatorState === @GeneratorStateExecuting) {
+            generator.@generatorState = @GeneratorStateCompleted;
+            promiseCapability.@resolve(value);
+            return promiseCapability.@promise;
+        }
+    } catch (error) {
+        generator.@generatorState = @GeneratorStateCompleted;
+        promiseCapability.@reject(error);
+        return promiseCapability.@promise;
+    }
+
+    let wrappedValue = @newPromiseCapability(@Promise);
+    wrappedValue.@resolve.@call(@undefined, value);
+
+    wrappedValue.@promise.@then(
+        function(value) { @asyncFunctionResume(generator, promiseCapability, value, @GeneratorResumeModeNormal); },
+        function(error) { @asyncFunctionResume(generator, promiseCapability, error, @GeneratorResumeModeThrow); });
+
+    return promiseCapability.@promise;
+}
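
Editor's note: the resume loop above is the standard async-function driving pattern: run the generator to the next suspension point, wrap whatever was awaited in a promise, and schedule re-entry (normal or throwing) when it settles. A minimal userland sketch of the same pattern over an ordinary generator -- runAsync is a hypothetical helper, not JSC API:

    // Drive a generator that yields promises to completion.
    function runAsync(generatorFunction, ...args) {
        const generator = generatorFunction(...args);
        return new Promise((resolve, reject) => {
            function step(method, sent) {
                let result;
                try {
                    result = generator[method](sent);   // resume normally or with a throw
                } catch (error) {
                    reject(error);                      // generator completed abruptly
                    return;
                }
                if (result.done) {
                    resolve(result.value);              // generator returned
                    return;
                }
                Promise.resolve(result.value)           // wrap the awaited value
                    .then(v => step("next", v), e => step("throw", e));
            }
            step("next", undefined);
        });
    }

    runAsync(function* () { return (yield Promise.resolve(20)) + 1; })
        .then(v => console.log(v));                     // 21
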
diff --git a/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.cpp b/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.cpp
new file mode 100644
index 000000000..2b79e7e6e
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BuiltinExecutableCreator.h"
+
+#include "BuiltinExecutables.h"
+
+namespace JSC {
+
+UnlinkedFunctionExecutable* createBuiltinExecutable(VM& vm, const SourceCode& source, const Identifier& ident, ConstructorKind kind, ConstructAbility ability)
+{
+    return BuiltinExecutables::createExecutable(vm, source, ident, kind, ability);
+}
+    
+} // namespace JSC
diff --git a/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.h b/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.h
new file mode 100644
index 000000000..19c0884b7
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/BuiltinExecutableCreator.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ConstructAbility.h"
+#include "ParserModes.h"
+#include "SourceCode.h"
+
+namespace JSC {
+
+JS_EXPORT_PRIVATE UnlinkedFunctionExecutable* createBuiltinExecutable(VM&, const SourceCode&, const Identifier&, ConstructorKind, ConstructAbility);
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/builtins/BuiltinExecutables.cpp b/Source/JavaScriptCore/builtins/BuiltinExecutables.cpp
new file mode 100644
index 000000000..a5be5c995
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/BuiltinExecutables.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "config.h"
+#include "BuiltinExecutables.h"
+
+#include "BuiltinNames.h"
+#include "JSCInlines.h"
+#include "Parser.h"
+#include <wtf/NeverDestroyed.h>
+
+namespace JSC {
+
+BuiltinExecutables::BuiltinExecutables(VM& vm)
+    : m_vm(vm)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
+    JSC_FOREACH_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+{
+}
+
+UnlinkedFunctionExecutable* BuiltinExecutables::createDefaultConstructor(ConstructorKind constructorKind, const Identifier& name)
+{
+    static NeverDestroyed<const String> baseConstructorCode(ASCIILiteral("(function () { })"));
+    static NeverDestroyed<const String> derivedConstructorCode(ASCIILiteral("(function (...args) { super(...args); })"));
+
+    switch (constructorKind) {
+    case ConstructorKind::None:
+        break;
+    case ConstructorKind::Base:
+        return createExecutable(m_vm, makeSource(baseConstructorCode, { }), name, constructorKind, ConstructAbility::CanConstruct);
+    case ConstructorKind::Extends:
+        return createExecutable(m_vm, makeSource(derivedConstructorCode, { }), name, constructorKind, ConstructAbility::CanConstruct);
+    }
+    ASSERT_NOT_REACHED();
+    return nullptr;
+}
+
+UnlinkedFunctionExecutable* BuiltinExecutables::createBuiltinExecutable(const SourceCode& code, const Identifier& name, ConstructAbility constructAbility)
+{
+    return createExecutable(m_vm, code, name, ConstructorKind::None, constructAbility);
+}
+
+UnlinkedFunctionExecutable* createBuiltinExecutable(VM& vm, const SourceCode& code, const Identifier& name, ConstructAbility constructAbility)
+{
+    return BuiltinExecutables::createExecutable(vm, code, name, ConstructorKind::None, constructAbility);
+}
+
+UnlinkedFunctionExecutable* BuiltinExecutables::createExecutable(VM& vm, const SourceCode& source, const Identifier& name, ConstructorKind constructorKind, ConstructAbility constructAbility)
+{
+    JSTextPosition positionBeforeLastNewline;
+    ParserError error;
+    bool isParsingDefaultConstructor = constructorKind != ConstructorKind::None;
+    JSParserBuiltinMode builtinMode = isParsingDefaultConstructor ? JSParserBuiltinMode::NotBuiltin : JSParserBuiltinMode::Builtin;
+    UnlinkedFunctionKind kind = isParsingDefaultConstructor ? UnlinkedNormalFunction : UnlinkedBuiltinFunction;
+    SourceCode parentSourceOverride = isParsingDefaultConstructor ? source : SourceCode();
+    std::unique_ptr<ProgramNode> program = parse<ProgramNode>(
+        &vm, source, Identifier(), builtinMode,
+        JSParserStrictMode::NotStrict, JSParserScriptMode::Classic, SourceParseMode::ProgramMode, SuperBinding::NotNeeded, error,
+        &positionBeforeLastNewline, constructorKind);
+
+    if (!program) {
+        dataLog("Fatal error compiling builtin function '", name.string(), "': ", error.message());
+        CRASH();
+    }
+
+    StatementNode* exprStatement = program->singleStatement();
+    RELEASE_ASSERT(exprStatement);
+    RELEASE_ASSERT(exprStatement->isExprStatement());
+    ExpressionNode* funcExpr = static_cast<ExprStatementNode*>(exprStatement)->expr();
+    RELEASE_ASSERT(funcExpr);
+    RELEASE_ASSERT(funcExpr->isFuncExprNode());
+    FunctionMetadataNode* metadata = static_cast<FuncExprNode*>(funcExpr)->metadata();
+    RELEASE_ASSERT(!program->hasCapturedVariables());
+    
+    // This function assumes an input string that would result in a single anonymous function expression.
+    RELEASE_ASSERT(metadata);
+    RELEASE_ASSERT(metadata->ident().isNull());
+    metadata->setEndPosition(positionBeforeLastNewline);
+    metadata->overrideName(name);
+    VariableEnvironment dummyTDZVariables;
+    UnlinkedFunctionExecutable* functionExecutable = UnlinkedFunctionExecutable::create(&vm, source, metadata, kind, constructAbility, JSParserScriptMode::Classic, dummyTDZVariables, DerivedContextType::None, WTFMove(parentSourceOverride));
+    return functionExecutable;
+}
+
+void BuiltinExecutables::finalize(Handle<Unknown>, void* context)
+{
+    static_cast<Weak<UnlinkedFunctionExecutable>*>(context)->clear();
+}
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+UnlinkedFunctionExecutable* BuiltinExecutables::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = Weak<UnlinkedFunctionExecutable>(createBuiltinExecutable(m_##name##Source, m_vm.propertyNames->builtinNames().functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+}
diff --git a/Source/JavaScriptCore/builtins/BuiltinExecutables.h b/Source/JavaScriptCore/builtins/BuiltinExecutables.h
new file mode 100644
index 000000000..ee0eaad02
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/BuiltinExecutables.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSCBuiltins.h"
+#include "ParserModes.h"
+#include "SourceCode.h"
+#include "Weak.h"
+#include "WeakHandleOwner.h"
+
+namespace JSC {
+
+class UnlinkedFunctionExecutable;
+class Identifier;
+class VM;
+
+class BuiltinExecutables final: private WeakHandleOwner {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    explicit BuiltinExecutables(VM&);
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+UnlinkedFunctionExecutable* name##Executable(); \
+const SourceCode& name##Source() { return m_##name##Source; }
+    
+    JSC_FOREACH_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    UnlinkedFunctionExecutable* createDefaultConstructor(ConstructorKind, const Identifier& name);
+
+    static UnlinkedFunctionExecutable* createExecutable(VM&, const SourceCode&, const Identifier&, ConstructorKind, ConstructAbility);
+private:
+    void finalize(Handle<Unknown>, void* context) override;
+
+    VM& m_vm;
+
+    UnlinkedFunctionExecutable* createBuiltinExecutable(const SourceCode&, const Identifier&, ConstructAbility);
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length)\
+    SourceCode m_##name##Source; \
+    Weak<UnlinkedFunctionExecutable> m_##name##Executable;
+    JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+};
+
+}
diff --git a/Source/JavaScriptCore/builtins/BuiltinNames.h b/Source/JavaScriptCore/builtins/BuiltinNames.h
new file mode 100644
index 000000000..03aa44c68
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/BuiltinNames.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BuiltinUtils.h"
+#include "BytecodeIntrinsicRegistry.h"
+#include "CommonIdentifiers.h"
+#include "JSCBuiltins.h"
+
+namespace JSC {
+
+#define JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(macro) \
+    JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(macro) \
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(macro) \
+    macro(arrayIteratorNextIndex) \
+    macro(arrayIterationKind) \
+    macro(arrayIteratorNext) \
+    macro(arrayIteratorIsDone) \
+    macro(arrayIteratorKind) \
+    macro(charCodeAt) \
+    macro(isView) \
+    macro(iteratedObject) \
+    macro(iteratedString) \
+    macro(stringIteratorNextIndex) \
+    macro(promise) \
+    macro(fulfillmentHandler) \
+    macro(rejectionHandler) \
+    macro(index) \
+    macro(deferred) \
+    macro(countdownHolder) \
+    macro(Object) \
+    macro(ownEnumerablePropertyKeys) \
+    macro(Number) \
+    macro(Array) \
+    macro(ArrayBuffer) \
+    macro(String) \
+    macro(RegExp) \
+    macro(Map) \
+    macro(Promise) \
+    macro(Reflect) \
+    macro(InternalPromise) \
+    macro(abs) \
+    macro(floor) \
+    macro(trunc) \
+    macro(create) \
+    macro(defineProperty) \
+    macro(getPrototypeOf) \
+    macro(getOwnPropertyDescriptor) \
+    macro(getOwnPropertyNames) \
+    macro(ownKeys) \
+    macro(Error) \
+    macro(RangeError) \
+    macro(TypeError) \
+    macro(typedArrayLength) \
+    macro(typedArraySort) \
+    macro(typedArrayGetOriginalConstructor) \
+    macro(typedArraySubarrayCreate) \
+    macro(BuiltinLog) \
+    macro(homeObject) \
+    macro(getTemplateObject) \
+    macro(templateRegistryKey) \
+    macro(enqueueJob) \
+    macro(promiseState) \
+    macro(promiseReactions) \
+    macro(promiseResult) \
+    macro(onFulfilled) \
+    macro(onRejected) \
+    macro(push) \
+    macro(repeatCharacter) \
+    macro(capabilities) \
+    macro(starDefault) \
+    macro(InspectorInstrumentation) \
+    macro(get) \
+    macro(set) \
+    macro(shift) \
+    macro(allocateTypedArray) \
+    macro(Int8Array) \
+    macro(Int16Array) \
+    macro(Int32Array) \
+    macro(Uint8Array) \
+    macro(Uint8ClampedArray) \
+    macro(Uint16Array) \
+    macro(Uint32Array) \
+    macro(Float32Array) \
+    macro(Float64Array) \
+    macro(exec) \
+    macro(generator) \
+    macro(generatorNext) \
+    macro(generatorState) \
+    macro(generatorFrame) \
+    macro(generatorValue) \
+    macro(generatorThis) \
+    macro(generatorResumeMode) \
+    macro(Collator) \
+    macro(DateTimeFormat) \
+    macro(NumberFormat) \
+    macro(intlSubstituteValue) \
+    macro(thisTimeValue) \
+    macro(thisNumberValue) \
+    macro(newTargetLocal) \
+    macro(derivedConstructor) \
+    macro(isTypedArrayView) \
+    macro(isBoundFunction) \
+    macro(hasInstanceBoundFunction) \
+    macro(instanceOf) \
+    macro(isArraySlow) \
+    macro(isArrayConstructor) \
+    macro(isConstructor) \
+    macro(isDerivedConstructor) \
+    macro(concatMemcpy) \
+    macro(appendMemcpy) \
+    macro(predictFinalLengthFromArgumunts) \
+    macro(print) \
+    macro(regExpCreate) \
+    macro(SetIterator) \
+    macro(setIteratorNext) \
+    macro(replaceUsingRegExp) \
+    macro(replaceUsingStringSearch) \
+    macro(MapIterator) \
+    macro(mapIteratorNext) \
+    macro(regExpBuiltinExec) \
+    macro(regExpMatchFast) \
+    macro(regExpProtoFlagsGetter) \
+    macro(regExpProtoGlobalGetter) \
+    macro(regExpProtoIgnoreCaseGetter) \
+    macro(regExpProtoMultilineGetter) \
+    macro(regExpProtoSourceGetter) \
+    macro(regExpProtoStickyGetter) \
+    macro(regExpProtoUnicodeGetter) \
+    macro(regExpPrototypeSymbolReplace) \
+    macro(regExpReplaceFast) \
+    macro(regExpSearchFast) \
+    macro(regExpSplitFast) \
+    macro(regExpTestFast) \
+    macro(stringIncludesInternal) \
+    macro(stringSplitFast) \
+    macro(stringSubstrInternal) \
+    macro(makeBoundFunction) \
+    macro(hasOwnLengthProperty) \
+    macro(importModule) \
+    macro(WebAssembly) \
+    macro(Module) \
+    macro(Instance) \
+    macro(Memory) \
+    macro(Table) \
+    macro(CompileError) \
+    macro(LinkError) \
+    macro(RuntimeError) \
+
+
+#define INITIALIZE_PRIVATE_TO_PUBLIC_ENTRY(name) m_privateToPublicMap.add(m_##name##PrivateName.impl(), &m_##name);
+#define INITIALIZE_PUBLIC_TO_PRIVATE_ENTRY(name) m_publicToPrivateMap.add(m_##name.impl(), &m_##name##PrivateName);
+
+// We commandeer the publicToPrivateMap to allow us to convert private symbol names into the appropriate symbol.
+// e.g. @iteratorSymbol points to Symbol.iterator in this map rather than to an actual private name.
+// FIXME: This is a weird hack and we shouldn't need to do this.
+#define INITIALIZE_SYMBOL_PUBLIC_TO_PRIVATE_ENTRY(name) m_publicToPrivateMap.add(m_##name##SymbolPrivateIdentifier.impl(), &m_##name##Symbol);
+
+class BuiltinNames {
+    WTF_MAKE_NONCOPYABLE(BuiltinNames); WTF_MAKE_FAST_ALLOCATED;
+    
+public:
+    // We treat the dollarVM name as a special case below for $vm (because CommonIdentifiers does not
+    // yet support the $ character).
+
+    BuiltinNames(VM* vm, CommonIdentifiers* commonIdentifiers)
+        : m_emptyIdentifier(commonIdentifiers->emptyIdentifier)
+        JSC_FOREACH_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(INITIALIZE_BUILTIN_NAMES)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_WELL_KNOWN_SYMBOL(INITIALIZE_BUILTIN_SYMBOLS)
+        , m_dollarVMName(Identifier::fromString(vm, "$vm"))
+        , m_dollarVMPrivateName(Identifier::fromUid(PrivateName(PrivateName::Description, ASCIILiteral("PrivateSymbol.$vm"))))
+    {
+        JSC_FOREACH_BUILTIN_FUNCTION_NAME(INITIALIZE_PRIVATE_TO_PUBLIC_ENTRY)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(INITIALIZE_PRIVATE_TO_PUBLIC_ENTRY)
+        JSC_FOREACH_BUILTIN_FUNCTION_NAME(INITIALIZE_PUBLIC_TO_PRIVATE_ENTRY)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(INITIALIZE_PUBLIC_TO_PRIVATE_ENTRY)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_WELL_KNOWN_SYMBOL(INITIALIZE_SYMBOL_PUBLIC_TO_PRIVATE_ENTRY)
+        m_privateToPublicMap.add(m_dollarVMPrivateName.impl(), &m_dollarVMName);
+        m_publicToPrivateMap.add(m_dollarVMName.impl(), &m_dollarVMPrivateName);
+    }
+
+    bool isPrivateName(SymbolImpl& uid) const;
+    bool isPrivateName(UniquedStringImpl& uid) const;
+    bool isPrivateName(const Identifier&) const;
+    const Identifier* lookUpPrivateName(const Identifier&) const;
+    const Identifier& lookUpPublicName(const Identifier&) const;
+    
+    void appendExternalName(const Identifier& publicName, const Identifier& privateName);
+
+    JSC_FOREACH_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+    JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+    JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_WELL_KNOWN_SYMBOL(DECLARE_BUILTIN_SYMBOL_ACCESSOR)
+    const JSC::Identifier& dollarVMPublicName() const { return m_dollarVMName; }
+    const JSC::Identifier& dollarVMPrivateName() const { return m_dollarVMPrivateName; }
+
+private:
+    Identifier m_emptyIdentifier;
+    JSC_FOREACH_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+    JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(DECLARE_BUILTIN_NAMES)
+    JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_WELL_KNOWN_SYMBOL(DECLARE_BUILTIN_SYMBOLS)
+    const JSC::Identifier m_dollarVMName;
+    const JSC::Identifier m_dollarVMPrivateName;
+    typedef HashMap<RefPtr<UniquedStringImpl>, const Identifier*, IdentifierRepHash> BuiltinNamesMap;
+    BuiltinNamesMap m_publicToPrivateMap;
+    BuiltinNamesMap m_privateToPublicMap;
+};
+
+inline bool BuiltinNames::isPrivateName(SymbolImpl& uid) const
+{
+    return m_privateToPublicMap.contains(&uid);
+}
+
+inline bool BuiltinNames::isPrivateName(UniquedStringImpl& uid) const
+{
+    if (!uid.isSymbol())
+        return false;
+    return m_privateToPublicMap.contains(&uid);
+}
+
+inline bool BuiltinNames::isPrivateName(const Identifier& ident) const
+{
+    if (ident.isNull())
+        return false;
+    return isPrivateName(*ident.impl());
+}
+
+inline const Identifier* BuiltinNames::lookUpPrivateName(const Identifier& ident) const
+{
+    auto iter = m_publicToPrivateMap.find(ident.impl());
+    if (iter != m_publicToPrivateMap.end())
+        return iter->value;
+    return nullptr;
+}
+
+inline const Identifier& BuiltinNames::lookUpPublicName(const Identifier& ident) const
+{
+    auto iter = m_privateToPublicMap.find(ident.impl());
+    if (iter != m_privateToPublicMap.end())
+        return *iter->value;
+    return m_emptyIdentifier;
+}
+
+inline void BuiltinNames::appendExternalName(const Identifier& publicName, const Identifier& privateName)
+{
+#ifndef NDEBUG
+    for (const auto& key : m_publicToPrivateMap.keys())
+        ASSERT(publicName.string() != *key);
+#endif
+
+    m_privateToPublicMap.add(privateName.impl(), &publicName);
+    m_publicToPrivateMap.add(publicName.impl(), &privateName);
+}
+
+} // namespace JSC
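
Editor's note: conceptually, the two maps above maintain a bijection between public spellings like push and the private symbols that builtin source spells @push. An illustrative model in plain JavaScript (the real maps key on interned StringImpls; these names are for exposition only):

    const publicToPrivate = new Map();   // "push" -> Symbol("PrivateSymbol.push")
    const privateToPublic = new Map();   // the reverse direction

    function addName(publicName) {
        const priv = Symbol("PrivateSymbol." + publicName);
        publicToPrivate.set(publicName, priv);
        privateToPublic.set(priv, publicName);
        return priv;
    }

    const pushPrivate = addName("push");
    publicToPrivate.get("push") === pushPrivate;   // what "@push" resolves to
    privateToPublic.has(pushPrivate);              // true -- basis of isPrivateName()
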
diff --git a/Source/JavaScriptCore/builtins/BuiltinUtils.h b/Source/JavaScriptCore/builtins/BuiltinUtils.h
new file mode 100644
index 000000000..26da2919c
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/BuiltinUtils.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ConstructAbility.h"
+
+namespace JSC {
+
+#define INITIALIZE_BUILTIN_NAMES(name) , m_##name(JSC::Identifier::fromString(vm, #name)), m_##name##PrivateName(JSC::Identifier::fromUid(JSC::PrivateName(JSC::PrivateName::Description, ASCIILiteral("PrivateSymbol." #name))))
+#define DECLARE_BUILTIN_NAMES(name) const JSC::Identifier m_##name; const JSC::Identifier m_##name##PrivateName;
+#define DECLARE_BUILTIN_IDENTIFIER_ACCESSOR(name) \
+    const JSC::Identifier& name##PublicName() const { return m_##name; } \
+    const JSC::Identifier& name##PrivateName() const { return m_##name##PrivateName; }
+
+#define INITIALIZE_BUILTIN_SYMBOLS(name) , m_##name##Symbol(JSC::Identifier::fromUid(JSC::PrivateName(JSC::PrivateName::Description, ASCIILiteral("Symbol." #name)))), m_##name##SymbolPrivateIdentifier(JSC::Identifier::fromString(vm, #name "Symbol"))
+#define DECLARE_BUILTIN_SYMBOLS(name) const JSC::Identifier m_##name##Symbol; const JSC::Identifier m_##name##SymbolPrivateIdentifier;
+#define DECLARE_BUILTIN_SYMBOL_ACCESSOR(name) \
+    const JSC::Identifier& name##Symbol() const { return m_##name##Symbol; }
+
+class Identifier;
+class SourceCode;
+class UnlinkedFunctionExecutable;
+class VM;
+
+JS_EXPORT_PRIVATE UnlinkedFunctionExecutable* createBuiltinExecutable(VM&, const SourceCode&, const Identifier&, ConstructAbility);
+    
+} // namespace JSC
diff --git a/Source/JavaScriptCore/builtins/DatePrototype.js b/Source/JavaScriptCore/builtins/DatePrototype.js
new file mode 100644
index 000000000..234f18522
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/DatePrototype.js
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2015 Andy VanWagoner.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(INTL)
+
+function toLocaleString(/* locales, options */)
+{
+    "use strict";
+
+    function toDateTimeOptionsAnyAll(opts)
+    {
+        // ToDateTimeOptions(options, "any", "all")
+        // http://www.ecma-international.org/ecma-402/2.0/#sec-InitializeDateTimeFormat
+
+        var options;
+        if (opts === @undefined)
+            options = null;
+        else if (opts === null)
+            @throwTypeError("null is not an object");
+        else
+            options = @Object(opts);
+
+        // Check original instead of descendant to reduce lookups up the prototype chain.
+        var needsDefaults = !options || (
+            options.weekday === @undefined &&
+            options.year === @undefined &&
+            options.month === @undefined &&
+            options.day === @undefined &&
+            options.hour === @undefined &&
+            options.minute === @undefined &&
+            options.second === @undefined
+        );
+
+        // Only create descendant if it will have own properties.
+        if (needsDefaults) {
+            options = @Object.@create(options);
+            options.year = "numeric";
+            options.month = "numeric";
+            options.day = "numeric";
+            options.hour = "numeric";
+            options.minute = "numeric";
+            options.second = "numeric";
+        }
+
+        // 9. Return options.
+        return options;
+    }
+
+    // 13.3.1 Date.prototype.toLocaleString ([locales [, options ]]) (ECMA-402 2.0)
+    // http://www.ecma-international.org/ecma-402/2.0/#sec-Date.prototype.toLocaleString
+
+    var value = @thisTimeValue.@call(this);
+    if (@isNaN(value))
+        return "Invalid Date";
+
+    var options = toDateTimeOptionsAnyAll(@argument(1));
+    var locales = @argument(0);
+
+    var dateFormat = new @DateTimeFormat(locales, options);
+    return dateFormat.format(value);
+}
+
+function toLocaleDateString(/* locales, options */)
+{
+    "use strict";
+
+    function toDateTimeOptionsDateDate(opts)
+    {
+        // ToDateTimeOptions(options, "date", "date")
+        // http://www.ecma-international.org/ecma-402/2.0/#sec-InitializeDateTimeFormat
+
+        var options;
+        if (opts === @undefined)
+            options = null;
+        else if (opts === null)
+            @throwTypeError("null is not an object");
+        else
+            options = @Object(opts);
+
+        // Check original instead of descendant to reduce lookups up the prototype chain.
+        var needsDefaults = !options || (
+            options.weekday === @undefined &&
+            options.year === @undefined &&
+            options.month === @undefined &&
+            options.day === @undefined
+        );
+
+        // Only create descendant if it will have own properties.
+        if (needsDefaults) {
+            options = @Object.@create(options);
+            options.year = "numeric";
+            options.month = "numeric";
+            options.day = "numeric";
+        }
+
+        return options;
+    }
+
+    // 13.3.2 Date.prototype.toLocaleDateString ([locales [, options ]]) (ECMA-402 2.0)
+    // http://www.ecma-international.org/ecma-402/2.0/#sec-Date.prototype.toLocaleDateString
+
+    var value = @thisTimeValue.@call(this);
+    if (@isNaN(value))
+        return "Invalid Date";
+
+    var options = toDateTimeOptionsDateDate(@argument(1));
+    var locales = @argument(0);
+
+    var dateFormat = new @DateTimeFormat(locales, options);
+    return dateFormat.format(value);
+}
+
+function toLocaleTimeString(/* locales, options */)
+{
+    "use strict";
+
+    function toDateTimeOptionsTimeTime(opts)
+    {
+        // ToDateTimeOptions(options, "time", "time")
+        // http://www.ecma-international.org/ecma-402/2.0/#sec-InitializeDateTimeFormat
+
+        var options;
+        if (opts === @undefined)
+            options = null;
+        else if (opts === null)
+            @throwTypeError("null is not an object");
+        else
+            options = @Object(opts);
+
+        // Check original instead of descendant to reduce lookups up the prototype chain.
+        var needsDefaults = !options || (
+            options.hour === @undefined &&
+            options.minute === @undefined &&
+            options.second === @undefined
+        );
+
+        // Only create descendant if it will have own properties.
+        if (needsDefaults) {
+            options = @Object.@create(options);
+            options.hour = "numeric";
+            options.minute = "numeric";
+            options.second = "numeric";
+        }
+
+        return options;
+    }
+
+    // 13.3.3 Date.prototype.toLocaleTimeString ([locales [, options ]]) (ECMA-402 2.0)
+    // http://www.ecma-international.org/ecma-402/2.0/#sec-Date.prototype.toLocaleTimeString
+
+    var value = @thisTimeValue.@call(this);
+    if (@isNaN(value))
+        return "Invalid Date";
+
+    var options = toDateTimeOptionsTimeTime(@argument(1));
+    var locales = @argument(0);
+
+    var dateFormat = new @DateTimeFormat(locales, options);
+    return dateFormat.format(value);
+}
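+
+// For illustration (assuming ENABLE(INTL) and an "en-US" locale): the three
+// methods above differ only in which ToDateTimeOptions defaults they fill in.
+//
+//     var d = new Date(0);
+//     d.toLocaleString("en-US");     // date and time, e.g. "1/1/1970, 12:00:00 AM" (zone-dependent)
+//     d.toLocaleDateString("en-US"); // date fields only, e.g. "1/1/1970"
+//     d.toLocaleTimeString("en-US"); // time fields only, e.g. "12:00:00 AM"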
diff --git a/Source/JavaScriptCore/builtins/FunctionPrototype.js b/Source/JavaScriptCore/builtins/FunctionPrototype.js
new file mode 100644
index 000000000..f1ee867ef
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/FunctionPrototype.js
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function call(thisArgument)
+{
+    "use strict";
+
+    let argumentValues = [];
+    // Start from 1 to ignore thisArgument
+    for (let i = 1; i < arguments.length; i++)
+        @putByValDirect(argumentValues, i-1, arguments[i]);
+
+    return this.@apply(thisArgument, argumentValues);
+}
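+
+// For illustration: call builds an argument array and defers to @apply, so the
+// two are observably equivalent for ordinary functions:
+//
+//     function greet(greeting) { return greeting + ", " + this.name; }
+//     greet.call({ name: "x" }, "hi");     // "hi, x"
+//     greet.apply({ name: "x" }, ["hi"]);  // "hi, x"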
+
+function apply(thisValue, argumentValues)
+{
+    "use strict";
+
+    return this.@apply(thisValue, argumentValues);
+}
+
+// FIXME: this should have a different name: https://bugs.webkit.org/show_bug.cgi?id=151363
+function symbolHasInstance(value)
+{
+    "use strict";
+
+    if (typeof this !== "function")
+        return false;
+
+    if (@isBoundFunction(this))
+        return @hasInstanceBoundFunction(this, value);
+
+    let target = this.prototype;
+    return @instanceOf(value, target);
+}
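+
+// For illustration: `value instanceof F` dispatches to F[Symbol.hasInstance],
+// which for ordinary functions is the builtin above (a prototype-chain check):
+//
+//     function F() {}
+//     var o = new F();
+//     o instanceof F;                      // true: F.prototype is on o's chain
+//     F[Symbol.hasInstance].call(F, o);    // the same check, invoked directly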
+
+function bind(thisValue)
+{
+    "use strict";
+
+    let target = this;
+    if (typeof target !== "function")
+        @throwTypeError("|this| is not a function inside Function.prototype.bind");
+
+    let argumentCount = arguments.length;
+    let boundArgs = null;
+    let numBoundArgs = 0;
+    if (argumentCount > 1) {
+        numBoundArgs = argumentCount - 1;
+        boundArgs = @newArrayWithSize(numBoundArgs);
+        for (let i = 0; i < numBoundArgs; i++)
+            @putByValDirect(boundArgs, i, arguments[i + 1]);
+    }
+
+    let length = 0;
+    if (@hasOwnLengthProperty(target)) {
+        let lengthValue = target.length;
+        if (typeof lengthValue === "number") {
+            lengthValue = lengthValue | 0;
+            // Note that we only care about a positive lengthValue; however, this comparison
+            // against numBoundArgs suffices to prove the resulting length is not negative.
+            if (lengthValue > numBoundArgs)
+                length = lengthValue - numBoundArgs;
+        }
+    }
+
+    let name = target.name;
+    if (typeof name !== "string")
+        name = "";
+
+    return @makeBoundFunction(target, arguments[0], boundArgs, length, name);
+}
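+
+// For illustration of the length/name computation above: a bound function
+// subtracts the number of bound arguments from the target's length and gets a
+// "bound " name prefix (applied inside @makeBoundFunction):
+//
+//     function add(a, b, c) { return a + b + c; }
+//     var add1 = add.bind(null, 1);
+//     add1.length;                         // 2, i.e. add.length - 1 bound arg
+//     add1.name;                           // "bound add"
+//     add1(2, 3);                          // 6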
diff --git a/Source/JavaScriptCore/builtins/GeneratorPrototype.js b/Source/JavaScriptCore/builtins/GeneratorPrototype.js
new file mode 100644
index 000000000..4128a3532
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/GeneratorPrototype.js
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015-2016 Yusuke Suzuki .
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// 25.3.3.3 GeneratorResume ( generator, value )
+// 25.3.3.4 GeneratorResumeAbrupt(generator, abruptCompletion)
+@globalPrivate
+function generatorResume(generator, sentValue, resumeMode)
+{
+    "use strict";
+
+    let state = generator.@generatorState;
+    let done = false;
+    let value = @undefined;
+
+    if (typeof state !== 'number')
+        @throwTypeError("|this| should be a generator");
+
+    if (state === @GeneratorStateExecuting)
+        @throwTypeError("Generator is executing");
+
+    if (state === @GeneratorStateCompleted) {
+        if (resumeMode === @GeneratorResumeModeThrow)
+            throw sentValue;
+
+        done = true;
+        if (resumeMode === @GeneratorResumeModeReturn)
+            value = sentValue;
+    } else {
+        try {
+            generator.@generatorState = @GeneratorStateExecuting;
+            value = generator.@generatorNext.@call(generator.@generatorThis, generator, state, sentValue, resumeMode, generator.@generatorFrame);
+            if (generator.@generatorState === @GeneratorStateExecuting) {
+                generator.@generatorState = @GeneratorStateCompleted;
+                done = true;
+            }
+        } catch (error) {
+            generator.@generatorState = @GeneratorStateCompleted;
+            throw error;
+        }
+    }
+    return { done, value };
+}
+
+function next(value)
+{
+    "use strict";
+
+    return @generatorResume(this, value, @GeneratorResumeModeNormal);
+}
+
+function return(value)
+{
+    "use strict";
+
+    return @generatorResume(this, value, @GeneratorResumeModeReturn);
+}
+
+function throw(exception)
+{
+    "use strict";
+
+    return @generatorResume(this, exception, @GeneratorResumeModeThrow);
+}
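+
+// For illustration: next/return/throw all funnel into @generatorResume with a
+// different resume mode, so for a completed generator one would expect:
+//
+//     function* g() { yield 1; }
+//     var it = g();
+//     it.next();                // { value: 1, done: false }
+//     it.next();                // { value: undefined, done: true }
+//     it.return(42);            // { value: 42, done: true }   (Return mode)
+//     it.throw(new Error("e")); // throws the error             (Throw mode)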
diff --git a/Source/JavaScriptCore/builtins/GlobalObject.js b/Source/JavaScriptCore/builtins/GlobalObject.js
new file mode 100644
index 000000000..804930c84
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/GlobalObject.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015-2016 Yusuke Suzuki .
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@globalPrivate
+function isFinite(value)
+{
+    "use strict";
+
+    var numberValue = @toNumber(value);
+    // Return false if numberValue is |NaN|.
+    if (numberValue !== numberValue)
+        return false;
+    return numberValue !== @Infinity && numberValue !== -@Infinity;
+}
+
+@globalPrivate
+function isNaN(value)
+{
+    "use strict";
+
+    var numberValue = @toNumber(value);
+    return numberValue !== numberValue;
+}
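+
+// For illustration: both helpers rely on NaN being the only JavaScript value
+// that is not equal to itself, avoiding an observable call to the global isNaN:
+//
+//     var x = 0 / 0;        // NaN
+//     x !== x;              // true, only for NaN
+//     (1 / 0) !== (1 / 0);  // false: Infinity compares equal to itself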
diff --git a/Source/JavaScriptCore/builtins/GlobalOperations.js b/Source/JavaScriptCore/builtins/GlobalOperations.js
new file mode 100644
index 000000000..22220cf2e
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/GlobalOperations.js
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @internal
+
+@globalPrivate
+function toInteger(target)
+{
+    "use strict";
+
+    var numberValue = @Number(target);
+
+    // isNaN(numberValue)
+    if (numberValue !== numberValue)
+        return 0;
+    return @trunc(numberValue);
+}
+
+@globalPrivate
+function toLength(target)
+{
+    "use strict";
+
+    var length = @toInteger(target);
+    // Originally: Math.min(Math.max(length, 0), maxSafeInteger)
+    return length > 0 ? (length < @MAX_SAFE_INTEGER ? length : @MAX_SAFE_INTEGER) : 0;
+}
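+
+// For illustration of the clamping above (a sketch of the observable range,
+// [0, 2^53 - 1]):
+//
+//     @toLength(-5);               // 0
+//     @toLength(3.7);              // 3 (truncated by @toInteger)
+//     @toLength(Math.pow(2, 60));  // 9007199254740991 (@MAX_SAFE_INTEGER)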
+
+@globalPrivate
+function isDictionary(object)
+{
+    "use strict";
+
+    return object == null || typeof object === "object";
+}
+
+// FIXME: this needs to have its name changed to "get [Symbol.species]".
+// see: https://bugs.webkit.org/show_bug.cgi?id=151363
+@globalPrivate
+function speciesGetter()
+{
+    return this;
+}
+
+@globalPrivate
+function speciesConstructor(obj, defaultConstructor)
+{
+    var constructor = obj.constructor;
+    if (constructor === @undefined)
+        return defaultConstructor;
+    if (!@isObject(constructor))
+        @throwTypeError("|this|.constructor is not an Object or undefined");
+    constructor = constructor.@speciesSymbol;
+    if (constructor == null)
+        return defaultConstructor;
+    if (@isConstructor(constructor))
+        return constructor;
+    @throwTypeError("|this|.constructor[Symbol.species] is not a constructor");
+}
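+
+// For illustration: @speciesConstructor is how builtins choose the constructor
+// for derived objects; a subclass can redirect it via Symbol.species:
+//
+//     class MyArray extends Array {
+//         static get [Symbol.species]() { return Array; }
+//     }
+//     new MyArray(1, 2, 3).map(x => x) instanceof MyArray;  // false: species is Array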
diff --git a/Source/JavaScriptCore/builtins/InspectorInstrumentationObject.js b/Source/JavaScriptCore/builtins/InspectorInstrumentationObject.js
new file mode 100644
index 000000000..fb7d9eae6
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/InspectorInstrumentationObject.js
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function promiseFulfilled(promise, value, reactions)
+{
+    "use strict";
+
+    if (!this.isEnabled)
+        return;
+}
+
+function promiseRejected(promise, reason, reactions)
+{
+    "use strict";
+
+    if (!this.isEnabled)
+        return;
+}
diff --git a/Source/JavaScriptCore/builtins/InternalPromiseConstructor.js b/Source/JavaScriptCore/builtins/InternalPromiseConstructor.js
new file mode 100644
index 000000000..d01f5f7e5
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/InternalPromiseConstructor.js
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function internalAll(array)
+{
+    // This function is intended to be used in the JSC internals.
+    // The implementation must take care not to perform user-observable
+    // (and therefore trappable) operations.
+    //
+    // 1. Don't use for-of and iterables. This function only accepts
+    //    a dense array of promises.
+    // 2. Don't look up this.constructor / @@species. Always construct
+    //    the plain Promise object.
+
+    "use strict";
+
+    var promiseCapability = @newPromiseCapability(@InternalPromise);
+
+    var values = [];
+    var index = 0;
+    var remainingElementsCount = 0;
+
+    function newResolveElement(index)
+    {
+        var alreadyCalled = false;
+        return function (argument)
+        {
+            if (alreadyCalled)
+                return @undefined;
+            alreadyCalled = true;
+
+            @putByValDirect(values, index, argument);
+
+            --remainingElementsCount;
+            if (remainingElementsCount === 0)
+                return promiseCapability.@resolve.@call(@undefined, values);
+
+            return @undefined;
+        }
+    }
+
+    try {
+        if (array.length === 0)
+            promiseCapability.@resolve.@call(@undefined, values);
+        else {
+            for (var index = 0, length = array.length; index < length; ++index) {
+                var value = array[index];
+                @putByValDirect(values, index, @undefined);
+
+                var nextPromiseCapability = @newPromiseCapability(@InternalPromise);
+                nextPromiseCapability.@resolve.@call(@undefined, value);
+                var nextPromise = nextPromiseCapability.@promise;
+
+                var resolveElement = newResolveElement(index);
+                ++remainingElementsCount;
+                nextPromise.then(resolveElement, promiseCapability.@reject);
+            }
+        }
+    } catch (error) {
+        promiseCapability.@reject.@call(@undefined, error);
+    }
+
+    return promiseCapability.@promise;
+}
diff --git a/Source/JavaScriptCore/builtins/IteratorHelpers.js b/Source/JavaScriptCore/builtins/IteratorHelpers.js
new file mode 100644
index 000000000..f565d4464
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/IteratorHelpers.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function performIteration(iterable)
+{
+    "use strict";
+    // This is performing a spread operation on the iterable passed in,
+    // and returning the result in an array.
+    // https://tc39.github.io/ecma262/#sec-runtime-semantics-arrayaccumulation
+
+    let result = [];
+
+    let iterator = iterable.@iteratorSymbol();
+    let item;
+    let index = 0;
+    while (true) {
+        item = iterator.next();
+        if (!@isObject(item))
+            @throwTypeError("Iterator result interface is not an object");
+        if (item.done)
+            return result;
+        @putByValDirect(result, index++, item.value);
+    }
+}
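+
+// For illustration: this helper implements ArrayAccumulation for spread, so for
+// a well-behaved iterator it is observably equivalent to the user-level form:
+//
+//     var spread = [...iterable];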
diff --git a/Source/JavaScriptCore/builtins/IteratorPrototype.js b/Source/JavaScriptCore/builtins/IteratorPrototype.js
new file mode 100644
index 000000000..5c1691a3d
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/IteratorPrototype.js
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function symbolIteratorGetter()
+{
+    "use strict";
+
+    return this;
+}
diff --git a/Source/JavaScriptCore/builtins/MapPrototype.js b/Source/JavaScriptCore/builtins/MapPrototype.js
new file mode 100644
index 000000000..830260269
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/MapPrototype.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (!@isMap(this))
+        @throwTypeError("Map operation called on non-Map object");
+
+    if (typeof callback !== 'function')
+        @throwTypeError("Map.prototype.forEach callback must be a function");
+
+    var thisArg = @argument(1);
+    var iterator = @MapIterator(this);
+
+    // To avoid allocating iterator result objects, we pass a placeholder array to the special "next" function, which fills in the results.
+    var value = [ @undefined, @undefined ];
+    for (;;) {
+        if (@mapIteratorNext.@call(iterator, value))
+            break;
+        callback.@call(thisArg, value[1], value[0], this);
+    }
+}
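+
+// For illustration: the placeholder array is overwritten on every iteration,
+// so the callback sees the usual (value, key, map) arguments without a fresh
+// result object per step:
+//
+//     var m = new Map([["a", 1]]);
+//     m.forEach(function (value, key, map) {
+//         // value === 1, key === "a", map === m
+//     });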
diff --git a/Source/JavaScriptCore/builtins/ModuleLoaderPrototype.js b/Source/JavaScriptCore/builtins/ModuleLoaderPrototype.js
new file mode 100644
index 000000000..29556ea3d
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/ModuleLoaderPrototype.js
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// https://whatwg.github.io/loader/#loader-object
+// Module Loader has several hooks that can be customized by the platform.
+// For example, the [[Fetch]] hook can be provided by the JavaScriptCore shell
+// to fetch the payload from the local file system.
+// Currently, there are three hooks.
+//    1. Loader.resolve
+//    2. Loader.fetch
+//    3. Loader.instantiate
+
+@globalPrivate
+function setStateToMax(entry, newState)
+{
+    // https://whatwg.github.io/loader/#set-state-to-max
+
+    "use strict";
+
+    if (entry.state < newState)
+        entry.state = newState;
+}
+
+@globalPrivate
+function newRegistryEntry(key)
+{
+    // https://whatwg.github.io/loader/#registry
+    //
+    // Each registry entry becomes one of the 5 states.
+    // 1. Fetch
+    //     Ready to fetch (or now fetching) the resource of this module.
+    //     Typically, we fetch the source code over the network or from the file system.
+    //     a. If the status is Fetch and there is no entry.fetch promise, the entry is ready to fetch.
+    //     b. If the status is Fetch and there is an entry.fetch promise, the entry is just fetching the resource.
+    //
+    // 2. Instantiate (AnalyzeModule)
+    //     Ready to instantiate (or now instantiating) the module record from the fetched
+    //     source code.
+    //     Typically, we parse the module code, extract the dependencies and binding information.
+    //     a. If the status is Instantiate and there is no entry.instantiate promise, the entry is ready to instantiate.
+    //     b. If the status is Instantiate and there is an entry.instantiate promise, the entry is just instantiating
+    //        the module record.
+    //
+    // 3. Satisfy
+    //     Ready to request the dependent modules (or now requesting & resolving).
+    //     Without this state, the current draft causes infinite recursion when there is a circular dependency.
+    //     a. If the status is Satisfy and there is no entry.satisfy promise, the entry is ready to resolve the dependencies.
+    //     b. If the status is Satisfy and there is an entry.satisfy promise, the entry is just resolving
+    //        the dependencies.
+    //
+    // 4. Link
+    //     Ready to link the module with the other modules.
+    //     Linking means that the module imports and exports the bindings from/to the other modules.
+    //
+    // 5. Ready
+    //     The module is linked, so the module is ready to be executed.
+    //
+    // Each registry entry has three promises: "fetch", "instantiate", and "satisfy".
+    // They are assigned when the corresponding phase starts, and fulfilled when that phase completes.
+    //
+    // In the current module draft, linking is performed after all the modules are instantiated and their dependencies are resolved.
+    // Execution, likewise, happens only after all the modules are linked.
+    //
+    // TODO: We need a way to execute a module while unrelated modules are still being fetched.
+    // One solution: introduce a ready promise chain to execute the modules concurrently while keeping
+    // the execution order.
+
+    "use strict";
+
+    return {
+        key: key,
+        state: @ModuleFetch,
+        fetch: @undefined,
+        instantiate: @undefined,
+        satisfy: @undefined,
+        dependencies: [], // To keep the module order, we store the module keys in the array.
+        dependenciesMap: @undefined,
+        module: @undefined, // JSModuleRecord
+        linkError: @undefined,
+        linkSucceeded: true,
+    };
+}
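+
+// For illustration: a module progresses monotonically through the states
+// described above; @setStateToMax never moves an entry backwards:
+//
+//     Fetch -> Instantiate -> Satisfy -> Link -> Ready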
+
+function ensureRegistered(key)
+{
+    // https://whatwg.github.io/loader/#ensure-registered
+
+    "use strict";
+
+    var entry = this.registry.@get(key);
+    if (entry)
+        return entry;
+
+    entry = @newRegistryEntry(key);
+    this.registry.@set(key, entry);
+
+    return entry;
+}
+
+function forceFulfillPromise(promise, value)
+{
+    "use strict";
+
+    if (promise.@promiseState === @promiseStatePending)
+        @fulfillPromise(promise, value);
+}
+
+function fulfillFetch(entry, source)
+{
+    // https://whatwg.github.io/loader/#fulfill-fetch
+
+    "use strict";
+
+    if (!entry.fetch)
+        entry.fetch = @newPromiseCapability(@InternalPromise).@promise;
+    this.forceFulfillPromise(entry.fetch, source);
+    @setStateToMax(entry, @ModuleInstantiate);
+}
+
+function fulfillInstantiate(entry, optionalInstance, source)
+{
+    // https://whatwg.github.io/loader/#fulfill-instantiate
+
+    "use strict";
+
+    if (!entry.instantiate)
+        entry.instantiate = @newPromiseCapability(@InternalPromise).@promise;
+    this.commitInstantiated(entry, optionalInstance, source);
+
+    // FIXME: The draft fulfills the promise in the CommitInstantiated operation.
+    // But it CommitInstantiated is also used in the requestInstantiate and
+    // we should not "force fulfill" there.
+    // So we separate "force fulfill" operation from the CommitInstantiated operation.
+    // https://github.com/whatwg/loader/pull/67
+    this.forceFulfillPromise(entry.instantiate, entry);
+}
+
+function commitInstantiated(entry, optionalInstance, source)
+{
+    // https://whatwg.github.io/loader/#commit-instantiated
+
+    "use strict";
+
+    var moduleRecord = this.instantiation(optionalInstance, source, entry);
+
+    // FIXME: The draft describes this step as
+    //   4. Fulfill entry.[[Instantiate]] with instance.
+    // But the instantiate promise should be fulfilled with the entry.
+    // We omit this statement because the instantiate promise will be
+    // fulfilled without this "force fulfill" operation.
+    // https://github.com/whatwg/loader/pull/67
+
+    var dependencies = [];
+    var dependenciesMap = moduleRecord.dependenciesMap;
+    moduleRecord.registryEntry = entry;
+    var requestedModules = this.requestedModules(moduleRecord);
+    for (var i = 0, length = requestedModules.length; i < length; ++i) {
+        var depKey = requestedModules[i];
+        var pair = {
+            key: depKey,
+            value: @undefined
+        };
+        @putByValDirect(dependencies, dependencies.length, pair);
+        dependenciesMap.@set(depKey, pair);
+    }
+    entry.dependencies = dependencies;
+    entry.dependenciesMap = dependenciesMap;
+    entry.module = moduleRecord;
+    @setStateToMax(entry, @ModuleSatisfy);
+}
+
+function instantiation(result, source, entry)
+{
+    // https://whatwg.github.io/loader/#instantiation
+    // FIXME: Current implementation does not support optionalInstance.
+    // https://bugs.webkit.org/show_bug.cgi?id=148171
+
+    "use strict";
+
+    return this.parseModule(entry.key, source);
+}
+
+// Loader.
+
+function requestFetch(key, fetcher)
+{
+    // https://whatwg.github.io/loader/#request-fetch
+
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.fetch)
+        return entry.fetch;
+
+    // Hook point.
+    // 2. Loader.fetch
+    //     https://whatwg.github.io/loader/#browser-fetch
+    //     Take the key and actually fetch the resource.
+    //     For example, the JavaScriptCore shell can provide a hook that fetches the resource
+    //     from the local file system.
+    var fetchPromise = this.fetch(key, fetcher).then((source) => {
+        @setStateToMax(entry, @ModuleInstantiate);
+        return source;
+    });
+    entry.fetch = fetchPromise;
+    return fetchPromise;
+}
+
+function requestInstantiate(key, fetcher)
+{
+    // https://whatwg.github.io/loader/#request-instantiate
+
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.instantiate)
+        return entry.instantiate;
+
+    var instantiatePromise = this.requestFetch(key, fetcher).then((source) => {
+        // Hook point.
+        // 3. Loader.instantiate
+        //     https://whatwg.github.io/loader/#browser-instantiate
+        //     Take the key and the fetched source code, and instantiate the module record
+        //     by parsing the module source code.
+        //     It has the chance to provide an optional module instance that differs from
+        //     the ordinary one.
+        return this.instantiate(key, source, fetcher).then((optionalInstance) => {
+            this.commitInstantiated(entry, optionalInstance, source);
+            return entry;
+        });
+    });
+    entry.instantiate = instantiatePromise;
+    return instantiatePromise;
+}
+
+function requestSatisfy(key, fetcher)
+{
+    // https://whatwg.github.io/loader/#satisfy-instance
+
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.satisfy)
+        return entry.satisfy;
+
+    var satisfyPromise = this.requestInstantiate(key, fetcher).then((entry) => {
+        var depLoads = [];
+        for (var i = 0, length = entry.dependencies.length; i < length; ++i) {
+            let pair = entry.dependencies[i];
+
+            // Hook point.
+            // 1. Loader.resolve.
+            //     https://whatwg.github.io/loader/#browser-resolve
+            //     Take the name and resolve it to the unique identifier for the resource location.
+            //     For example, take "jquery" and return the URL for the resource.
+            var promise = this.resolve(pair.key, key, fetcher).then((depKey) => {
+                var depEntry = this.ensureRegistered(depKey);
+
+                // Recursive resolving. The dependencies of this entry are being resolved, or are already resolved.
+                // Stop tracing the circular dependencies here.
+                // But to retrieve the instantiated module record correctly,
+                // we need to wait for the instantiation of the dependent module.
+                // For example, when we reach here, this module has started resolving its dependencies,
+                // but it may or may not have reached the instantiation phase in the loader's pipeline.
+                // If we waited for the Satisfy of this module, we would construct a circular promise chain,
+                // which the Promises runtime would reject. Since all we need is the instantiated module,
+                // instead of waiting for the Satisfy of this module, we just wait for its Instantiate.
+                if (depEntry.satisfy) {
+                    return depEntry.instantiate.then((entry) => {
+                        pair.value = entry.module;
+                        return entry;
+                    });
+                }
+
+                return this.requestSatisfy(depKey, fetcher).then((entry) => {
+                    pair.value = entry.module;
+                    return entry;
+                });
+            });
+            @putByValDirect(depLoads, depLoads.length, promise);
+        }
+
+        return @InternalPromise.internalAll(depLoads).then((modules) => {
+            @setStateToMax(entry, @ModuleLink);
+            return entry;
+        });
+    });
+
+    entry.satisfy = satisfyPromise;
+    return satisfyPromise;
+}
+
+function requestLink(key, fetcher)
+{
+    // https://whatwg.github.io/loader/#request-link
+
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.state > @ModuleLink) {
+        var deferred = @newPromiseCapability(@InternalPromise);
+        deferred.@resolve.@call(@undefined, entry);
+        return deferred.@promise;
+    }
+
+    return this.requestSatisfy(key, fetcher).then((entry) => {
+        this.link(entry, fetcher);
+        return entry;
+    });
+}
+
+function requestReady(key, fetcher)
+{
+    // https://whatwg.github.io/loader/#request-ready
+
+    "use strict";
+
+    return this.requestLink(key, fetcher).then((entry) => {
+        this.moduleEvaluation(entry.module, fetcher);
+    });
+}
+
+// Linking semantics.
+
+function link(entry, fetcher)
+{
+    // https://whatwg.github.io/loader/#link
+
+    "use strict";
+
+    if (!entry.linkSucceeded)
+        throw entry.linkError;
+    if (entry.state === @ModuleReady)
+        return;
+    @setStateToMax(entry, @ModuleReady);
+
+    try {
+        // Since we already have the "dependencies" field,
+        // we can call moduleDeclarationInstantiation with the correct order
+        // without constructing the dependency graph by calling dependencyGraph.
+        var dependencies = entry.dependencies;
+        for (var i = 0, length = dependencies.length; i < length; ++i) {
+            var pair = dependencies[i];
+            this.link(pair.value.registryEntry, fetcher);
+        }
+
+        this.moduleDeclarationInstantiation(entry.module, fetcher);
+    } catch (error) {
+        entry.linkSucceeded = false;
+        entry.linkError = error;
+        throw error;
+    }
+}
+
+// Module semantics.
+
+function moduleEvaluation(moduleRecord, fetcher)
+{
+    // http://www.ecma-international.org/ecma-262/6.0/#sec-moduleevaluation
+
+    "use strict";
+
+    if (moduleRecord.evaluated)
+        return;
+    moduleRecord.evaluated = true;
+
+    var entry = moduleRecord.registryEntry;
+
+    // The contents of [[RequestedModules]] are cloned into entry.dependencies.
+    var dependencies = entry.dependencies;
+    for (var i = 0, length = dependencies.length; i < length; ++i) {
+        var pair = dependencies[i];
+        var requiredModuleRecord = pair.value;
+        this.moduleEvaluation(requiredModuleRecord, fetcher);
+    }
+    this.evaluate(entry.key, moduleRecord, fetcher);
+}
+
+// APIs to control the module loader.
+
+function provide(key, stage, value)
+{
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+
+    if (stage === @ModuleFetch) {
+        if (entry.state > @ModuleFetch)
+            @throwTypeError("Requested module is already fetched.");
+        this.fulfillFetch(entry, value);
+        return;
+    }
+
+    if (stage === @ModuleInstantiate) {
+        if (entry.state > @ModuleInstantiate)
+            @throwTypeError("Requested module is already instantiated.");
+        this.fulfillFetch(entry, @undefined);
+        entry.fetch.then((source) => {
+            this.fulfillInstantiate(entry, value, source);
+        });
+        return;
+    }
+
+    @throwTypeError("Requested module is already ready to be executed.");
+}
+
+function loadAndEvaluateModule(moduleName, referrer, fetcher)
+{
+    "use strict";
+
+    // Loader.resolve hook point.
+    // resolve: moduleName => Promise(moduleKey)
+    // Take the name and resolve it to the unique identifier for the resource location.
+    // For example, take "jquery" and return the URL for the resource.
+    return this.resolve(moduleName, referrer, fetcher).then((key) => {
+        return this.requestReady(key, fetcher);
+    });
+}
+
+function loadModule(moduleName, referrer, fetcher)
+{
+    "use strict";
+
+    // Loader.resolve hook point.
+    // resolve: moduleName => Promise(moduleKey)
+    // Take the name and resolve it to the unique identifier for the resource location.
+    // For example, take "jquery" and return the URL for the resource.
+    return this.resolve(moduleName, referrer, fetcher).then((key) => {
+        return this.requestSatisfy(key, fetcher);
+    }).then((entry) => {
+        return entry.key;
+    });
+}
+
+function linkAndEvaluateModule(key, fetcher)
+{
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.state < @ModuleLink)
+        @throwTypeError("Requested module is not instantiated yet.");
+
+    this.link(entry, fetcher);
+    return this.moduleEvaluation(entry.module, fetcher);
+}
+
+function requestImportModule(key, fetcher)
+{
+    "use strict";
+
+    return this.requestSatisfy(key, fetcher).then((entry) => {
+        this.linkAndEvaluateModule(entry.key, fetcher);
+        return this.getModuleNamespaceObject(entry.module);
+    });
+}
diff --git a/Source/JavaScriptCore/builtins/NumberConstructor.js b/Source/JavaScriptCore/builtins/NumberConstructor.js
new file mode 100644
index 000000000..2c0e4c868
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/NumberConstructor.js
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function isFinite(value)
+{
+    "use strict";
+
+    if (typeof value !== "number")
+        return false;
+
+    // Return false if value is |NaN|.
+    if (value !== value)
+        return false;
+
+    return value !== @Infinity && value !== -@Infinity;
+}
+
+function isNaN(value)
+{
+    "use strict";
+
+    return value !== value;
+}
diff --git a/Source/JavaScriptCore/builtins/NumberPrototype.js b/Source/JavaScriptCore/builtins/NumberPrototype.js
new file mode 100644
index 000000000..435ea789b
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/NumberPrototype.js
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 Andy VanWagoner .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(INTL)
+
+function toLocaleString(/* locales, options */)
+{
+    "use strict";
+
+    // 13.2.1 Number.prototype.toLocaleString ([locales [, options ]]) (ECMA-402 2.0)
+    // http://ecma-international.org/publications/standards/Ecma-402.htm
+
+    // 1. Let x be thisNumberValue(this value).
+    // 2. ReturnIfAbrupt(x).
+    var number = @thisNumberValue.@call(this);
+
+    // 3. Let numberFormat be Construct(%NumberFormat%, «locales, options»).
+    // 4. ReturnIfAbrupt(numberFormat).
+    var numberFormat = new @NumberFormat(@argument(0), @argument(1));
+
+    // 5. Return FormatNumber(numberFormat, x).
+    return numberFormat.format(number);
+}
diff --git a/Source/JavaScriptCore/builtins/ObjectConstructor.js b/Source/JavaScriptCore/builtins/ObjectConstructor.js
new file mode 100644
index 000000000..d855beb2a
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/ObjectConstructor.js
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2016 Oleksandr Skachkov .
+ * Copyright (C) 2015 Jordan Harband. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@globalPrivate
+function enumerableOwnProperties(object, kind)
+{
+    "use strict";
+
+    const obj = @Object(object);
+    const ownKeys = @Reflect.@ownKeys(obj);
+    const properties = [];
+    for (let i = 0, keysLength = ownKeys.length; i < keysLength; ++i) {
+        let nextKey = ownKeys[i];
+        if (typeof nextKey === 'string') {
+            let descriptor = @Reflect.@getOwnPropertyDescriptor(obj, nextKey);
+            if (descriptor !== @undefined && descriptor.enumerable) {
+                if (kind === @iterationKindValue)
+                    properties.@push(obj[nextKey]);
+                else if (kind === @iterationKindKeyValue)
+                    properties.@push([nextKey, obj[nextKey]]);
+            }
+        }
+    }
+    
+    return properties;
+}
+
+function values(object)
+{
+    "use strict";
+    
+    if (object == null)
+        @throwTypeError("Object.values requires that input parameter not be null or undefined");
+
+    return @enumerableOwnProperties(object, @iterationKindValue);
+}
+
+function entries(object)
+{
+    "use strict";
+    
+    if (object == null)
+        @throwTypeError("Object.entries requires that input parameter not be null or undefined");
+    
+    return @enumerableOwnProperties(object, @iterationKindKeyValue);
+}
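+
+// For illustration: values/entries visit only enumerable own string-keyed
+// properties, in ownKeys order:
+//
+//     var o = { a: 1, [Symbol("s")]: 2 };
+//     Object.values(o);   // [1]          (the symbol key is skipped)
+//     Object.entries(o);  // [["a", 1]]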
+
+function assign(target/*[*/, /*...*/sources/*] */)
+{
+    "use strict";
+
+    if (target == null)
+        @throwTypeError("Object.assign requires that input parameter not be null or undefined");
+
+    let objTarget = @Object(target);
+    for (let s = 1, argumentsLength = arguments.length; s < argumentsLength; ++s) {
+        let nextSource = arguments[s];
+        if (nextSource != null) {
+            let from = @Object(nextSource);
+            let keys = @Reflect.@ownKeys(from);
+            for (let i = 0, keysLength = keys.length; i < keysLength; ++i) {
+                let nextKey = keys[i];
+                let descriptor = @Reflect.@getOwnPropertyDescriptor(from, nextKey);
+                if (descriptor !== @undefined && descriptor.enumerable)
+                    objTarget[nextKey] = from[nextKey];
+            }
+        }
+    }
+    return objTarget;
+}
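+
+// For illustration: assign copies only enumerable own properties, skipping
+// null/undefined sources, with later sources winning:
+//
+//     var target = { a: 1 };
+//     Object.assign(target, { b: 2 }, null, { a: 3 });
+//     target;             // { a: 3, b: 2 }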
diff --git a/Source/JavaScriptCore/builtins/PromiseConstructor.js b/Source/JavaScriptCore/builtins/PromiseConstructor.js
new file mode 100644
index 000000000..3f0848dfa
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/PromiseConstructor.js
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function all(iterable)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("|this| is not a object");
+
+    var promiseCapability = @newPromiseCapability(this);
+
+    var values = [];
+    var index = 0;
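+    // Start the count at 1 as a sentinel for the iteration itself; it is
+    // decremented once the loop below finishes, so the capability cannot
+    // resolve before every element of the iterable has been visited.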
+    var remainingElementsCount = 1;
+
+    function newResolveElement(index)
+    {
+        var alreadyCalled = false;
+        return function (argument)
+        {
+            if (alreadyCalled)
+                return @undefined;
+            alreadyCalled = true;
+
+            @putByValDirect(values, index, argument);
+
+            --remainingElementsCount;
+            if (remainingElementsCount === 0)
+                return promiseCapability.@resolve.@call(@undefined, values);
+
+            return @undefined;
+        }
+    }
+
+    try {
+        for (var value of iterable) {
+            @putByValDirect(values, index, @undefined);
+            var nextPromise = this.resolve(value);
+            var resolveElement = newResolveElement(index);
+            ++remainingElementsCount;
+            nextPromise.then(resolveElement, promiseCapability.@reject);
+            ++index;
+        }
+
+        --remainingElementsCount;
+        if (remainingElementsCount === 0)
+            promiseCapability.@resolve.@call(@undefined, values);
+    } catch (error) {
+        promiseCapability.@reject.@call(@undefined, error);
+    }
+
+    return promiseCapability.@promise;
+}
+
+function race(iterable)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("|this| is not a object");
+
+    var promiseCapability = @newPromiseCapability(this);
+
+    try {
+        for (var value of iterable) {
+            var nextPromise = this.resolve(value);
+            nextPromise.then(promiseCapability.@resolve, promiseCapability.@reject);
+        }
+    } catch (error) {
+        promiseCapability.@reject.@call(@undefined, error);
+    }
+
+    return promiseCapability.@promise;
+}
+
+function reject(reason)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("|this| is not a object");
+
+    var promiseCapability = @newPromiseCapability(this);
+
+    promiseCapability.@reject.@call(@undefined, reason);
+
+    return promiseCapability.@promise;
+}
+
+function resolve(value)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("|this| is not a object");
+
+    if (@isPromise(value)) {
+        var valueConstructor = value.constructor;
+        if (valueConstructor === this)
+            return value;
+    }
+
+    var promiseCapability = @newPromiseCapability(this);
+
+    promiseCapability.@resolve.@call(@undefined, value);
+
+    return promiseCapability.@promise;
+}
diff --git a/Source/JavaScriptCore/builtins/PromiseOperations.js b/Source/JavaScriptCore/builtins/PromiseOperations.js
new file mode 100644
index 000000000..61564e7cd
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/PromiseOperations.js
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @internal
+
+@globalPrivate
+function isPromise(promise)
+{
+    "use strict";
+
+    return @isObject(promise) && !!promise.@promiseState;
+}
+
+@globalPrivate
+function newPromiseReaction(capability, onFulfilled, onRejected)
+{
+    "use strict";
+
+    return {
+        @capabilities: capability,
+        @onFulfilled: onFulfilled,
+        @onRejected: onRejected,
+    };
+}
+
+@globalPrivate
+function newPromiseCapability(constructor)
+{
+    "use strict";
+
+    if (!@isConstructor(constructor))
+        @throwTypeError("promise capability requires a constructor function");
+
+    var promiseCapability = {
+        @promise: @undefined,
+        @resolve: @undefined,
+        @reject: @undefined
+    };
+
+    function executor(resolve, reject)
+    {
+        if (promiseCapability.@resolve !== @undefined)
+            @throwTypeError("resolve function is already set");
+        if (promiseCapability.@reject !== @undefined)
+            @throwTypeError("reject function is already set");
+
+        promiseCapability.@resolve = resolve;
+        promiseCapability.@reject = reject;
+    }
+
+    var promise = new constructor(executor);
+
+    if (typeof promiseCapability.@resolve !== "function")
+        @throwTypeError("executor did not take a resolve function");
+
+    if (typeof promiseCapability.@reject !== "function")
+        @throwTypeError("executor did not take a reject function");
+
+    promiseCapability.@promise = promise;
+
+    return promiseCapability;
+}
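+
+// Illustrative sketch (comment only, not part of the builtin): this is the
+// same contract the public Promise constructor exposes - the executor hands
+// out the resolve/reject pair that the capability record stores.
+//
+//     var resolveFn;
+//     var p = new Promise(function (resolve, reject) { resolveFn = resolve; });
+//     resolveFn(1); // settles p with the value 1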
+
+@globalPrivate
+function triggerPromiseReactions(state, reactions, argument)
+{
+    "use strict";
+
+    for (var index = 0, length = reactions.length; index < length; ++index)
+        @enqueueJob(@promiseReactionJob, [state, reactions[index], argument]);
+}
+
+@globalPrivate
+function rejectPromise(promise, reason)
+{
+    "use strict";
+
+    var reactions = promise.@promiseReactions;
+    promise.@promiseResult = reason;
+    promise.@promiseReactions = @undefined;
+    promise.@promiseState = @promiseStateRejected;
+
+    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);
+
+    @triggerPromiseReactions(@promiseStateRejected, reactions, reason);
+}
+
+@globalPrivate
+function fulfillPromise(promise, value)
+{
+    "use strict";
+
+    var reactions = promise.@promiseReactions;
+    promise.@promiseResult = value;
+    promise.@promiseReactions = @undefined;
+    promise.@promiseState = @promiseStateFulfilled;
+
+    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);
+
+    @triggerPromiseReactions(@promiseStateFulfilled, reactions, value);
+}
+
+@globalPrivate
+function createResolvingFunctions(promise)
+{
+    "use strict";
+
+    var alreadyResolved = false;
+
+    var resolve = function (resolution) {
+        if (alreadyResolved)
+            return @undefined;
+        alreadyResolved = true;
+
+        if (resolution === promise)
+            return @rejectPromise(promise, new @TypeError("Resolve a promise with itself"));
+
+        if (!@isObject(resolution))
+            return @fulfillPromise(promise, resolution);
+
+        var then;
+        try {
+            then = resolution.then;
+        } catch (error) {
+            return @rejectPromise(promise, error);
+        }
+
+        if (typeof then !== 'function')
+            return @fulfillPromise(promise, resolution);
+
+        @enqueueJob(@promiseResolveThenableJob, [promise, resolution, then]);
+
+        return @undefined;
+    };
+
+    var reject = function (reason) {
+        if (alreadyResolved)
+            return @undefined;
+        alreadyResolved = true;
+
+        return @rejectPromise(promise, reason);
+    };
+
+    return {
+        @resolve: resolve,
+        @reject: reject
+    };
+}
+
+@globalPrivate
+function promiseReactionJob(state, reaction, argument)
+{
+    "use strict";
+
+    var promiseCapability = reaction.@capabilities;
+
+    var result;
+    var handler = (state === @promiseStateFulfilled) ? reaction.@onFulfilled : reaction.@onRejected;
+    try {
+        result = handler(argument);
+    } catch (error) {
+        return promiseCapability.@reject.@call(@undefined, error);
+    }
+
+    return promiseCapability.@resolve.@call(@undefined, result);
+}
+
+@globalPrivate
+function promiseResolveThenableJob(promiseToResolve, thenable, then)
+{
+    "use strict";
+
+    var resolvingFunctions = @createResolvingFunctions(promiseToResolve);
+
+    try {
+        return then.@call(thenable, resolvingFunctions.@resolve, resolvingFunctions.@reject);
+    } catch (error) {
+        return resolvingFunctions.@reject.@call(@undefined, error);
+    }
+}
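+
+// Illustrative sketch (comment only, not part of the builtin): resolving
+// with a thenable enqueues this job, so assimilation is only observable
+// asynchronously.
+//
+//     var thenable = { then: function (resolve) { resolve("done"); } };
+//     Promise.resolve(thenable).then(function (v) {
+//         // v is "done", delivered in a later microtask
+//     });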
+
+@globalPrivate
+function initializePromise(executor)
+{
+    "use strict";
+
+    if (typeof executor !== 'function')
+        @throwTypeError("Promise constructor takes a function argument");
+
+    this.@promiseState = @promiseStatePending;
+    this.@promiseReactions = [];
+
+    var resolvingFunctions = @createResolvingFunctions(this);
+    try {
+        executor(resolvingFunctions.@resolve, resolvingFunctions.@reject);
+    } catch (error) {
+        return resolvingFunctions.@reject.@call(@undefined, error);
+    }
+
+    return this;
+}
diff --git a/Source/JavaScriptCore/builtins/PromisePrototype.js b/Source/JavaScriptCore/builtins/PromisePrototype.js
new file mode 100644
index 000000000..6065ad837
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/PromisePrototype.js
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function catch(onRejected)
+{
+    "use strict";
+
+    return this.then(@undefined, onRejected);
+}
+
+function then(onFulfilled, onRejected)
+{
+    "use strict";
+
+    if (!@isPromise(this))
+        @throwTypeError("|this| is not a object");
+
+    var constructor = @speciesConstructor(this, @Promise);
+
+    var resultCapability = @newPromiseCapability(constructor);
+
+    if (typeof onFulfilled !== "function")
+        onFulfilled = function (argument) { return argument; };
+
+    if (typeof onRejected !== "function")
+        onRejected = function (argument) { throw argument; };
+
+    var reaction = @newPromiseReaction(resultCapability, onFulfilled, onRejected);
+
+    var state = this.@promiseState;
+    if (state === @promiseStatePending)
+        @putByValDirect(this.@promiseReactions, this.@promiseReactions.length, reaction);
+    else
+        @enqueueJob(@promiseReactionJob, [state, reaction, this.@promiseResult]);
+
+    return resultCapability.@promise;
+}
diff --git a/Source/JavaScriptCore/builtins/ReflectObject.js b/Source/JavaScriptCore/builtins/ReflectObject.js
new file mode 100644
index 000000000..1aaa1f407
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/ReflectObject.js
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// https://tc39.github.io/ecma262/#sec-reflect.apply
+function apply(target, thisArgument, argumentsList)
+{
+    "use strict";
+
+    if (typeof target !== "function")
+        @throwTypeError("Reflect.apply requires the first argument be a function");
+
+    if (!@isObject(argumentsList))
+        @throwTypeError("Reflect.apply requires the third argument be an object");
+
+    return target.@apply(thisArgument, argumentsList);
+}
+
+// https://tc39.github.io/ecma262/#sec-reflect.deleteproperty
+function deleteProperty(target, propertyKey)
+{
+    // Intentionally keep this code in sloppy mode to suppress the TypeError
+    // that the delete operator would raise in strict mode.
+
+    if (!@isObject(target))
+        @throwTypeError("Reflect.deleteProperty requires the first argument be an object");
+
+    return delete target[propertyKey];
+}
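+
+// Illustrative sketch (comment only, not part of the builtin): unlike a
+// strict-mode delete expression, which throws on a non-configurable
+// property, Reflect.deleteProperty reports failure by returning false.
+//
+//     var o = {};
+//     Object.defineProperty(o, "x", { value: 1, configurable: false });
+//     Reflect.deleteProperty(o, "x");        // false, no TypeError
+//     Reflect.deleteProperty({ y: 2 }, "y"); // true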
+
+// https://tc39.github.io/ecma262/#sec-reflect.has
+function has(target, propertyKey)
+{
+    "use strict";
+
+    if (!@isObject(target))
+        @throwTypeError("Reflect.has requires the first argument be an object");
+
+    return propertyKey in target;
+}
diff --git a/Source/JavaScriptCore/builtins/RegExpPrototype.js b/Source/JavaScriptCore/builtins/RegExpPrototype.js
new file mode 100644
index 000000000..017a81cd1
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/RegExpPrototype.js
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+@globalPrivate
+function advanceStringIndex(string, index, unicode)
+{
+    // This function implements AdvanceStringIndex described in ES6 21.2.5.2.3.
+    "use strict";
+
+    if (!unicode)
+        return index + 1;
+
+    if (index + 1 >= string.length)
+        return index + 1;
+
+    let first = string.@charCodeAt(index);
+    if (first < 0xD800 || first > 0xDBFF)
+        return index + 1;
+
+    let second = string.@charCodeAt(index + 1);
+    if (second < 0xDC00 || second > 0xDFFF)
+        return index + 1;
+
+    return index + 2;
+}
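+
+// Worked example (comment only): in "a\u{1F600}b" the emoji occupies the
+// surrogate pair at indices 1-2, so with unicode === true advancing from
+// index 1 yields 3, while advancing from index 0 ('a') yields 1.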
+
+@globalPrivate
+function regExpExec(regexp, str)
+{
+    "use strict";
+
+    let exec = regexp.exec;
+    let builtinExec = @regExpBuiltinExec;
+    if (exec !== builtinExec && typeof exec === "function") {
+        let result = exec.@call(regexp, str);
+        if (result !== null && !@isObject(result))
+            @throwTypeError("The result of a RegExp exec must be null or an object");
+        return result;
+    }
+    return builtinExec.@call(regexp, str);
+}
+
+@globalPrivate
+function hasObservableSideEffectsForRegExpMatch(regexp) {
+    // This is accessed by the RegExpExec internal function.
+    let regexpExec = @tryGetById(regexp, "exec");
+    if (regexpExec !== @regExpBuiltinExec)
+        return true;
+
+    let regexpGlobal = @tryGetById(regexp, "global");
+    if (regexpGlobal !== @regExpProtoGlobalGetter)
+        return true;
+    let regexpUnicode = @tryGetById(regexp, "unicode");
+    if (regexpUnicode !== @regExpProtoUnicodeGetter)
+        return true;
+
+    return !@isRegExpObject(regexp);
+}
+
+function match(strArg)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("RegExp.prototype.@@match requires that |this| be an Object");
+
+    let regexp = this;
+
+    // Check for observable side effects and call the fast path if there aren't any.
+    if (!@hasObservableSideEffectsForRegExpMatch(regexp))
+        return @regExpMatchFast.@call(regexp, strArg);
+
+    let str = @toString(strArg);
+
+    if (!regexp.global)
+        return @regExpExec(regexp, str);
+    
+    let unicode = regexp.unicode;
+    regexp.lastIndex = 0;
+    let resultList = [];
+
+    // FIXME: It would be great to implement a solution similar to what we do in
+    // RegExpObject::matchGlobal(). It's not clear if this is possible, since this loop has
+    // effects. https://bugs.webkit.org/show_bug.cgi?id=158145
+    const maximumReasonableMatchSize = 100000000;
+
+    while (true) {
+        let result = @regExpExec(regexp, str);
+        
+        if (result === null) {
+            if (resultList.length === 0)
+                return null;
+            return resultList;
+        }
+
+        if (resultList.length > maximumReasonableMatchSize)
+            @throwOutOfMemoryError();
+
+        if (!@isObject(result))
+            @throwTypeError("RegExp.prototype.@@match call to RegExp.exec didn't return null or an object");
+
+        let resultString = @toString(result[0]);
+
+        if (!resultString.length)
+            regexp.lastIndex = @advanceStringIndex(str, regexp.lastIndex, unicode);
+
+        resultList.@push(resultString);
+    }
+}
+
+function replace(strArg, replace)
+{
+    "use strict";
+
+    function getSubstitution(matched, str, position, captures, replacement)
+    {
+        "use strict";
+
+        let matchLength = matched.length;
+        let stringLength = str.length;
+        let tailPos = position + matchLength;
+        let m = captures.length;
+        let replacementLength = replacement.length;
+        let result = "";
+        let lastStart = 0;
+
+        for (let start = 0; start = replacement.indexOf("$", lastStart), start !== -1; lastStart = start) {
+            if (start - lastStart > 0)
+                result = result + replacement.substring(lastStart, start);
+            start++;
+            let ch = replacement.charAt(start);
+            if (ch === "")
+                result = result + "$";
+            else {
+                switch (ch)
+                {
+                case "$":
+                    result = result + "$";
+                    start++;
+                    break;
+                case "&":
+                    result = result + matched;
+                    start++;
+                    break;
+                case "`":
+                    if (position > 0)
+                        result = result + str.substring(0, position);
+                    start++;
+                    break;
+                case "'":
+                    if (tailPos < stringLength)
+                        result = result + str.substring(tailPos);
+                    start++;
+                    break;
+                default:
+                    let chCode = ch.charCodeAt(0);
+                    if (chCode >= 0x30 && chCode <= 0x39) {
+                        start++;
+                        let n = chCode - 0x30;
+                        if (n > m)
+                            break;
+                        if (start < replacementLength) {
+                            let nextChCode = replacement.charCodeAt(start);
+                            if (nextChCode >= 0x30 && nextChCode <= 0x39) {
+                                let nn = 10 * n + nextChCode - 0x30;
+                                if (nn <= m) {
+                                    n = nn;
+                                    start++;
+                                }
+                            }
+                        }
+
+                        if (n == 0)
+                            break;
+
+                        if (captures[n] != @undefined)
+                            result = result + captures[n];
+                    } else
+                        result = result + "$";
+                    break;
+                }
+            }
+        }
+
+        return result + replacement.substring(lastStart);
+    }
+
+    if (!@isObject(this))
+        @throwTypeError("RegExp.prototype.@@replace requires that |this| be an Object");
+
+    let regexp = this;
+
+    let str = @toString(strArg);
+    let stringLength = str.length;
+    let functionalReplace = typeof replace === 'function';
+
+    if (!functionalReplace)
+        replace = @toString(replace);
+
+    let global = regexp.global;
+    let unicode = false;
+
+    if (global) {
+        unicode = regexp.unicode;
+        regexp.lastIndex = 0;
+    }
+
+    let resultList = [];
+    let result;
+    let done = false;
+    while (!done) {
+        result = @regExpExec(regexp, str);
+
+        if (result === null)
+            done = true;
+        else {
+            resultList.@push(result);
+            if (!global)
+                done = true;
+            else {
+                let matchStr = @toString(result[0]);
+
+                if (!matchStr.length)
+                    regexp.lastIndex = @advanceStringIndex(str, regexp.lastIndex, unicode);
+            }
+        }
+    }
+
+    let accumulatedResult = "";
+    let nextSourcePosition = 0;
+    let lastPosition = 0;
+
+    for (result of resultList) {
+        let nCaptures = result.length - 1;
+        if (nCaptures < 0)
+            nCaptures = 0;
+        let matched = @toString(result[0]);
+        let matchLength = matched.length;
+        let position = result.index;
+        position = (position > stringLength) ? stringLength : position;
+        position = (position < 0) ? 0 : position;
+
+        let captures = [];
+        for (let n = 1; n <= nCaptures; n++) {
+            let capN = result[n];
+            if (capN !== @undefined)
+                capN = @toString(capN);
+            captures[n] = capN;
+        }
+
+        let replacement;
+
+        if (functionalReplace) {
+            let replacerArgs = [ matched ].concat(captures.slice(1));
+            replacerArgs.@push(position);
+            replacerArgs.@push(str);
+
+            let replValue = replace.@apply(@undefined, replacerArgs);
+            replacement = @toString(replValue);
+        } else
+            replacement = getSubstitution(matched, str, position, captures, replace);
+
+        if (position >= nextSourcePosition && position >= lastPosition) {
+            accumulatedResult = accumulatedResult + str.substring(nextSourcePosition, position) + replacement;
+            nextSourcePosition = position + matchLength;
+            lastPosition = position;
+        }
+    }
+
+    if (nextSourcePosition >= stringLength)
+        return accumulatedResult;
+
+    return accumulatedResult + str.substring(nextSourcePosition);
+}
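+
+// Illustrative sketch (comment only, not part of the builtin):
+// getSubstitution expands $$, $&, $`, $' and $n in the replacement template.
+//
+//     "2017-06-27".replace(/(\d+)-(\d+)-(\d+)/, "$3/$2/$1"); // "27/06/2017"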
+
+// 21.2.5.9 RegExp.prototype[@@search] (string)
+function search(strArg)
+{
+    "use strict";
+
+    let regexp = this;
+
+    // Check for observable side effects and call the fast path if there aren't any.
+    if (@isRegExpObject(regexp) && @tryGetById(regexp, "exec") === @regExpBuiltinExec)
+        return @regExpSearchFast.@call(regexp, strArg);
+
+    // 1. Let rx be the this value.
+    // 2. If Type(rx) is not Object, throw a TypeError exception.
+    if (!@isObject(this))
+        @throwTypeError("RegExp.prototype.@@search requires that |this| be an Object");
+
+    // 3. Let S be ? ToString(string).
+    let str = @toString(strArg);
+
+    // 4. Let previousLastIndex be ? Get(rx, "lastIndex").
+    let previousLastIndex = regexp.lastIndex;
+    // 5. Perform ? Set(rx, "lastIndex", 0, true).
+    regexp.lastIndex = 0;
+    // 6. Let result be ? RegExpExec(rx, S).
+    let result = @regExpExec(regexp, str);
+    // 7. Perform ? Set(rx, "lastIndex", previousLastIndex, true).
+    regexp.lastIndex = previousLastIndex;
+    // 8. If result is null, return -1.
+    if (result === null)
+        return -1;
+    // 9. Return ? Get(result, "index").
+    return result.index;
+}
+
+@globalPrivate
+function hasObservableSideEffectsForRegExpSplit(regexp) {
+    // This is accessed by the RegExpExec internal function.
+    let regexpExec = @tryGetById(regexp, "exec");
+    if (regexpExec !== @regExpBuiltinExec)
+        return true;
+    
+    // This is accessed by step 5 below.
+    let regexpFlags = @tryGetById(regexp, "flags");
+    if (regexpFlags !== @regExpProtoFlagsGetter)
+        return true;
+    
+    // These are accessed by the builtin flags getter.
+    let regexpGlobal = @tryGetById(regexp, "global");
+    if (regexpGlobal !== @regExpProtoGlobalGetter)
+        return true;
+    let regexpIgnoreCase = @tryGetById(regexp, "ignoreCase");
+    if (regexpIgnoreCase !== @regExpProtoIgnoreCaseGetter)
+        return true;
+    let regexpMultiline = @tryGetById(regexp, "multiline");
+    if (regexpMultiline !== @regExpProtoMultilineGetter)
+        return true;
+    let regexpSticky = @tryGetById(regexp, "sticky");
+    if (regexpSticky !== @regExpProtoStickyGetter)
+        return true;
+    let regexpUnicode = @tryGetById(regexp, "unicode");
+    if (regexpUnicode !== @regExpProtoUnicodeGetter)
+        return true;
+    
+    // This is accessed by the RegExp species constructor.
+    let regexpSource = @tryGetById(regexp, "source");
+    if (regexpSource !== @regExpProtoSourceGetter)
+        return true;
+    
+    return !@isRegExpObject(regexp);
+}
+
+// ES 21.2.5.11 RegExp.prototype[@@split](string, limit)
+function split(string, limit)
+{
+    "use strict";
+
+    // 1. Let rx be the this value.
+    // 2. If Type(rx) is not Object, throw a TypeError exception.
+    if (!@isObject(this))
+        @throwTypeError("RegExp.prototype.@@split requires that |this| be an Object");
+    let regexp = this;
+
+    // 3. Let S be ? ToString(string).
+    let str = @toString(string);
+
+    // 4. Let C be ? SpeciesConstructor(rx, %RegExp%).
+    let speciesConstructor = @speciesConstructor(regexp, @RegExp);
+
+    if (speciesConstructor === @RegExp && !@hasObservableSideEffectsForRegExpSplit(regexp))
+        return @regExpSplitFast.@call(regexp, str, limit);
+
+    // 5. Let flags be ? ToString(? Get(rx, "flags")).
+    let flags = @toString(regexp.flags);
+
+    // 6. If flags contains "u", let unicodeMatching be true.
+    // 7. Else, let unicodeMatching be false.
+    let unicodeMatching = @stringIncludesInternal.@call(flags, "u");
+    // 8. If flags contains "y", let newFlags be flags.
+    // 9. Else, let newFlags be the string that is the concatenation of flags and "y".
+    let newFlags = @stringIncludesInternal.@call(flags, "y") ? flags : flags + "y";
+
+    // 10. Let splitter be ? Construct(C, « rx, newFlags »).
+    let splitter = new speciesConstructor(regexp, newFlags);
+
+    // We need to check again for RegExp subclasses that will fail the speciesConstructor test
+    // but can still use the fast path after we invoke the constructor above.
+    if (!@hasObservableSideEffectsForRegExpSplit(splitter))
+        return @regExpSplitFast.@call(splitter, str, limit);
+
+    // 11. Let A be ArrayCreate(0).
+    // 12. Let lengthA be 0.
+    let result = [];
+
+    // 13. If limit is undefined, let lim be 2^32-1; else let lim be ? ToUint32(limit).
+    limit = (limit === @undefined) ? 0xffffffff : limit >>> 0;
+
+    // 16. If lim = 0, return A.
+    if (!limit)
+        return result;
+
+    // 14. [Deferred from above] Let size be the number of elements in S.
+    let size = str.length;
+
+    // 17. If size = 0, then
+    if (!size) {
+        // a. Let z be ? RegExpExec(splitter, S).
+        let z = @regExpExec(splitter, str);
+        // b. If z is not null, return A.
+        if (z != null)
+            return result;
+        // c. Perform ! CreateDataProperty(A, "0", S).
+        @putByValDirect(result, 0, str);
+        // d. Return A.
+        return result;
+    }
+
+    // 15. [Deferred from above] Let p be 0.
+    let position = 0;
+    // 18. Let q be p.
+    let matchPosition = 0;
+
+    // 19. Repeat, while q < size
+    while (matchPosition < size) {
+        // a. Perform ? Set(splitter, "lastIndex", q, true).
+        splitter.lastIndex = matchPosition;
+        // b. Let z be ? RegExpExec(splitter, S).
+        let matches = @regExpExec(splitter, str);
+        // c. If z is null, let q be AdvanceStringIndex(S, q, unicodeMatching).
+        if (matches === null)
+            matchPosition = @advanceStringIndex(str, matchPosition, unicodeMatching);
+        // d. Else z is not null,
+        else {
+            // i. Let e be ? ToLength(? Get(splitter, "lastIndex")).
+            let endPosition = @toLength(splitter.lastIndex);
+            // ii. Let e be min(e, size).
+            endPosition = (endPosition <= size) ? endPosition : size;
+            // iii. If e = p, let q be AdvanceStringIndex(S, q, unicodeMatching).
+            if (endPosition === position)
+                matchPosition = @advanceStringIndex(str, matchPosition, unicodeMatching);
+            // iv. Else e != p,
+            else {
+                // 1. Let T be a String value equal to the substring of S consisting of the elements at indices p (inclusive) through q (exclusive).
+                let subStr = @stringSubstrInternal.@call(str, position, matchPosition - position);
+                // 2. Perform ! CreateDataProperty(A, ! ToString(lengthA), T).
+                // 3. Let lengthA be lengthA + 1.
+                @putByValDirect(result, result.length, subStr);
+                // 4. If lengthA = lim, return A.
+                if (result.length == limit)
+                    return result;
+
+                // 5. Let p be e.
+                position = endPosition;
+                // 6. Let numberOfCaptures be ? ToLength(? Get(z, "length")).
+                // 7. Let numberOfCaptures be max(numberOfCaptures-1, 0).
+                let numberOfCaptures = matches.length > 1 ? matches.length - 1 : 0;
+
+                // 8. Let i be 1.
+                let i = 1;
+                // 9. Repeat, while i <= numberOfCaptures,
+                while (i <= numberOfCaptures) {
+                    // a. Let nextCapture be ? Get(z, ! ToString(i)).
+                    let nextCapture = matches[i];
+                    // b. Perform ! CreateDataProperty(A, ! ToString(lengthA), nextCapture).
+                    // d. Let lengthA be lengthA + 1.
+                    @putByValDirect(result, result.length, nextCapture);
+                    // e. If lengthA = lim, return A.
+                    if (result.length == limit)
+                        return result;
+                    // c. Let i be i + 1.
+                    i++;
+                }
+                // 10. Let q be p.
+                matchPosition = position;
+            }
+        }
+    }
+    // 20. Let T be a String value equal to the substring of S consisting of the elements at indices p (inclusive) through size (exclusive).
+    let remainingStr = @stringSubstrInternal.@call(str, position, size);
+    // 21. Perform ! CreateDataProperty(A, ! ToString(lengthA), T).
+    @putByValDirect(result, result.length, remainingStr);
+    // 22. Return A.
+    return result;
+}
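+
+// Illustrative sketch (comment only, not part of the builtin): capture
+// groups are spliced into the output, and |limit| caps the array length.
+//
+//     "a1b2c".split(/(\d)/);  // ["a", "1", "b", "2", "c"]
+//     "a1b2c".split(/\d/, 2); // ["a", "b"]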
+
+// ES 21.2.5.13 RegExp.prototype.test(string)
+@intrinsic=RegExpTestIntrinsic
+function test(strArg)
+{
+    "use strict";
+
+    let regexp = this;
+
+    // Check for observable side effects and call the fast path if there aren't any.
+    if (@isRegExpObject(regexp) && @tryGetById(regexp, "exec") === @regExpBuiltinExec)
+        return @regExpTestFast.@call(regexp, strArg);
+
+    // 1. Let R be the this value.
+    // 2. If Type(R) is not Object, throw a TypeError exception.
+    if (!@isObject(regexp))
+        @throwTypeError("RegExp.prototype.test requires that |this| be an Object");
+
+    // 3. Let string be ? ToString(S).
+    let str = @toString(strArg);
+
+    // 4. Let match be ? RegExpExec(R, string).
+    let match = @regExpExec(regexp, str);
+
+    // 5. If match is not null, return true; else return false.
+    if (match !== null)
+        return true;
+    return false;
+}
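+
+// Illustrative sketch (comment only, not part of the builtin): overriding
+// exec on an instance defeats the fast path, and test() then observes the
+// custom method through RegExpExec.
+//
+//     var re = /a/;
+//     re.exec = function () { return null; };
+//     re.test("a"); // false - the custom exec reported no match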
diff --git a/Source/JavaScriptCore/builtins/SetPrototype.js b/Source/JavaScriptCore/builtins/SetPrototype.js
new file mode 100644
index 000000000..e9b6626e9
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/SetPrototype.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (!@isSet(this))
+        @throwTypeError("Set operation called on non-Set object");
+
+    if (typeof callback !== 'function')
+        @throwTypeError("Set.prototype.forEach callback must be a function");
+
+    var thisArg = @argument(1);
+    var iterator = @SetIterator(this);
+
+    // To avoid object allocations for iterator result objects, we pass the placeholder to the special "next" function in order to fill the results.
+    var value = [ @undefined ];
+    for (;;) {
+        if (@setIteratorNext.@call(iterator, value))
+            break;
+        callback.@call(thisArg, value[0], value[0], this);
+    }
+}
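+
+// Illustrative usage sketch (comment only, not part of the builtin): the
+// callback receives the value twice (as both "value" and "key") plus the
+// set itself, mirroring Map.prototype.forEach's signature.
+//
+//     new Set(["a", "b"]).forEach(function (value, key, set) {
+//         // value === key on every iteration
+//     });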
diff --git a/Source/JavaScriptCore/builtins/StringConstructor.js b/Source/JavaScriptCore/builtins/StringConstructor.js
new file mode 100644
index 000000000..a3293328b
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/StringConstructor.js
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function raw(template)
+{
+    "use strict";
+
+    if (template === null || template === @undefined)
+        @throwTypeError("String.raw requires template not be null or undefined");
+    var cookedSegments = @Object(template);
+
+    var rawValue = cookedSegments.raw;
+    if (rawValue === null || rawValue === @undefined)
+        @throwTypeError("String.raw requires template.raw not be null or undefined");
+    var rawSegments = @Object(rawValue);
+
+    var numberOfSubstitutions = arguments.length - 1;
+
+    var segmentCount = @toLength(rawSegments.length);
+
+    if (segmentCount <= 0)
+        return '';
+
+    var stringElements = '';
+    for (var i = 0; ; ++i) {
+        var segment = @toString(rawSegments[i]);
+        stringElements += segment;
+
+        if ((i + 1) === segmentCount)
+            return stringElements;
+
+        if (i < numberOfSubstitutions) {
+            var substitutionIndexInArguments = i + 1;
+            var next = @toString(arguments[substitutionIndexInArguments]);
+            stringElements += next;
+        }
+    }
+}
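+
+// Illustrative usage sketch (comment only, not part of the builtin): as a
+// tag function, String.raw interleaves the raw (unescaped) segments with
+// the substitution values.
+//
+//     String.raw`a\n${1 + 1}b`; // "a\\n2b" - the backslash survives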
diff --git a/Source/JavaScriptCore/builtins/StringIteratorPrototype.js b/Source/JavaScriptCore/builtins/StringIteratorPrototype.js
new file mode 100644
index 000000000..52762dbf3
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/StringIteratorPrototype.js
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function next()
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("%StringIteratorPrototype%.next requires that |this| not be null or undefined");
+
+    var position = this.@stringIteratorNextIndex;
+    if (position === @undefined)
+        @throwTypeError("%StringIteratorPrototype%.next requires that |this| be a String Iterator instance");
+
+    var done = true;
+    var value = @undefined;
+
+    var string = this.@iteratedString;
+    if (string !== @undefined) {
+        var length = string.length >>> 0;
+        if (position >= length) {
+            this.@iteratedString = @undefined;
+        } else {
+            done = false;
+
+            var first = string.@charCodeAt(position);
+            if (first < 0xD800 || first > 0xDBFF || position + 1 === length)
+                value = string[position];
+            else {
+                var second = string.@charCodeAt(position + 1);
+                if (second < 0xDC00 || second > 0xDFFF)
+                    value = string[position];
+                else
+                    value = string[position] + string[position + 1];
+            }
+
+            this.@stringIteratorNextIndex = position + value.length;
+        }
+    }
+
+    return {done, value};
+}
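+
+// Illustrative sketch (comment only, not part of the builtin): because
+// next() keeps surrogate pairs together, spreading a string yields code
+// points rather than code units.
+//
+//     [..."a\u{1F600}"].length; // 2, while "a\u{1F600}".length is 3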
diff --git a/Source/JavaScriptCore/builtins/StringPrototype.js b/Source/JavaScriptCore/builtins/StringPrototype.js
new file mode 100644
index 000000000..c9cdfcf02
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/StringPrototype.js
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2015 Andy VanWagoner .
+ * Copyright (C) 2016 Yusuke Suzuki 
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function match(regexp)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.match requires that |this| not be null or undefined");
+
+    if (regexp != null) {
+        var matcher = regexp.@matchSymbol;
+        if (matcher != @undefined)
+            return matcher.@call(regexp, this);
+    }
+
+    let thisString = @toString(this);
+    let createdRegExp = @regExpCreate(regexp, @undefined);
+    return createdRegExp.@matchSymbol(thisString);
+}
+
+@globalPrivate
+function repeatSlowPath(string, count)
+{
+    "use strict";
+
+    // Return an empty string.
+    if (count === 0 || string.length === 0)
+        return "";
+
+    // Return the original string.
+    if (count === 1)
+        return string;
+
+    if (string.length * count > @MAX_STRING_LENGTH)
+        @throwOutOfMemoryError();
+
+    // Bit operations on |count| are safe because |count| should be within the Int32 range.
+    // Repeating log2(N) times generates the repeated string rope.
+    var result = "";
+    var operand = string;
+    while (true) {
+        if (count & 1)
+            result += operand;
+        count >>= 1;
+        if (!count)
+            return result;
+        operand += operand;
+    }
+}
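+
+// Worked example (comment only): for count === 5 (binary 101) the loop
+// appends |operand| on each set bit: bit 0 appends s, |operand| doubles to
+// ss and then ssss, and bit 2 appends ssss - five copies in three passes.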
+
+@globalPrivate
+function repeatCharactersSlowPath(string, count)
+{
+    "use strict";
+    var repeatCount = (count / string.length) | 0;
+    var remainingCharacters = count - repeatCount * string.length;
+    var result = "";
+    var operand = string;
+    // Bit operations on |repeatCount| are safe because |repeatCount| should be within
+    // the Int32 range. Repeating log2(N) times generates the repeated string rope.
+    while (true) {
+        if (repeatCount & 1)
+            result += operand;
+        repeatCount >>= 1;
+        if (!repeatCount)
+            break;
+        operand += operand;
+    }
+    if (remainingCharacters)
+        result += @stringSubstrInternal.@call(string, 0, remainingCharacters);
+    return result;
+}
+
+
+function repeat(count)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.repeat requires that |this| not be null or undefined");
+
+    var string = @toString(this);
+    count = @toInteger(count);
+
+    if (count < 0 || count === @Infinity)
+        @throwRangeError("String.prototype.repeat argument must be greater than or equal to 0 and not be Infinity");
+
+    if (string.length === 1)
+        return @repeatCharacter(string, count);
+
+    return @repeatSlowPath(string, count);
+}
+
+function padStart(maxLength/*, fillString*/)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.padStart requires that |this| not be null or undefined");
+
+    var string = @toString(this);
+    maxLength = @toLength(maxLength);
+
+    var stringLength = string.length;
+    if (maxLength <= stringLength)
+        return string;
+
+    var filler;
+    var fillString = @argument(1);
+    if (fillString === @undefined)
+        filler = " ";
+    else {
+        filler = @toString(fillString);
+        if (filler === "")
+            return string;
+    }
+
+    if (maxLength > @MAX_STRING_LENGTH)
+        @throwOutOfMemoryError();
+
+    var fillLength = maxLength - stringLength;
+    var truncatedStringFiller;
+
+    if (filler.length === 1)
+        truncatedStringFiller = @repeatCharacter(filler, fillLength);
+    else
+        truncatedStringFiller = @repeatCharactersSlowPath(filler, fillLength);
+    return truncatedStringFiller + string;
+}
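+
+// Illustrative usage sketch (comment only, not part of the builtin):
+//
+//     "5".padStart(3, "0");  // "005"
+//     "abc".padStart(2);     // "abc" - already long enough
+//     "x".padStart(5, "ab"); // "ababx" - filler truncated to fit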
+
+function padEnd(maxLength/*, fillString*/)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.padEnd requires that |this| not be null or undefined");
+
+    var string = @toString(this);
+    maxLength = @toLength(maxLength);
+
+    var stringLength = string.length;
+    if (maxLength <= stringLength)
+        return string;
+
+    var filler;
+    var fillString = @argument(1);
+    if (fillString === @undefined)
+        filler = " ";
+    else {
+        filler = @toString(fillString);
+        if (filler === "")
+            return string;
+    }
+
+    if (maxLength > @MAX_STRING_LENGTH)
+        @throwOutOfMemoryError();
+
+    var fillLength = maxLength - stringLength;
+    var truncatedStringFiller;
+
+    if (filler.length === 1)
+        truncatedStringFiller = @repeatCharacter(filler, fillLength);
+    else
+        truncatedStringFiller = @repeatCharactersSlowPath(filler, fillLength);
+    return string + truncatedStringFiller;
+}
+
+@globalPrivate
+function hasObservableSideEffectsForStringReplace(regexp, replacer) {
+    if (replacer !== @regExpPrototypeSymbolReplace)
+        return true;
+    
+    let regexpExec = @tryGetById(regexp, "exec");
+    if (regexpExec !== @regExpBuiltinExec)
+        return true;
+
+    let regexpGlobal = @tryGetById(regexp, "global");
+    if (regexpGlobal !== @regExpProtoGlobalGetter)
+        return true;
+
+    let regexpUnicode = @tryGetById(regexp, "unicode");
+    if (regexpUnicode !== @regExpProtoUnicodeGetter)
+        return true;
+
+    return !@isRegExpObject(regexp);
+}
+
+@intrinsic=StringPrototypeReplaceIntrinsic
+function replace(search, replace)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.replace requires that |this| not be null or undefined");
+
+    if (search != null) {
+        let replacer = search.@replaceSymbol;
+        if (replacer !== @undefined) {
+            if (!@hasObservableSideEffectsForStringReplace(search, replacer))
+                return @toString(this).@replaceUsingRegExp(search, replace);
+            return replacer.@call(search, this, replace);
+        }
+    }
+
+    let thisString = @toString(this);
+    let searchString = @toString(search);
+    return thisString.@replaceUsingStringSearch(searchString, replace);
+}
+    
+function localeCompare(that/*, locales, options */)
+{
+    "use strict";
+
+    // 13.1.1 String.prototype.localeCompare (that [, locales [, options ]]) (ECMA-402 2.0)
+    // http://ecma-international.org/publications/standards/Ecma-402.htm
+
+    // 1. Let O be RequireObjectCoercible(this value).
+    if (this == null)
+        @throwTypeError("String.prototype.localeCompare requires that |this| not be null or undefined");
+
+    // 2. Let S be ToString(O).
+    // 3. ReturnIfAbrupt(S).
+    var thisString = @toString(this);
+
+    // 4. Let That be ToString(that).
+    // 5. ReturnIfAbrupt(That).
+    var thatString = @toString(that);
+
+    // Avoid creating a collator for defaults.
+    var locales = @argument(1);
+    var options = @argument(2);
+    if (locales === @undefined && options === @undefined)
+        return @Collator.prototype.compare(thisString, thatString);
+
+    // 6. Let collator be Construct(%Collator%, «locales, options»).
+    // 7. ReturnIfAbrupt(collator).
+    var collator = new @Collator(locales, options);
+
+    // 8. Return CompareStrings(collator, S, That).
+    return collator.compare(thisString, thatString);
+}
+
+function search(regexp)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.search requires that |this| not be null or undefined");
+
+    if (regexp != null) {
+        var searcher = regexp.@searchSymbol;
+        if (searcher != @undefined)
+            return searcher.@call(regexp, this);
+    }
+
+    var thisString = @toString(this);
+    var createdRegExp = @regExpCreate(regexp, @undefined);
+    return createdRegExp.@searchSymbol(thisString);
+}
+
+function split(separator, limit)
+{
+    "use strict";
+    
+    if (this == null)
+        @throwTypeError("String.prototype.split requires that |this| not be null or undefined");
+    
+    if (separator != null) {
+        var splitter = separator.@splitSymbol;
+        if (splitter != @undefined)
+            return splitter.@call(separator, this, limit);
+    }
+    
+    return @stringSplitFast.@call(this, separator, limit);
+}
diff --git a/Source/JavaScriptCore/builtins/TypedArrayConstructor.js b/Source/JavaScriptCore/builtins/TypedArrayConstructor.js
new file mode 100644
index 000000000..54a957b23
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/TypedArrayConstructor.js
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// According to the spec we are supposed to crawl the prototype chain looking
+// for a TypedArray constructor. The way we implement this is with a
+// private function, @allocateTypedArray, on each of the prototypes.
+// This enables us to optimize this lookup in the inline cache.
+
+function of(/* items... */)
+{
+    "use strict";
+    let len = arguments.length;
+    let constructFunction = this.@allocateTypedArray;
+    if (constructFunction === @undefined)
+        @throwTypeError("TypedArray.of requires its this argument to subclass a TypedArray constructor");
+
+    let result = constructFunction(len);
+
+    for (let i = 0; i < len; i++)
+        result[i] = arguments[i];
+
+    return result;
+}
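+
+// Illustrative usage sketch (comment only, not part of the builtin): the
+// allocator is looked up on |this| via the private @allocateTypedArray
+// hook, so subclasses find it on their prototype chain.
+//
+//     Int8Array.of(1, 2, 3);      // Int8Array [1, 2, 3]
+//     Uint8Array.of(255, 256)[1]; // 0 - values are converted on store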
+
+function from(items /* [ , mapfn [ , thisArg ] ] */)
+{
+    "use strict";
+
+    let mapFn = @argument(1);
+
+    let thisArg;
+
+    if (mapFn !== @undefined) {
+        if (typeof mapFn !== "function")
+            @throwTypeError("TypedArray.from requires that the second argument, when provided, be a function");
+
+        thisArg = @argument(2);
+    }
+
+    if (items == null)
+        @throwTypeError("TypedArray.from requires an array-like object - not null or undefined");
+
+    let iteratorMethod = items.@iteratorSymbol;
+    if (iteratorMethod != null) {
+        if (typeof iteratorMethod !== "function")
+            @throwTypeError("TypedArray.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function");
+
+        let accumulator = [];
+
+        let k = 0;
+        let iterator = iteratorMethod.@call(items);
+
+        // Since the for-of loop below looks up the @@iterator property of its
+        // iterable once more, that lookup would be observable if the user defined
+        // a getter for @@iterator. To avoid this, we define a wrapper object whose
+        // @@iterator method simply returns the iterator we already obtained.
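+        // For example (comment only), with
+        //     var evil = { get [Symbol.iterator]() { /* runs on every lookup */ } };
+        // the getter would fire a second time when the for-of loop below
+        // starts; the wrapper guarantees a single lookup.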
+        let wrapper = {};
+        wrapper.@iteratorSymbol = function() { return iterator; };
+
+        for (let value of wrapper) {
+            if (mapFn)
+                @putByValDirect(accumulator, k, thisArg === @undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+            else
+                @putByValDirect(accumulator, k, value);
+            k++;
+        }
+
+        let constructFunction = this.@allocateTypedArray;
+        if (constructFunction === @undefined)
+            @throwTypeError("TypedArray.from requires its this argument subclass a TypedArray constructor");
+
+        let result = constructFunction(k);
+
+        for (let i = 0; i < k; i++)
+            result[i] = accumulator[i];
+
+        return result;
+    }
+
+    let arrayLike = @Object(items);
+    let arrayLikeLength = @toLength(arrayLike.length);
+
+    let constructFunction = this.@allocateTypedArray;
+    if (constructFunction === @undefined)
+        @throwTypeError("this does not subclass a TypedArray constructor");
+
+    let result = constructFunction(arrayLikeLength);
+
+    let k = 0;
+    while (k < arrayLikeLength) {
+        let value = arrayLike[k];
+        if (mapFn)
+            result[k] = thisArg === @undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k);
+        else
+            result[k] = value;
+        k++;
+    }
+
+    return result;
+}
+
+function allocateInt8Array(length)
+{
+    return new @Int8Array(length);
+}
+
+function allocateInt16Array(length)
+{
+    return new @Int16Array(length);    
+}
+
+function allocateInt32Array(length)
+{
+    return new @Int32Array(length);   
+}
+
+function allocateUint32Array(length)
+{
+    return new @Uint32Array(length);
+}
+
+function allocateUint16Array(length)
+{
+    return new @Uint16Array(length);   
+}
+
+function allocateUint8Array(length)
+{
+    return new @Uint8Array(length);   
+}
+
+function allocateUint8ClampedArray(length)
+{
+    return new @Uint8ClampedArray(length);
+}
+
+function allocateFloat32Array(length)
+{
+    return new @Float32Array(length);
+}
+
+function allocateFloat64Array(length)
+{
+    return new @Float64Array(length);
+}
diff --git a/Source/JavaScriptCore/builtins/TypedArrayPrototype.js b/Source/JavaScriptCore/builtins/TypedArrayPrototype.js
new file mode 100644
index 000000000..53674bf7b
--- /dev/null
+++ b/Source/JavaScriptCore/builtins/TypedArrayPrototype.js
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Note that the intrinsic @typedArrayLength checks that the argument passed is a typed array
+// and throws if it is not.
+
+
+// Typed Arrays have their own species constructor function since they need
+// to look up their default constructor, which is expensive. If we used the
+// normal speciesConstructor helper we would need to look up the default
+// constructor every time.
+@globalPrivate
+function typedArraySpeciesConstructor(value)
+{
+    "use strict";
+    let constructor = value.constructor;
+    if (constructor === @undefined)
+        return @typedArrayGetOriginalConstructor(value);
+
+    if (!@isObject(constructor))
+        @throwTypeError("|this|.constructor is not an Object or undefined");
+
+    constructor = constructor.@speciesSymbol;
+    if (constructor == null)
+        return @typedArrayGetOriginalConstructor(value);
+    // The lack of an @isConstructor(constructor) check here is not observable because
+    // the first thing we will do with the value is attempt to construct the result with it.
+    // If any user of this function does not immediately construct the result they need to
+    // verify that the result is a constructor.
+    return constructor;
+}
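+
+// As a rough example of what this lookup makes observable from script
+// (@speciesSymbol is the private name for Symbol.species):
+//
+//     class MyView extends Uint8Array {
+//         static get [Symbol.species]() { return Uint8Array; }
+//     }
+//
+// Species-aware methods below, such as subarray(), would then produce plain
+// Uint8Array results for MyView instances instead of MyView results.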
+
+@globalPrivate
+function typedArrayClampArgumentToStartOrEnd(value, length, undefinedValue)
+{
+    "use strict";
+
+    if (value === @undefined)
+        return undefinedValue;
+
+    let int = @toInteger(value);
+    if (int < 0) {
+        int += length;
+        return int < 0 ? 0 : int;
+    }
+    return int > length ? length : int;
+}
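+
+// For example, on a view of length 4 a start argument of -2 is clamped to
+// -2 + 4 = 2 and an end argument of 9 is clamped down to 4, so fill(0, -2, 9)
+// writes only indices 2 and 3.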
+
+function values()
+{
+    "use strict";
+    @typedArrayLength(this);
+    return new @createArrayIterator(this, "value", @arrayIteratorValueNext);
+}
+
+function keys()
+{
+    "use strict";
+    @typedArrayLength(this);
+    return new @createArrayIterator(this, "key", @arrayIteratorKeyNext);
+}
+
+function entries()
+{
+    "use strict";
+    @typedArrayLength(this);
+    return new @createArrayIterator(this, "key+value", @arrayIteratorKeyValueNext);
+}
+
+function every(callback /*, thisArg */)
+{
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.every callback must be a function");
+
+    for (var i = 0; i < length; i++) {
+        if (!callback.@call(thisArg, this[i], i, this))
+            return false;
+    }
+
+    return true;
+}
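+
+// For example, new Int8Array([1, 2, 3]).every((x) => x > 0) yields true, while
+// with (x) => x < 3 the loop stops at the element 3 and yields false.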
+
+function fill(value /* [, start [, end]] */)
+{
+    "use strict";
+
+    let length = @typedArrayLength(this);
+
+    let start = @argument(1);
+    let end = @argument(2);
+
+    start = @typedArrayClampArgumentToStartOrEnd(start, length, 0);
+    end = @typedArrayClampArgumentToStartOrEnd(end, length, length);
+
+    for (let i = start; i < end; i++)
+        this[i] = value;
+    return this;
+}
+
+function find(callback /* [, thisArg] */)
+{
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.find callback must be a function");
+
+    for (var i = 0; i < length; i++) {
+        let elem = this[i];
+        if (callback.@call(thisArg, elem, i, this))
+            return elem;
+    }
+    return @undefined;
+}
+
+function findIndex(callback /* [, thisArg] */)
+{
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.findIndex callback must be a function");
+
+    for (var i = 0; i < length; i++) {
+        if (callback.@call(thisArg, this[i], i, this))
+            return i;
+    }
+    return -1;
+}
+
+function forEach(callback /* [, thisArg] */)
+{
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.forEach callback must be a function");
+
+    for (var i = 0; i < length; i++)
+        callback.@call(thisArg, this[i], i, this);
+}
+
+function some(callback /* [, thisArg] */)
+{
+    // 22.2.3.24
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.some callback must be a function");
+
+    for (var i = 0; i < length; i++) {
+        if (callback.@call(thisArg, this[i], i, this))
+            return true;
+    }
+
+    return false;
+}
+
+function sort(comparator)
+{
+    // 22.2.3.25
+    "use strict";
+
+    function min(a, b)
+    {
+        return a < b ? a : b;
+    }
+
+    function merge(dst, src, srcIndex, srcEnd, width, comparator)
+    {
+        var left = srcIndex;
+        var leftEnd = min(left + width, srcEnd);
+        var right = leftEnd;
+        var rightEnd = min(right + width, srcEnd);
+
+        for (var dstIndex = left; dstIndex < rightEnd; ++dstIndex) {
+            if (right < rightEnd) {
+                if (left >= leftEnd || comparator(src[right], src[left]) < 0) {
+                    dst[dstIndex] = src[right++];
+                    continue;
+                }
+            }
+
+            dst[dstIndex] = src[left++];
+        }
+    }
+
+    function mergeSort(array, valueCount, comparator)
+    {
+        var buffer = [ ];
+        buffer.length = valueCount;
+
+        var dst = buffer;
+        var src = array;
+
+        for (var width = 1; width < valueCount; width *= 2) {
+            for (var srcIndex = 0; srcIndex < valueCount; srcIndex += 2 * width)
+                merge(dst, src, srcIndex, valueCount, width, comparator);
+
+            var tmp = src;
+            src = dst;
+            dst = tmp;
+        }
+
+        if (src !== array) {
+            for (var i = 0; i < valueCount; i++)
+                array[i] = src[i];
+        }
+    }
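+
+    // A sketch of mergeSort on [3, 1, 2]: the width-1 pass merges the runs (3)
+    // and (1), then copies (2), leaving the buffer as [1, 3, 2]; the width-2
+    // pass merges [1, 3] with [2] back into the array as [1, 2, 3]. src and dst
+    // swap after every pass, so the final copy-back loop runs only when the
+    // sorted result ended up in the temporary buffer.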
+
+    var length = @typedArrayLength(this);
+
+    if (length < 2)
+        return this;
+
+    if (typeof comparator === "function")
+        mergeSort(this, length, comparator);
+    else
+        @typedArraySort(this);
+
+    return this;
+}
+
+function subarray(begin, end)
+{
+    "use strict";
+
+    if (!@isTypedArrayView(this))
+        @throwTypeError("|this| should be a typed array view");
+
+    let start = @toInteger(begin);
+    let finish;
+    if (end !== @undefined)
+        finish = @toInteger(end);
+
+    let constructor = @typedArraySpeciesConstructor(this);
+
+    return @typedArraySubarrayCreate.@call(this, start, finish, constructor);
+}
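+
+// For example, new Int32Array([1, 2, 3, 4]).subarray(1, 3) yields an Int32Array
+// viewing [2, 3]; the two views share one buffer, so writes through either are
+// visible through the other.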
+
+function reduce(callback /* [, initialValue] */)
+{
+    // 22.2.3.19
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.reduce callback must be a function");
+
+    var argumentCount = @argumentCount();
+    if (length === 0 && argumentCount < 2)
+        @throwTypeError("TypedArray.prototype.reduce of empty array with no initial value");
+
+    var accumulator, k = 0;
+    if (argumentCount > 1)
+        accumulator = @argument(1);
+    else
+        accumulator = this[k++];
+
+    for (; k < length; k++)
+        accumulator = callback.@call(@undefined, accumulator, this[k], k, this);
+
+    return accumulator;
+}
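+
+// For example, new Int32Array([1, 2, 3]).reduce((acc, x) => acc + x) passes no
+// initial value, so the accumulator starts as the first element (1) and the
+// result is 6.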
+
+function reduceRight(callback /* [, initialValue] */)
+{
+    // 22.2.3.20
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.reduceRight callback must be a function");
+
+    var argumentCount = @argumentCount();
+    if (length === 0 && argumentCount < 2)
+        @throwTypeError("TypedArray.prototype.reduceRight of empty array with no initial value");
+
+    var accumulator, k = length - 1;
+    if (argumentCount > 1)
+        accumulator = @argument(1);
+    else
+        accumulator = this[k--];
+
+    for (; k >= 0; k--)
+        accumulator = callback.@call(@undefined, accumulator, this[k], k, this);
+
+    return accumulator;
+}
+
+function map(callback /*, thisArg */)
+{
+    // 22.2.3.18
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.map callback must be a function");
+
+    var thisArg = @argument(1);
+
+    // Do species construction
+    var constructor = this.constructor;
+    var result;
+    if (constructor === @undefined)
+        result = new (@typedArrayGetOriginalConstructor(this))(length);
+    else {
+        var speciesConstructor = @Object(constructor).@speciesSymbol;
+        if (speciesConstructor === null || speciesConstructor === @undefined)
+            result = new (@typedArrayGetOriginalConstructor(this))(length);
+        else {
+            result = new speciesConstructor(length);
+            // typedArrayLength throws if it doesn't get a view.
+            @typedArrayLength(result);
+        }
+    }
+
+    for (var i = 0; i < length; i++) {
+        var mappedValue = callback.@call(thisArg, this[i], i, this);
+        result[i] = mappedValue;
+    }
+    return result;
+}
+
+function filter(callback /*, thisArg */)
+{
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.filter callback must be a function");
+
+    var thisArg = @argument(1);
+    var kept = [];
+
+    for (var i = 0; i < length; i++) {
+        var value = this[i];
+        if (callback.@call(thisArg, value, i, this))
+            kept.@push(value);
+    }
+
+    var constructor = this.constructor;
+    var result;
+    var resultLength = kept.length;
+    if (constructor === @undefined)
+        result = new (@typedArrayGetOriginalConstructor(this))(resultLength);
+    else {
+        var speciesConstructor = @Object(constructor).@speciesSymbol;
+        if (speciesConstructor === null || speciesConstructor === @undefined)
+            result = new (@typedArrayGetOriginalConstructor(this))(resultLength);
+        else {
+            result = new speciesConstructor(resultLength);
+            // typedArrayLength throws if it doesn't get a view.
+            @typedArrayLength(result);
+        }
+    }
+
+    for (var i = 0; i < kept.length; i++)
+        result[i] = kept[i];
+
+    return result;
+}
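+
+// filter is deliberately two-pass: the surviving values are buffered in an
+// ordinary array first, because the result length is not known until the
+// callback has run over every element, and only then is the (possibly
+// user-supplied) species constructor invoked, exactly once, with the final
+// length.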
+
+function toLocaleString()
+{
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (length === 0)
+        return "";
+
+    var string = this[0].toLocaleString();
+    for (var i = 1; i < length; i++)
+        string += "," + this[i].toLocaleString();
+
+    return string;
+}
diff --git a/Source/JavaScriptCore/bytecode/AccessCase.cpp b/Source/JavaScriptCore/bytecode/AccessCase.cpp
new file mode 100644
index 000000000..658ea0f1e
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AccessCase.cpp
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AccessCase.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "CallLinkInfo.h"
+#include "DOMJITGetterSetter.h"
+#include "DirectArguments.h"
+#include "GetterSetter.h"
+#include "GetterSetterAccessCase.h"
+#include "HeapInlines.h"
+#include "IntrinsicGetterAccessCase.h"
+#include "JSCJSValueInlines.h"
+#include "JSModuleEnvironment.h"
+#include "JSModuleNamespaceObject.h"
+#include "LinkBuffer.h"
+#include "ModuleNamespaceAccessCase.h"
+#include "PolymorphicAccess.h"
+#include "ScopedArguments.h"
+#include "ScratchRegisterAllocator.h"
+#include "SlotVisitorInlines.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+static const bool verbose = false;
+
+AccessCase::AccessCase(VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet)
+    : m_type(type)
+    , m_offset(offset)
+{
+    m_structure.setMayBeNull(vm, owner, structure);
+    m_conditionSet = conditionSet;
+}
+
+std::unique_ptr<AccessCase> AccessCase::create(VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet)
+{
+    switch (type) {
+    case InHit:
+    case InMiss:
+    case ArrayLength:
+    case StringLength:
+    case DirectArgumentsLength:
+    case ScopedArgumentsLength:
+    case ModuleNamespaceLoad:
+    case Replace:
+        break;
+    default:
+        ASSERT_NOT_REACHED();
+    }
+
+    return std::unique_ptr<AccessCase>(new AccessCase(vm, owner, type, offset, structure, conditionSet));
+}
+
+std::unique_ptr<AccessCase> AccessCase::create(
+    VM& vm, JSCell* owner, PropertyOffset offset, Structure* oldStructure, Structure* newStructure,
+    const ObjectPropertyConditionSet& conditionSet)
+{
+    RELEASE_ASSERT(oldStructure == newStructure->previousID());
+
+    // Skip optimizing the case where we need a realloc, if we don't have
+    // enough registers to make it happen.
+    if (GPRInfo::numberOfRegisters < 6
+        && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
+        && oldStructure->outOfLineCapacity()) {
+        return nullptr;
+    }
+
+    return std::unique_ptr<AccessCase>(new AccessCase(vm, owner, Transition, offset, newStructure, conditionSet));
+}
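+
+// As a rough JS-level example: repeatedly running
+//
+//     function addY(o) { o.y = 42; }
+//
+// on objects whose structure lacks "y" caches a Transition case from the old
+// structure to the new one. The guard above skips the optimization when the
+// out-of-line property storage would need reallocating and fewer than six
+// registers are available to do it inline.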
+
+AccessCase::~AccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
+    VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
+{
+    switch (stubInfo.cacheType) {
+    case CacheType::GetByIdSelf:
+        return ProxyableAccessCase::create(vm, owner, Load, stubInfo.u.byIdSelf.offset, stubInfo.u.byIdSelf.baseObjectStructure.get());
+
+    case CacheType::PutByIdReplace:
+        return AccessCase::create(vm, owner, Replace, stubInfo.u.byIdSelf.offset, stubInfo.u.byIdSelf.baseObjectStructure.get());
+
+    default:
+        return nullptr;
+    }
+}
+
+std::unique_ptr<AccessCase> AccessCase::clone() const
+{
+    std::unique_ptr<AccessCase> result(new AccessCase(*this));
+    result->resetState();
+    return result;
+}
+
+Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
+{
+    // It's fine to commit something that is already committed. That arises when we switch to using
+    // newly allocated watchpoints. When it happens, it's not efficient - but we think that's OK
+    // because most AccessCases have no extra watchpoints anyway.
+    RELEASE_ASSERT(m_state == Primordial || m_state == Committed);
+
+    Vector<WatchpointSet*, 2> result;
+
+    if ((structure() && structure()->needImpurePropertyWatchpoint())
+        || m_conditionSet.needImpurePropertyWatchpoint())
+        result.append(vm.ensureWatchpointSetForImpureProperty(ident));
+
+    if (additionalSet())
+        result.append(additionalSet());
+
+    m_state = Committed;
+
+    return result;
+}
+
+bool AccessCase::guardedByStructureCheck() const
+{
+    if (viaProxy())
+        return false;
+
+    switch (m_type) {
+    case ArrayLength:
+    case StringLength:
+    case DirectArgumentsLength:
+    case ScopedArgumentsLength:
+    case ModuleNamespaceLoad:
+        return false;
+    default:
+        return true;
+    }
+}
+
+bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
+{
+    switch (type()) {
+    case Getter:
+    case Setter:
+    case CustomValueGetter:
+    case CustomAccessorGetter:
+    case CustomValueSetter:
+    case CustomAccessorSetter:
+        return true;
+    case Transition:
+        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
+            && structure()->couldHaveIndexingHeader()) {
+            if (cellsToMark)
+                cellsToMark->append(newStructure());
+            return true;
+        }
+        return false;
+    default:
+        return false;
+    }
+}
+
+bool AccessCase::couldStillSucceed() const
+{
+    return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
+}
+
+bool AccessCase::canReplace(const AccessCase& other) const
+{
+    // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
+    // It's fine for this to return false if it's in doubt.
+
+    switch (type()) {
+    case ArrayLength:
+    case StringLength:
+    case DirectArgumentsLength:
+    case ScopedArgumentsLength:
+        return other.type() == type();
+    case ModuleNamespaceLoad: {
+        if (other.type() != type())
+            return false;
+        auto& thisCase = this->as<ModuleNamespaceAccessCase>();
+        auto& otherCase = other.as<ModuleNamespaceAccessCase>();
+        return thisCase.moduleNamespaceObject() == otherCase.moduleNamespaceObject();
+    }
+    default:
+        if (!guardedByStructureCheck() || !other.guardedByStructureCheck())
+            return false;
+
+        return structure() == other.structure();
+    }
+}
+
+void AccessCase::dump(PrintStream& out) const
+{
+    out.print(m_type, ":(");
+
+    CommaPrinter comma;
+
+    out.print(comma, m_state);
+
+    if (m_type == Transition)
+        out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
+    else if (m_structure)
+        out.print(comma, "structure = ", pointerDump(m_structure.get()));
+
+    if (isValidOffset(m_offset))
+        out.print(comma, "offset = ", m_offset);
+    if (!m_conditionSet.isEmpty())
+        out.print(comma, "conditions = ", m_conditionSet);
+
+    dumpImpl(out, comma);
+    out.print(")");
+}
+
+bool AccessCase::visitWeak(VM& vm) const
+{
+    if (m_structure && !Heap::isMarked(m_structure.get()))
+        return false;
+    if (!m_conditionSet.areStillLive())
+        return false;
+    if (isAccessor()) {
+        auto& accessor = this->as<GetterSetterAccessCase>();
+        if (accessor.callLinkInfo())
+            accessor.callLinkInfo()->visitWeak(vm);
+        if (accessor.customSlotBase() && !Heap::isMarked(accessor.customSlotBase()))
+            return false;
+    } else if (type() == IntrinsicGetter) {
+        auto& intrinsic = this->as<IntrinsicGetterAccessCase>();
+        if (intrinsic.intrinsicFunction() && !Heap::isMarked(intrinsic.intrinsicFunction()))
+            return false;
+    } else if (type() == ModuleNamespaceLoad) {
+        auto& accessCase = this->as<ModuleNamespaceAccessCase>();
+        if (accessCase.moduleNamespaceObject() && !Heap::isMarked(accessCase.moduleNamespaceObject()))
+            return false;
+        if (accessCase.moduleEnvironment() && !Heap::isMarked(accessCase.moduleEnvironment()))
+            return false;
+    }
+
+    return true;
+}
+
+bool AccessCase::propagateTransitions(SlotVisitor& visitor) const
+{
+    bool result = true;
+
+    if (m_structure)
+        result &= m_structure->markIfCheap(visitor);
+
+    switch (m_type) {
+    case Transition:
+        if (Heap::isMarkedConcurrently(m_structure->previousID()))
+            visitor.appendUnbarriered(m_structure.get());
+        else
+            result = false;
+        break;
+    default:
+        break;
+    }
+
+    return result;
+}
+
+void AccessCase::generateWithGuard(
+    AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
+{
+    SuperSamplerScope superSamplerScope(false);
+
+    RELEASE_ASSERT(m_state == Committed);
+    m_state = Generated;
+
+    CCallHelpers& jit = *state.jit;
+    VM& vm = *jit.vm();
+    JSValueRegs valueRegs = state.valueRegs;
+    GPRReg baseGPR = state.baseGPR;
+    GPRReg scratchGPR = state.scratchGPR;
+
+    UNUSED_PARAM(vm);
+
+    switch (m_type) {
+    case ArrayLength: {
+        ASSERT(!viaProxy());
+        jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeAndMiscOffset()), scratchGPR);
+        fallThrough.append(
+            jit.branchTest32(
+                CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
+        fallThrough.append(
+            jit.branchTest32(
+                CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
+        break;
+    }
+
+    case StringLength: {
+        ASSERT(!viaProxy());
+        fallThrough.append(
+            jit.branch8(
+                CCallHelpers::NotEqual,
+                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+                CCallHelpers::TrustedImm32(StringType)));
+        break;
+    }
+
+    case DirectArgumentsLength: {
+        ASSERT(!viaProxy());
+        fallThrough.append(
+            jit.branch8(
+                CCallHelpers::NotEqual,
+                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+                CCallHelpers::TrustedImm32(DirectArgumentsType)));
+
+        fallThrough.append(
+            jit.branchTestPtr(
+                CCallHelpers::NonZero,
+                CCallHelpers::Address(baseGPR, DirectArguments::offsetOfMappedArguments())));
+        jit.load32(
+            CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
+            valueRegs.payloadGPR());
+        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+        state.succeed();
+        return;
+    }
+
+    case ScopedArgumentsLength: {
+        ASSERT(!viaProxy());
+        fallThrough.append(
+            jit.branch8(
+                CCallHelpers::NotEqual,
+                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+                CCallHelpers::TrustedImm32(ScopedArgumentsType)));
+
+        fallThrough.append(
+            jit.branchTest8(
+                CCallHelpers::NonZero,
+                CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
+        jit.load32(
+            CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
+            valueRegs.payloadGPR());
+        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+        state.succeed();
+        return;
+    }
+
+    case ModuleNamespaceLoad: {
+        this->as<ModuleNamespaceAccessCase>().emit(state, fallThrough);
+        return;
+    }
+
+    default: {
+        if (viaProxy()) {
+            fallThrough.append(
+                jit.branch8(
+                    CCallHelpers::NotEqual,
+                    CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+                    CCallHelpers::TrustedImm32(PureForwardingProxyType)));
+
+            jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
+
+            fallThrough.append(
+                jit.branchStructure(
+                    CCallHelpers::NotEqual,
+                    CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
+                    structure()));
+        } else {
+            fallThrough.append(
+                jit.branchStructure(
+                    CCallHelpers::NotEqual,
+                    CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
+                    structure()));
+        }
+        break;
+    }
+    }
+
+    generateImpl(state);
+}
+
+void AccessCase::generate(AccessGenerationState& state)
+{
+    RELEASE_ASSERT(m_state == Committed);
+    m_state = Generated;
+
+    generateImpl(state);
+}
+
+void AccessCase::generateImpl(AccessGenerationState& state)
+{
+    SuperSamplerScope superSamplerScope(false);
+    if (verbose)
+        dataLog("\n\nGenerating code for: ", *this, "\n");
+
+    ASSERT(m_state == Generated); // We rely on the callers setting this for us.
+
+    CCallHelpers& jit = *state.jit;
+    VM& vm = *jit.vm();
+    CodeBlock* codeBlock = jit.codeBlock();
+    StructureStubInfo& stubInfo = *state.stubInfo;
+    const Identifier& ident = *state.ident;
+    JSValueRegs valueRegs = state.valueRegs;
+    GPRReg baseGPR = state.baseGPR;
+    GPRReg scratchGPR = state.scratchGPR;
+
+    ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
+
+    for (const ObjectPropertyCondition& condition : m_conditionSet) {
+        Structure* structure = condition.object()->structure();
+
+        if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
+            structure->addTransitionWatchpoint(state.addWatchpoint(condition));
+            continue;
+        }
+
+        if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
+            // The reason why this cannot happen is that we require that PolymorphicAccess calls
+            // AccessCase::generate() only after it has verified that
+            // AccessCase::couldStillSucceed() returned true.
+
+            dataLog("This condition is no longer met: ", condition, "\n");
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+
+        // We will emit code that has a weak reference that isn't otherwise listed anywhere.
+        state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
+
+        jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
+        state.failAndRepatch.append(
+            jit.branchStructure(
+                CCallHelpers::NotEqual,
+                CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
+                structure));
+    }
+
+    switch (m_type) {
+    case InHit:
+    case InMiss:
+        jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
+        state.succeed();
+        return;
+
+    case Miss:
+        jit.moveTrustedValue(jsUndefined(), valueRegs);
+        state.succeed();
+        return;
+
+    case Load:
+    case GetGetter:
+    case Getter:
+    case Setter:
+    case CustomValueGetter:
+    case CustomAccessorGetter:
+    case CustomValueSetter:
+    case CustomAccessorSetter: {
+        GPRReg valueRegsPayloadGPR = valueRegs.payloadGPR();
+
+        if (isValidOffset(m_offset)) {
+            Structure* currStructure;
+            if (m_conditionSet.isEmpty())
+                currStructure = structure();
+            else
+                currStructure = m_conditionSet.slotBaseCondition().object()->structure();
+            currStructure->startWatchingPropertyForReplacements(vm, offset());
+        }
+
+        GPRReg baseForGetGPR;
+        if (viaProxy()) {
+            ASSERT(m_type != CustomValueSetter && m_type != CustomAccessorSetter); // Because setters must not trash valueRegsPayloadGPR.
+            if (m_type == Getter || m_type == Setter)
+                baseForGetGPR = scratchGPR;
+            else
+                baseForGetGPR = valueRegsPayloadGPR;
+
+            ASSERT((m_type != Getter && m_type != Setter) || baseForGetGPR != baseGPR);
+            ASSERT(m_type != Setter || baseForGetGPR != valueRegsPayloadGPR);
+
+            jit.loadPtr(
+                CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
+                baseForGetGPR);
+        } else
+            baseForGetGPR = baseGPR;
+
+        GPRReg baseForAccessGPR;
+        if (!m_conditionSet.isEmpty()) {
+            jit.move(
+                CCallHelpers::TrustedImmPtr(alternateBase()),
+                scratchGPR);
+            baseForAccessGPR = scratchGPR;
+        } else
+            baseForAccessGPR = baseForGetGPR;
+
+        GPRReg loadedValueGPR = InvalidGPRReg;
+        if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
+            if (m_type == Load || m_type == GetGetter)
+                loadedValueGPR = valueRegsPayloadGPR;
+            else
+                loadedValueGPR = scratchGPR;
+
+            ASSERT((m_type != Getter && m_type != Setter) || loadedValueGPR != baseGPR);
+            ASSERT(m_type != Setter || loadedValueGPR != valueRegsPayloadGPR);
+
+            GPRReg storageGPR;
+            if (isInlineOffset(m_offset))
+                storageGPR = baseForAccessGPR;
+            else {
+                jit.loadPtr(
+                    CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
+                    loadedValueGPR);
+                storageGPR = loadedValueGPR;
+            }
+
+#if USE(JSVALUE64)
+            jit.load64(
+                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
+#else
+            if (m_type == Load || m_type == GetGetter) {
+                jit.load32(
+                    CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
+                    valueRegs.tagGPR());
+            }
+            jit.load32(
+                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
+                loadedValueGPR);
+#endif
+        }
+
+        if (m_type == Load || m_type == GetGetter) {
+            state.succeed();
+            return;
+        }
+
+        if (Options::useDOMJIT() && m_type == CustomAccessorGetter && this->as<GetterSetterAccessCase>().domJIT()) {
+            auto& access = this->as<GetterSetterAccessCase>();
+            // We do not need to emit a CheckDOM operation, since the structure check
+            // ensures that the structure of the given base value is structure(). So
+            // all we need to do is perform the CheckDOM step here, at IC compile time.
+            if (structure()->classInfo()->isSubClassOf(access.domJIT()->thisClassInfo())) {
+                access.emitDOMJITGetter(state, baseForGetGPR);
+                return;
+            }
+        }
+
+        // Stuff for custom getters/setters.
+        CCallHelpers::Call operationCall;
+
+        // Stuff for JS getters/setters.
+        CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
+        CCallHelpers::Call fastPathCall;
+        CCallHelpers::Call slowPathCall;
+
+        // This also does the necessary calculations of whether or not we're an
+        // exception handling call site.
+        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall();
+
+        auto restoreLiveRegistersFromStackForCall = [&](AccessGenerationState::SpillState& spillState, bool callHasReturnValue) {
+            RegisterSet dontRestore;
+            if (callHasReturnValue) {
+                // This is the result value. We don't want to overwrite the result with what we stored to the stack.
+                // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
+                dontRestore.set(valueRegs);
+            }
+            state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+        };
+
+        jit.store32(
+            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+
+        if (m_type == Getter || m_type == Setter) {
+            auto& access = this->as<GetterSetterAccessCase>();
+            ASSERT(baseGPR != loadedValueGPR);
+            ASSERT(m_type != Setter || (baseGPR != valueRegsPayloadGPR && loadedValueGPR != valueRegsPayloadGPR));
+
+            // Create a JS call using a JS call inline cache. Assume that:
+            //
+            // - SP is aligned and represents the extent of the calling compiler's stack usage.
+            //
+            // - FP is set correctly (i.e. it points to the caller's call frame header).
+            //
+            // - SP - FP is an aligned difference.
+            //
+            // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
+            //   code.
+            //
+            // Therefore, we temporarily grow the stack for the purpose of the call and then
+            // shrink it after.
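+            //
+            // As a worked sketch of the arithmetic below: a getter call passes
+            // only "this", so numberOfRegsForCall is headerSizeInRegisters + 1.
+            // That is converted to bytes, minus the CallerFrameAndPC that the
+            // call itself will store, and then rounded up to the stack
+            // alignment before being subtracted from SP.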
+
+            state.setSpillStateForJSGetterSetter(spillState);
+
+            RELEASE_ASSERT(!access.callLinkInfo());
+            access.m_callLinkInfo = std::make_unique<CallLinkInfo>();
+
+            // FIXME: If we generated a polymorphic call stub that jumped back to the getter
+            // stub, which then jumped back to the main code, then we'd have a reachability
+            // situation that the GC doesn't know about. The GC would ensure that the polymorphic
+            // call stub stayed alive, and it would ensure that the main code stayed alive, but
+            // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
+            // be GC objects, and then we'd be able to say that the polymorphic call stub has a
+            // reference to the getter stub.
+            // https://bugs.webkit.org/show_bug.cgi?id=148914
+            access.callLinkInfo()->disallowStubs();
+
+            access.callLinkInfo()->setUpCall(
+                CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
+
+            CCallHelpers::JumpList done;
+
+            // There is a "this" argument.
+            unsigned numberOfParameters = 1;
+            // ... and a value argument if we're calling a setter.
+            if (m_type == Setter)
+                numberOfParameters++;
+
+            // Get the accessor; if there ain't one then the result is jsUndefined().
+            if (m_type == Setter) {
+                jit.loadPtr(
+                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
+                    loadedValueGPR);
+            } else {
+                jit.loadPtr(
+                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
+                    loadedValueGPR);
+            }
+
+            CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
+                CCallHelpers::Zero, loadedValueGPR);
+
+            unsigned numberOfRegsForCall = CallFrame::headerSizeInRegisters + numberOfParameters;
+            unsigned numberOfBytesForCall = numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
+
+            unsigned alignedNumberOfBytesForCall =
+                WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
+
+            jit.subPtr(
+                CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
+                CCallHelpers::stackPointerRegister);
+
+            CCallHelpers::Address calleeFrame = CCallHelpers::Address(
+                CCallHelpers::stackPointerRegister,
+                -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
+
+            jit.store32(
+                CCallHelpers::TrustedImm32(numberOfParameters),
+                calleeFrame.withOffset(CallFrameSlot::argumentCount * sizeof(Register) + PayloadOffset));
+
+            jit.storeCell(
+                loadedValueGPR, calleeFrame.withOffset(CallFrameSlot::callee * sizeof(Register)));
+
+            jit.storeCell(
+                baseGPR,
+                calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
+
+            if (m_type == Setter) {
+                jit.storeValue(
+                    valueRegs,
+                    calleeFrame.withOffset(
+                        virtualRegisterForArgument(1).offset() * sizeof(Register)));
+            }
+
+            CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
+                CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
+                CCallHelpers::TrustedImmPtr(0));
+
+            fastPathCall = jit.nearCall();
+            if (m_type == Getter)
+                jit.setupResults(valueRegs);
+            done.append(jit.jump());
+
+            slowCase.link(&jit);
+            jit.move(loadedValueGPR, GPRInfo::regT0);
+#if USE(JSVALUE32_64)
+            // We *always* know that the getter/setter, if non-null, is a cell.
+            jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+#endif
+            jit.move(CCallHelpers::TrustedImmPtr(access.callLinkInfo()), GPRInfo::regT2);
+            slowPathCall = jit.nearCall();
+            if (m_type == Getter)
+                jit.setupResults(valueRegs);
+            done.append(jit.jump());
+
+            returnUndefined.link(&jit);
+            if (m_type == Getter)
+                jit.moveTrustedValue(jsUndefined(), valueRegs);
+
+            done.link(&jit);
+
+            jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation),
+                GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+            bool callHasReturnValue = isGetter();
+            restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
+
+            jit.addLinkTask([=, &vm] (LinkBuffer& linkBuffer) {
+                this->as<GetterSetterAccessCase>().callLinkInfo()->setCallLocations(
+                    CodeLocationLabel(linkBuffer.locationOfNearCall(slowPathCall)),
+                    CodeLocationLabel(linkBuffer.locationOf(addressOfLinkFunctionCheck)),
+                    linkBuffer.locationOfNearCall(fastPathCall));
+
+                linkBuffer.link(
+                    slowPathCall,
+                    CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
+            });
+        } else {
+            ASSERT(m_type == CustomValueGetter || m_type == CustomAccessorGetter || m_type == CustomValueSetter || m_type == CustomAccessorSetter);
+
+            // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
+            // hard to track if someone did spillage or not, so we just assume that we always need
+            // to make some space here.
+            jit.makeSpaceOnStackForCCall();
+
+            // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
+            // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
+            // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (receiver).
+            // FIXME: Remove this difference between custom values and custom accessors.
+            // https://bugs.webkit.org/show_bug.cgi?id=158014
+            GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
+#if USE(JSVALUE64)
+            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
+                jit.setupArgumentsWithExecState(
+                    baseForCustomValue,
+                    CCallHelpers::TrustedImmPtr(ident.impl()));
+            } else
+                jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
+#else
+            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
+                jit.setupArgumentsWithExecState(
+                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
+                    CCallHelpers::TrustedImm32(JSValue::CellTag),
+                    CCallHelpers::TrustedImmPtr(ident.impl()));
+            } else {
+                jit.setupArgumentsWithExecState(
+                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
+                    CCallHelpers::TrustedImm32(JSValue::CellTag),
+                    valueRegs.payloadGPR(), valueRegs.tagGPR());
+            }
+#endif
+            jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
+
+            operationCall = jit.call();
+            jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+                linkBuffer.link(operationCall, FunctionPtr(this->as<GetterSetterAccessCase>().m_customAccessor.opaque));
+            });
+
+            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
+                jit.setupResults(valueRegs);
+            jit.reclaimSpaceOnStackForCCall();
+
+            CCallHelpers::Jump noException =
+                jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+            state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+            state.emitExplicitExceptionHandler();
+
+            noException.link(&jit);
+            bool callHasReturnValue = isGetter();
+            restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
+        }
+        state.succeed();
+        return;
+    }
+
+    case Replace: {
+        if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
+            if (verbose)
+                dataLog("Have type: ", type->descriptor(), "\n");
+            state.failAndRepatch.append(
+                jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
+        } else if (verbose)
+            dataLog("Don't have type.\n");
+
+        if (isInlineOffset(m_offset)) {
+            jit.storeValue(
+                valueRegs,
+                CCallHelpers::Address(
+                    baseGPR,
+                    JSObject::offsetOfInlineStorage() +
+                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
+        } else {
+            jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+            jit.storeValue(
+                valueRegs,
+                CCallHelpers::Address(
+                    scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
+        }
+        state.succeed();
+        return;
+    }
+
+    case Transition: {
+        // AccessCase::transition() should have returned null if this wasn't true.
+        RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
+
+        if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
+            if (verbose)
+                dataLog("Have type: ", type->descriptor(), "\n");
+            state.failAndRepatch.append(
+                jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
+        } else if (verbose)
+            dataLog("Don't have type.\n");
+
+        // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
+        // exactly when this would make calls.
+        bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
+        bool reallocating = allocating && structure()->outOfLineCapacity();
+        bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
+
+        ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+        allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+        allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+        allocator.lock(valueRegs);
+        allocator.lock(scratchGPR);
+
+        GPRReg scratchGPR2 = InvalidGPRReg;
+        GPRReg scratchGPR3 = InvalidGPRReg;
+        if (allocatingInline) {
+            scratchGPR2 = allocator.allocateScratchGPR();
+            scratchGPR3 = allocator.allocateScratchGPR();
+        }
+
+        ScratchRegisterAllocator::PreservedState preservedState =
+            allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+
+        CCallHelpers::JumpList slowPath;
+
+        ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
+
+        if (allocating) {
+            size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
+
+            if (allocatingInline) {
+                MarkedAllocator* allocator = vm.auxiliarySpace.allocatorFor(newSize);
+
+                if (!allocator) {
+                    // Yuck, this case would suck!
+                    slowPath.append(jit.jump());
+                }
+
+                jit.move(CCallHelpers::TrustedImmPtr(allocator), scratchGPR2);
+                jit.emitAllocate(scratchGPR, allocator, scratchGPR2, scratchGPR3, slowPath);
+                jit.addPtr(CCallHelpers::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR);
+
+                size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
+                ASSERT(newSize > oldSize);
+
+                if (reallocating) {
+                    // Handle the case where we are reallocating (i.e. the old structure/butterfly
+                    // already had out-of-line property storage).
+
+                    jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
+
+                    // We have scratchGPR = new storage, scratchGPR3 = old storage,
+                    // scratchGPR2 = available
+                    for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
+                        jit.loadPtr(
+                            CCallHelpers::Address(
+                                scratchGPR3,
+                                -static_cast<ptrdiff_t>(
+                                    offset + sizeof(JSValue) + sizeof(void*))),
+                            scratchGPR2);
+                        jit.storePtr(
+                            scratchGPR2,
+                            CCallHelpers::Address(
+                                scratchGPR,
+                                -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
+                    }
+                }
+
+                for (size_t offset = oldSize; offset < newSize; offset += sizeof(void*))
+                    jit.storePtr(CCallHelpers::TrustedImmPtr(0), CCallHelpers::Address(scratchGPR, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
+            } else {
+                // Handle the case where we are allocating out-of-line using an operation.
+                RegisterSet extraRegistersToPreserve;
+                extraRegistersToPreserve.set(baseGPR);
+                extraRegistersToPreserve.set(valueRegs);
+                AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
+
+                jit.store32(
+                    CCallHelpers::TrustedImm32(
+                        state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+                    CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+
+                jit.makeSpaceOnStackForCCall();
+
+                if (!reallocating) {
+                    jit.setupArgumentsWithExecState(baseGPR);
+
+                    CCallHelpers::Call operationCall = jit.call();
+                    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+                        linkBuffer.link(
+                            operationCall,
+                            FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
+                    });
+                } else {
+                    // Handle the case where we are reallocating (i.e. the old structure/butterfly
+                    // already had out-of-line property storage).
+                    jit.setupArgumentsWithExecState(
+                        baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
+
+                    CCallHelpers::Call operationCall = jit.call();
+                    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+                        linkBuffer.link(
+                            operationCall,
+                            FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
+                    });
+                }
+
+                jit.reclaimSpaceOnStackForCCall();
+                jit.move(GPRInfo::returnValueGPR, scratchGPR);
+
+                CCallHelpers::Jump noException = jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+                state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+                state.emitExplicitExceptionHandler();
+
+                noException.link(&jit);
+                state.restoreLiveRegistersFromStackForCall(spillState);
+            }
+        }
+
+        if (isInlineOffset(m_offset)) {
+            jit.storeValue(
+                valueRegs,
+                CCallHelpers::Address(
+                    baseGPR,
+                    JSObject::offsetOfInlineStorage() +
+                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
+        } else {
+            if (!allocating)
+                jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+            jit.storeValue(
+                valueRegs,
+                CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
+        }
+
+        if (allocatingInline) {
+            // We set the new butterfly and the structure last. Doing it this way ensures that
+            // whatever we had done up to this point is forgotten if we choose to branch to slow
+            // path.
+            jit.nukeStructureAndStoreButterfly(scratchGPR, baseGPR);
+        }
+
+        uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
+        jit.store32(
+            CCallHelpers::TrustedImm32(structureBits),
+            CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
+
+        allocator.restoreReusedRegistersByPopping(jit, preservedState);
+        state.succeed();
+
+        // We will have a slow path if we were allocating without the help of an operation.
+        if (allocatingInline) {
+            if (allocator.didReuseRegisters()) {
+                slowPath.link(&jit);
+                allocator.restoreReusedRegistersByPopping(jit, preservedState);
+                state.failAndIgnore.append(jit.jump());
+            } else
+                state.failAndIgnore.append(slowPath);
+        } else
+            RELEASE_ASSERT(slowPath.empty());
+        return;
+    }
+
+    case ArrayLength: {
+        jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+        jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
+        state.failAndIgnore.append(
+            jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
+        jit.boxInt32(scratchGPR, valueRegs);
+        state.succeed();
+        return;
+    }
+
+    case StringLength: {
+        jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
+        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+        state.succeed();
+        return;
+    }
+
+    case IntrinsicGetter: {
+        RELEASE_ASSERT(isValidOffset(offset()));
+        
+        // We need to ensure the getter value does not move from under us. Note that GetterSetters
+        // are immutable so we just need to watch the property not any value inside it.
+        Structure* currStructure;
+        if (m_conditionSet.isEmpty())
+            currStructure = structure();
+        else
+            currStructure = m_conditionSet.slotBaseCondition().object()->structure();
+        currStructure->startWatchingPropertyForReplacements(vm, offset());
+        
+        this->as<IntrinsicGetterAccessCase>().emitIntrinsicGetter(state);
+        return;
+    }
+
+    case DirectArgumentsLength:
+    case ScopedArgumentsLength:
+    case ModuleNamespaceLoad:
+        // These need to be handled by generateWithGuard(), since the guard is part of the
+        // algorithm. We can be sure that nobody will call generate() directly for these since they
+        // are not guarded by structure checks.
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/bytecode/AccessCase.h b/Source/JavaScriptCore/bytecode/AccessCase.h
new file mode 100644
index 000000000..9f8a20063
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AccessCase.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "JSFunctionInlines.h"
+#include "ObjectPropertyConditionSet.h"
+
+namespace JSC {
+
+struct AccessGenerationState;
+
+// An AccessCase describes one of the cases of a PolymorphicAccess. A PolymorphicAccess represents a
+// planned (to generate in future) or generated stub for some inline cache. That stub contains fast
+// path code for some finite number of fast cases, each described by an AccessCase object.
+//
+// An AccessCase object has a lifecycle that proceeds through several states. Note that the states
+// of AccessCase have a lot to do with the global effect epoch (we'll say epoch for short). This is
+// a simple way of reasoning about the state of the system outside this AccessCase. Any observable
+// effect - like storing to a property, changing an object's structure, etc. - increments the epoch.
+// The states are:
+//
+// Primordial:   This is an AccessCase that was just allocated. It does not correspond to any actual
+//               code and it is not owned by any PolymorphicAccess. In this state, the AccessCase
+//               assumes that it is in the same epoch as when it was created. This is important
+//               because it may make claims about itself ("I represent a valid case so long as you
+//               register a watchpoint on this set") that could be contradicted by some outside
+//               effects (like firing and deleting the watchpoint set in question). This is also the
+//               state that an AccessCase is in when it is cloned (AccessCase::clone()).
+//
+// Committed:    This happens as soon as some PolymorphicAccess takes ownership of this AccessCase.
+//               In this state, the AccessCase no longer assumes anything about the epoch. To
+//               accomplish this, PolymorphicAccess calls AccessCase::commit(). This must be done
+//               during the same epoch when the AccessCase was created, either by the client or by
+//               clone(). When created by the client, committing during the same epoch works because
+//               we can be sure that whatever watchpoint sets they spoke of are still valid. When
+//               created by clone(), we can be sure that the set is still valid because the original
+//               of the clone still has watchpoints on it.
+//
+// Generated:    This is the state when the PolymorphicAccess generates code for this case by
+//               calling AccessCase::generate() or AccessCase::generateWithGuard(). At this point
+//               the case object will have some extra stuff in it, like possibly the CallLinkInfo
+//               object associated with the inline cache.
+//               FIXME: Moving into the Generated state should not mutate the AccessCase object or
+//               put more stuff into it. If we fix this, then we can get rid of AccessCase::clone().
+//               https://bugs.webkit.org/show_bug.cgi?id=156456
+//
+// An AccessCase may be destroyed while in any of these states.
+//
+// We will sometimes buffer committed AccessCases in the PolymorphicAccess object before generating
+// code. This allows us to only regenerate once we've accumulated (hopefully) more than one new
+// AccessCase.
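+//
+// A sketch of the lifecycle in terms of this interface, as PolymorphicAccess (a
+// friend of this class) drives it, eliding ownership and error handling:
+//
+//     auto accessCase = AccessCase::create(vm, owner, Replace, offset, structure); // Primordial
+//     auto watchpoints = accessCase->commit(vm, ident);                            // Committed
+//     accessCase->generate(state);                                                 // Generated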
+class AccessCase {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    enum AccessType : uint8_t {
+        Load,
+        Transition,
+        Replace,
+        Miss,
+        GetGetter,
+        Getter,
+        Setter,
+        CustomValueGetter,
+        CustomAccessorGetter,
+        CustomValueSetter,
+        CustomAccessorSetter,
+        IntrinsicGetter,
+        InHit,
+        InMiss,
+        ArrayLength,
+        StringLength,
+        DirectArgumentsLength,
+        ScopedArgumentsLength,
+        ModuleNamespaceLoad,
+    };
+
+    enum State : uint8_t {
+        Primordial,
+        Committed,
+        Generated
+    };
+
+    template<typename T>
+    T& as() { return *static_cast<T*>(this); }
+
+    template<typename T>
+    const T& as() const { return *static_cast<const T*>(this); }
+
+
+    template<typename AccessCaseType, typename... Arguments>
+    static std::unique_ptr<AccessCaseType> create(Arguments... arguments)
+    {
+        return std::unique_ptr<AccessCaseType>(new AccessCaseType(arguments...));
+    }
+
+    static std::unique_ptr<AccessCase> create(VM&, JSCell* owner, AccessType, PropertyOffset = invalidOffset,
+        Structure* = nullptr, const ObjectPropertyConditionSet& = ObjectPropertyConditionSet());
+
+    // This create method should be used for transitions.
+    static std::unique_ptr<AccessCase> create(VM&, JSCell* owner, PropertyOffset, Structure* oldStructure,
+        Structure* newStructure, const ObjectPropertyConditionSet& = ObjectPropertyConditionSet());
+
+    static std::unique_ptr<AccessCase> fromStructureStubInfo(VM&, JSCell* owner, StructureStubInfo&);
+
+    AccessType type() const { return m_type; }
+    State state() const { return m_state; }
+    PropertyOffset offset() const { return m_offset; }
+
+    Structure* structure() const
+    {
+        if (m_type == Transition)
+            return m_structure->previousID();
+        return m_structure.get();
+    }
+    bool guardedByStructureCheck() const;
+
+    Structure* newStructure() const
+    {
+        ASSERT(m_type == Transition);
+        return m_structure.get();
+    }
+
+    ObjectPropertyConditionSet conditionSet() const { return m_conditionSet; }
+
+    virtual JSObject* alternateBase() const { return conditionSet().slotBaseCondition().object(); }
+    virtual WatchpointSet* additionalSet() const { return nullptr; }
+    virtual bool viaProxy() const { return false; }
+
+    // If you supply the optional vector, this will append the set of cells that this will need to keep alive
+    // past the call.
+    bool doesCalls(Vector<JSCell*>* cellsToMark = nullptr) const;
+
+    bool isGetter() const
+    {
+        switch (type()) {
+        case Getter:
+        case CustomValueGetter:
+        case CustomAccessorGetter:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isAccessor() const { return isGetter() || type() == Setter; }
+
+    // Is it still possible for this case to ever be taken? Must call this as a prerequisite for
+    // calling generate() and friends. If this returns true, then you can call generate(). If
+    // this returns false, then generate() will crash. You must call generate() in the same epoch
+    // as when you called couldStillSucceed().
+    bool couldStillSucceed() const;
+
+    // If this method returns true, then it's a good idea to remove 'other' from the access once 'this'
+    // is added. This method assumes that in case of contradictions, 'this' represents a newer, and so
+    // more useful, truth. This method can be conservative; it will return false when in doubt.
+    bool canReplace(const AccessCase& other) const;
+
+    void dump(PrintStream& out) const;
+    virtual void dumpImpl(PrintStream&, CommaPrinter&) const { }
+
+    virtual ~AccessCase();
+
+protected:
+    AccessCase(VM&, JSCell* owner, AccessType, PropertyOffset, Structure*, const ObjectPropertyConditionSet&);
+    AccessCase(const AccessCase&) = default;
+    AccessCase& operator=(const AccessCase&) = delete;
+    void resetState() { m_state = Primordial; }
+
+private:
+    friend class CodeBlock;
+    friend class PolymorphicAccess;
+
+    bool visitWeak(VM&) const;
+    bool propagateTransitions(SlotVisitor&) const;
+
+    // FIXME: This only exists because of how AccessCase puts post-generation things into itself.
+    // https://bugs.webkit.org/show_bug.cgi?id=156456
+    virtual std::unique_ptr<AccessCase> clone() const;
+
+    // Perform any action that must be performed before the end of the epoch in which the case
+    // was created. Returns a set of watchpoint sets that will need to be watched.
+    Vector<WatchpointSet*, 2> commit(VM&, const Identifier&);
+
+    // Fall through on success. Two kinds of failures are supported: fall-through, which means that we
+    // should try a different case; and failure, which means that this was the right case but it needs
+    // help from the slow path.
+    void generateWithGuard(AccessGenerationState&, MacroAssembler::JumpList& fallThrough);
+
+    // Fall through on success, add a jump to the failure list on failure.
+    void generate(AccessGenerationState&);
+
+    void generateImpl(AccessGenerationState&);
+
+    AccessType m_type;
+    State m_state { Primordial };
+    PropertyOffset m_offset;
+
+    // Usually this is the structure that we expect the base object to have. But, this is the *new*
+    // structure for a transition and we rely on the fact that it has a strong reference to the old
+    // structure. For proxies, this is the structure of the object behind the proxy.
+    WriteBarrier<Structure> m_structure;
+
+    ObjectPropertyConditionSet m_conditionSet;
+};
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
new file mode 100644
index 000000000..2fd703150
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AdaptiveInferredPropertyValueWatchpointBase.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+AdaptiveInferredPropertyValueWatchpointBase::AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition& key)
+    : m_key(key)
+{
+    RELEASE_ASSERT(key.kind() == PropertyCondition::Equivalence);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::install()
+{
+    RELEASE_ASSERT(m_key.isWatchable());
+
+    m_key.object()->structure()->addTransitionWatchpoint(&m_structureWatchpoint);
+
+    PropertyOffset offset = m_key.object()->structure()->getConcurrently(m_key.uid());
+    WatchpointSet* set = m_key.object()->structure()->propertyReplacementWatchpointSet(offset);
+    set->add(&m_propertyWatchpoint);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::fire(const FireDetail& detail)
+{
+    // One of the watchpoints fired, but the other one didn't. Make sure that neither of them is
+    // in any set anymore. This simplifies things by letting us reinstall the watchpoints from
+    // scratch, wherever they need to go.
+    if (m_structureWatchpoint.isOnList())
+        m_structureWatchpoint.remove();
+    if (m_propertyWatchpoint.isOnList())
+        m_propertyWatchpoint.remove();
+
+    if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+        install();
+        return;
+    }
+
+    handleFire(detail);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::StructureWatchpoint::fireInternal(const FireDetail& detail)
+{
+    ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_structureWatchpoint);
+
+    AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset);
+
+    parent->fire(detail);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::PropertyWatchpoint::fireInternal(const FireDetail& detail)
+{
+    ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_propertyWatchpoint);
+
+    AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset);
+    
+    parent->fire(detail);
+}
+    
+} // namespace JSC
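// Editorial note (not part of this patch): the two fireInternal() overrides
// above use the classic "container_of" idiom -- each Watchpoint is embedded
// by value, so subtracting the member's offset from 'this' recovers the
// enclosing object. A minimal standalone sketch of the same idiom:
//
//     #include <cstddef>
//     struct Outer {
//         int member;
//     };
//     Outer* outerFromMember(int* p)
//     {
//         // Walk back from the member's address to the start of Outer.
//         return reinterpret_cast<Outer*>(
//             reinterpret_cast<char*>(p) - offsetof(Outer, member));
//     }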
diff --git a/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
new file mode 100644
index 000000000..410a93fc9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ObjectPropertyCondition.h"
+#include "Watchpoint.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class AdaptiveInferredPropertyValueWatchpointBase {
+    WTF_MAKE_NONCOPYABLE(AdaptiveInferredPropertyValueWatchpointBase);
+    WTF_MAKE_FAST_ALLOCATED;
+
+public:
+    AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition&);
+
+    const ObjectPropertyCondition& key() const { return m_key; }
+
+    void install();
+
+    virtual ~AdaptiveInferredPropertyValueWatchpointBase() = default;
+
+protected:
+    virtual void handleFire(const FireDetail&) = 0;
+
+private:
+    class StructureWatchpoint : public Watchpoint {
+    public:
+        StructureWatchpoint() { }
+    protected:
+        void fireInternal(const FireDetail&) override;
+    };
+    class PropertyWatchpoint : public Watchpoint {
+    public:
+        PropertyWatchpoint() { }
+    protected:
+        void fireInternal(const FireDetail&) override;
+    };
+
+    void fire(const FireDetail&);
+
+    ObjectPropertyCondition m_key;
+    StructureWatchpoint m_structureWatchpoint;
+    PropertyWatchpoint m_propertyWatchpoint;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ArithProfile.cpp b/Source/JavaScriptCore/bytecode/ArithProfile.cpp
new file mode 100644
index 000000000..1fa7c7989
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ArithProfile.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ArithProfile.h"
+
+#include "CCallHelpers.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+#if ENABLE(JIT)
+void ArithProfile::emitObserveResult(CCallHelpers& jit, JSValueRegs regs, TagRegistersMode mode)
+{
+    if (!shouldEmitSetDouble() && !shouldEmitSetNonNumber())
+        return;
+
+    CCallHelpers::Jump isInt32 = jit.branchIfInt32(regs, mode);
+    CCallHelpers::Jump notDouble = jit.branchIfNotDoubleKnownNotInt32(regs, mode);
+    emitSetDouble(jit);
+    CCallHelpers::Jump done = jit.jump();
+    notDouble.link(&jit);
+    emitSetNonNumber(jit);
+    done.link(&jit);
+    isInt32.link(&jit);
+}
+
+bool ArithProfile::shouldEmitSetDouble() const
+{
+    uint32_t mask = ArithProfile::Int32Overflow | ArithProfile::Int52Overflow | ArithProfile::NegZeroDouble | ArithProfile::NonNegZeroDouble;
+    return (m_bits & mask) != mask;
+}
+
+void ArithProfile::emitSetDouble(CCallHelpers& jit) const
+{
+    if (shouldEmitSetDouble())
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::Int32Overflow | ArithProfile::Int52Overflow | ArithProfile::NegZeroDouble | ArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(addressOfBits()));
+}
+
+bool ArithProfile::shouldEmitSetNonNumber() const
+{
+    uint32_t mask = ArithProfile::NonNumber;
+    return (m_bits & mask) != mask;
+}
+
+void ArithProfile::emitSetNonNumber(CCallHelpers& jit) const
+{
+    if (shouldEmitSetNonNumber())
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NonNumber), CCallHelpers::AbsoluteAddress(addressOfBits()));
+}
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
+namespace WTF {
+    
+using namespace JSC;
+
+void printInternal(PrintStream& out, const ArithProfile& profile)
+{
+    const char* separator = "";
+
+    out.print("Result:<");
+    if (!profile.didObserveNonInt32()) {
+        out.print("Int32");
+        separator = "|";
+    } else {
+        if (profile.didObserveNegZeroDouble()) {
+            out.print(separator, "NegZeroDouble");
+            separator = "|";
+        }
+        if (profile.didObserveNonNegZeroDouble()) {
+            out.print(separator, "NonNegZeroDouble");
+            separator = "|";
+        }
+        if (profile.didObserveNonNumber()) {
+            out.print(separator, "NonNumber");
+            separator = "|";
+        }
+        if (profile.didObserveInt32Overflow()) {
+            out.print(separator, "Int32Overflow");
+            separator = "|";
+        }
+        if (profile.didObserveInt52Overflow()) {
+            out.print(separator, "Int52Overflow");
+            separator = "|";
+        }
+    }
+    if (profile.tookSpecialFastPath())
+        out.print(separator, "Took special fast path.");
+    out.print(">");
+
+    out.print(" LHS ObservedType:<");
+    out.print(profile.lhsObservedType());
+    out.print("> RHS ObservedType:<");
+    out.print(profile.rhsObservedType());
+    out.print(">");
+
+    out.print(" LHS ResultType:<", RawPointer(bitwise_cast(static_cast(profile.lhsResultType().bits()))));
+    out.print("> RHS ResultType:<", RawPointer(bitwise_cast(static_cast(profile.rhsResultType().bits()))));
+    out.print(">");
+}
+
+void printInternal(PrintStream& out, const JSC::ObservedType& observedType)
+{
+    const char* separator = "";
+    if (observedType.sawInt32()) {
+        out.print(separator, "Int32");
+        separator = "|";
+    }
+    if (observedType.sawNumber()) {
+        out.print(separator, "Number");
+        separator = "|";
+    }
+    if (observedType.sawNonNumber()) {
+        out.print(separator, "NonNumber");
+        separator = "|";
+    }
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/ArithProfile.h b/Source/JavaScriptCore/bytecode/ArithProfile.h
new file mode 100644
index 000000000..40fad1be3
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ArithProfile.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "GPRInfo.h"
+#include "JSCJSValue.h"
+#include "ResultType.h"
+#include "TagRegistersMode.h"
+
+namespace JSC {
+
+class CCallHelpers;
+
+struct ObservedType {
+    ObservedType(uint8_t bits = TypeEmpty)
+        : m_bits(bits)
+    { }
+
+    bool sawInt32() const { return m_bits & TypeInt32; }
+    bool isOnlyInt32() const { return m_bits == TypeInt32; }
+    bool sawNumber() const { return m_bits & TypeNumber; }
+    bool isOnlyNumber() const { return m_bits == TypeNumber; }
+    bool sawNonNumber() const { return m_bits & TypeNonNumber; }
+    bool isOnlyNonNumber() const { return m_bits == TypeNonNumber; }
+    bool isEmpty() const { return !m_bits; }
+    uint8_t bits() const { return m_bits; }
+
+    ObservedType withInt32() const { return ObservedType(m_bits | TypeInt32); }
+    ObservedType withNumber() const { return ObservedType(m_bits | TypeNumber); }
+    ObservedType withNonNumber() const { return ObservedType(m_bits | TypeNonNumber); }
+    ObservedType withoutNonNumber() const { return ObservedType(m_bits & ~TypeNonNumber); }
+
+    bool operator==(const ObservedType& other) const { return m_bits == other.m_bits; }
+
+    static const uint8_t TypeEmpty = 0x0;
+    static const uint8_t TypeInt32 = 0x1;
+    static const uint8_t TypeNumber = 0x02;
+    static const uint8_t TypeNonNumber = 0x04;
+
+    static const uint32_t numBitsNeeded = 3;
+
+private:
+    uint8_t m_bits;
+};
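// Illustrative only (editorial, not part of this patch): ObservedType
// accumulates observations monotonically via the with*() helpers.
//
//     ObservedType t;            // isEmpty() == true
//     t = t.withInt32();         // sawInt32() && isOnlyInt32()
//     t = t.withNumber();        // sawInt32() && sawNumber(); isOnlyInt32() is now false
//     t = t.withoutNonNumber();  // no-op here; would clear a prior NonNumber observation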
+
+struct ArithProfile {
+private:
+    static const uint32_t numberOfFlagBits = 5;
+    static const uint32_t rhsResultTypeShift = numberOfFlagBits;
+    static const uint32_t lhsResultTypeShift = rhsResultTypeShift + ResultType::numBitsNeeded;
+    static const uint32_t rhsObservedTypeShift = lhsResultTypeShift + ResultType::numBitsNeeded;
+    static const uint32_t lhsObservedTypeShift = rhsObservedTypeShift + ObservedType::numBitsNeeded;
+
+    static_assert(ObservedType::numBitsNeeded == 3, "We make a hard assumption about that here.");
+    static const uint32_t clearRhsObservedTypeBitMask = static_cast<uint32_t>(~((1 << rhsObservedTypeShift) | (1 << (rhsObservedTypeShift + 1)) | (1 << (rhsObservedTypeShift + 2))));
+    static const uint32_t clearLhsObservedTypeBitMask = static_cast<uint32_t>(~((1 << lhsObservedTypeShift) | (1 << (lhsObservedTypeShift + 1)) | (1 << (lhsObservedTypeShift + 2))));
+
+    static const uint32_t resultTypeMask = (1 << ResultType::numBitsNeeded) - 1;
+    static const uint32_t observedTypeMask = (1 << ObservedType::numBitsNeeded) - 1;
+public:
+    static const uint32_t specialFastPathBit = 1 << (lhsObservedTypeShift + ObservedType::numBitsNeeded);
+    static_assert((lhsObservedTypeShift + ObservedType::numBitsNeeded) <= (sizeof(uint32_t) * 8) - 1, "Should fit in a uint32_t.");
+    static_assert(!(specialFastPathBit & ~clearLhsObservedTypeBitMask), "These bits should not intersect.");
+    static_assert(specialFastPathBit & clearLhsObservedTypeBitMask, "These bits should intersect.");
+    static_assert(specialFastPathBit > ~clearLhsObservedTypeBitMask, "These bits should not intersect and specialFastPathBit should be a higher bit.");
+
+    ArithProfile(ResultType arg)
+    {
+        m_bits = (arg.bits() << lhsResultTypeShift);
+        ASSERT(lhsResultType().bits() == arg.bits());
+        ASSERT(lhsObservedType().isEmpty());
+        ASSERT(rhsObservedType().isEmpty());
+    }
+
+    ArithProfile(ResultType lhs, ResultType rhs)
+    {
+        m_bits = (lhs.bits() << lhsResultTypeShift) | (rhs.bits() << rhsResultTypeShift);
+        ASSERT(lhsResultType().bits() == lhs.bits() && rhsResultType().bits() == rhs.bits());
+        ASSERT(lhsObservedType().isEmpty());
+        ASSERT(rhsObservedType().isEmpty());
+    }
+    ArithProfile() = default;
+
+    static ArithProfile fromInt(uint32_t bits)
+    {
+        ArithProfile result;
+        result.m_bits = bits;
+        return result;
+    }
+
+    enum ObservedResults {
+        NonNegZeroDouble = 1 << 0,
+        NegZeroDouble    = 1 << 1,
+        NonNumber        = 1 << 2,
+        Int32Overflow    = 1 << 3,
+        Int52Overflow    = 1 << 4,
+    };
+
+    ResultType lhsResultType() const { return ResultType((m_bits >> lhsResultTypeShift) & resultTypeMask); }
+    ResultType rhsResultType() const { return ResultType((m_bits >> rhsResultTypeShift) & resultTypeMask); }
+
+    ObservedType lhsObservedType() const { return ObservedType((m_bits >> lhsObservedTypeShift) & observedTypeMask); }
+    ObservedType rhsObservedType() const { return ObservedType((m_bits >> rhsObservedTypeShift) & observedTypeMask); }
+    void setLhsObservedType(ObservedType type)
+    {
+        uint32_t bits = m_bits;
+        bits &= clearLhsObservedTypeBitMask;
+        bits |= type.bits() << lhsObservedTypeShift;
+        m_bits = bits;
+        ASSERT(lhsObservedType() == type);
+    }
+
+    void setRhsObservedType(ObservedType type)
+    { 
+        uint32_t bits = m_bits;
+        bits &= clearRhsObservedTypeBitMask;
+        bits |= type.bits() << rhsObservedTypeShift;
+        m_bits = bits;
+        ASSERT(rhsObservedType() == type);
+    }
+
+    bool tookSpecialFastPath() const { return m_bits & specialFastPathBit; }
+
+    bool didObserveNonInt32() const { return hasBits(NonNegZeroDouble | NegZeroDouble | NonNumber); }
+    bool didObserveDouble() const { return hasBits(NonNegZeroDouble | NegZeroDouble); }
+    bool didObserveNonNegZeroDouble() const { return hasBits(NonNegZeroDouble); }
+    bool didObserveNegZeroDouble() const { return hasBits(NegZeroDouble); }
+    bool didObserveNonNumber() const { return hasBits(NonNumber); }
+    bool didObserveInt32Overflow() const { return hasBits(Int32Overflow); }
+    bool didObserveInt52Overflow() const { return hasBits(Int52Overflow); }
+
+    void setObservedNonNegZeroDouble() { setBit(NonNegZeroDouble); }
+    void setObservedNegZeroDouble() { setBit(NegZeroDouble); }
+    void setObservedNonNumber() { setBit(NonNumber); }
+    void setObservedInt32Overflow() { setBit(Int32Overflow); }
+    void setObservedInt52Overflow() { setBit(Int52Overflow); }
+
+    const void* addressOfBits() const { return &m_bits; }
+
+    void observeResult(JSValue value)
+    {
+        if (value.isInt32())
+            return;
+        if (value.isNumber()) {
+            m_bits |= Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble;
+            return;
+        }
+        m_bits |= NonNumber;
+    }
+
+    void lhsSawInt32() { setLhsObservedType(lhsObservedType().withInt32()); }
+    void lhsSawNumber() { setLhsObservedType(lhsObservedType().withNumber()); }
+    void lhsSawNonNumber() { setLhsObservedType(lhsObservedType().withNonNumber()); }
+    void rhsSawInt32() { setRhsObservedType(rhsObservedType().withInt32()); }
+    void rhsSawNumber() { setRhsObservedType(rhsObservedType().withNumber()); }
+    void rhsSawNonNumber() { setRhsObservedType(rhsObservedType().withNonNumber()); }
+
+    void observeLHS(JSValue lhs)
+    {
+        ArithProfile newProfile = *this;
+        if (lhs.isNumber()) {
+            if (lhs.isInt32())
+                newProfile.lhsSawInt32();
+            else
+                newProfile.lhsSawNumber();
+        } else
+            newProfile.lhsSawNonNumber();
+
+        m_bits = newProfile.bits();
+    }
+
+    void observeLHSAndRHS(JSValue lhs, JSValue rhs)
+    {
+        observeLHS(lhs);
+
+        ArithProfile newProfile = *this;
+        if (rhs.isNumber()) {
+            if (rhs.isInt32())
+                newProfile.rhsSawInt32();
+            else
+                newProfile.rhsSawNumber();
+        } else
+            newProfile.rhsSawNonNumber();
+
+        m_bits = newProfile.bits();
+    }
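// Illustrative only (editorial, not part of this patch): a slow-path callback
// would update the profile at runtime roughly like this, assuming JSC's
// jsNumber() and ResultType::unknownType() helpers; the values are made up.
//
//     ArithProfile profile(ResultType::unknownType(), ResultType::unknownType());
//     profile.observeLHSAndRHS(jsNumber(1), jsNumber(2.5)); // lhs saw Int32, rhs saw Number
//     profile.observeResult(jsNumber(3.5));                 // non-Int32 number: sets all of the
//                                                           // overflow/double bits in one store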
+
+#if ENABLE(JIT)    
+    // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble) if it sees a
+    // double. Sets NonNumber if it sees a non-number.
+    void emitObserveResult(CCallHelpers&, JSValueRegs, TagRegistersMode = HaveTagRegisters);
+    
+    // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble).
+    bool shouldEmitSetDouble() const;
+    void emitSetDouble(CCallHelpers&) const;
+    
+    // Sets NonNumber.
+    void emitSetNonNumber(CCallHelpers&) const;
+    bool shouldEmitSetNonNumber() const;
+#endif // ENABLE(JIT)
+
+    uint32_t bits() const { return m_bits; }
+
+private:
+    bool hasBits(int mask) const { return m_bits & mask; }
+    void setBit(int mask) { m_bits |= mask; }
+
+    uint32_t m_bits { 0 }; // We take care to update m_bits only in a single operation. We don't ever store an inconsistent bit representation to it.
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, const JSC::ArithProfile&);
+void printInternal(PrintStream&, const JSC::ObservedType&);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
index 4a008e083..905b5bd3c 100644
--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
@@ -26,7 +26,7 @@
 #include "config.h"
 #include "ArrayAllocationProfile.h"
 
-#include "Operations.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
@@ -49,7 +49,7 @@ void ArrayAllocationProfile::updateIndexingType()
     JSArray* lastArray = m_lastArray;
     if (!lastArray)
         return;
-    m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->structure()->indexingType());
+    m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->indexingType());
     m_lastArray = 0;
 }
 
diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
index f77b92a2f..cf30de6b9 100644
--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ArrayAllocationProfile_h
-#define ArrayAllocationProfile_h
+#pragma once
 
 #include "IndexingType.h"
 #include "JSArray.h"
@@ -42,7 +41,7 @@ public:
     IndexingType selectIndexingType()
     {
         JSArray* lastArray = m_lastArray;
-        if (lastArray && UNLIKELY(lastArray->structure()->indexingType() != m_currentIndexingType))
+        if (lastArray && UNLIKELY(lastArray->indexingType() != m_currentIndexingType))
             updateIndexingType();
         return m_currentIndexingType;
     }
@@ -76,6 +75,3 @@ private:
 };
 
 } // namespace JSC
-
-#endif // ArrayAllocationProfile_h
-
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
index 4c055fea5..3146b18f8 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
@@ -27,6 +27,7 @@
 #include "ArrayProfile.h"
 
 #include "CodeBlock.h"
+#include "JSCInlines.h"
 #include <wtf/CommaPrinter.h>
 #include <wtf/StringExtras.h>
 #include <wtf/StringPrintStream.h>
@@ -72,37 +73,62 @@ void dumpArrayModes(PrintStream& out, ArrayModes arrayModes)
         out.print(comma, "ArrayWithArrayStorage");
     if (arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage))
         out.print(comma, "ArrayWithSlowPutArrayStorage");
+
+    if (arrayModes & Int8ArrayMode)
+        out.print(comma, "Int8ArrayMode");
+    if (arrayModes & Int16ArrayMode)
+        out.print(comma, "Int16ArrayMode");
+    if (arrayModes & Int32ArrayMode)
+        out.print(comma, "Int32ArrayMode");
+    if (arrayModes & Uint8ArrayMode)
+        out.print(comma, "Uint8ArrayMode");
+    if (arrayModes & Uint8ClampedArrayMode)
+        out.print(comma, "Uint8ClampedArrayMode");
+    if (arrayModes & Uint16ArrayMode)
+        out.print(comma, "Uint16ArrayMode");
+    if (arrayModes & Uint32ArrayMode)
+        out.print(comma, "Uint32ArrayMode");
+    if (arrayModes & Float32ArrayMode)
+        out.print(comma, "Float32ArrayMode");
+    if (arrayModes & Float64ArrayMode)
+        out.print(comma, "Float64ArrayMode");
 }
 
-void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock* codeBlock)
+void ArrayProfile::computeUpdatedPrediction(const ConcurrentJSLocker& locker, CodeBlock* codeBlock)
 {
-    if (!m_lastSeenStructure)
+    if (!m_lastSeenStructureID)
         return;
     
-    m_observedArrayModes |= arrayModeFromStructure(m_lastSeenStructure);
+    Structure* lastSeenStructure = codeBlock->heap()->structureIDTable().get(m_lastSeenStructureID);
+    computeUpdatedPrediction(locker, codeBlock, lastSeenStructure);
+    m_lastSeenStructureID = 0;
+}
+
+void ArrayProfile::computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock* codeBlock, Structure* lastSeenStructure)
+{
+    m_observedArrayModes |= arrayModeFromStructure(lastSeenStructure);
     
     if (!m_didPerformFirstRunPruning
         && hasTwoOrMoreBitsSet(m_observedArrayModes)) {
-        m_observedArrayModes = arrayModeFromStructure(m_lastSeenStructure);
+        m_observedArrayModes = arrayModeFromStructure(lastSeenStructure);
         m_didPerformFirstRunPruning = true;
     }
     
     m_mayInterceptIndexedAccesses |=
-        m_lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero();
+        lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero();
     JSGlobalObject* globalObject = codeBlock->globalObject();
-    if (!globalObject->isOriginalArrayStructure(m_lastSeenStructure)
-        && !globalObject->isOriginalTypedArrayStructure(m_lastSeenStructure))
+    if (!globalObject->isOriginalArrayStructure(lastSeenStructure)
+        && !globalObject->isOriginalTypedArrayStructure(lastSeenStructure))
         m_usesOriginalArrayStructures = false;
-    m_lastSeenStructure = 0;
 }
 
-CString ArrayProfile::briefDescription(const ConcurrentJITLocker& locker, CodeBlock* codeBlock)
+CString ArrayProfile::briefDescription(const ConcurrentJSLocker& locker, CodeBlock* codeBlock)
 {
     computeUpdatedPrediction(locker, codeBlock);
     return briefDescriptionWithoutUpdating(locker);
 }
 
-CString ArrayProfile::briefDescriptionWithoutUpdating(const ConcurrentJITLocker&)
+CString ArrayProfile::briefDescriptionWithoutUpdating(const ConcurrentJSLocker&)
 {
     StringPrintStream out;
     
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h
index c23230e06..279906de1 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.h
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h
@@ -23,13 +23,11 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ArrayProfile_h
-#define ArrayProfile_h
+#pragma once
 
-#include "ConcurrentJITLock.h"
+#include "ConcurrentJSLock.h"
 #include "JSArray.h"
 #include "Structure.h"
-#include <wtf/HashMap.h>
 #include <wtf/SegmentedVector.h>
 
 namespace JSC {
@@ -37,20 +35,44 @@ namespace JSC {
 class CodeBlock;
 class LLIntOffsetsExtractor;
 
-// This is a bitfield where each bit represents an IndexingType that we have seen.
-// There are 32 indexing types, so an unsigned is enough.
+// This is a bitfield where each bit represents a type of array access that we have seen.
+// There are 16 indexing types that use the lower bits.
+// There are 9 typed array types occupying bits 16 to 24.
 typedef unsigned ArrayModes;
 
+const ArrayModes Int8ArrayMode = 1 << 16;
+const ArrayModes Int16ArrayMode = 1 << 17;
+const ArrayModes Int32ArrayMode = 1 << 18;
+const ArrayModes Uint8ArrayMode = 1 << 19;
+const ArrayModes Uint8ClampedArrayMode = 1 << 20;
+const ArrayModes Uint16ArrayMode = 1 << 21;
+const ArrayModes Uint32ArrayMode = 1 << 22;
+const ArrayModes Float32ArrayMode = 1 << 23;
+const ArrayModes Float64ArrayMode = 1 << 24;
+
 #define asArrayModes(type) \
     (static_cast<unsigned>(1) << static_cast<uint32_t>(type))
 
+#define ALL_TYPED_ARRAY_MODES \
+    (Int8ArrayMode            \
+    | Int16ArrayMode          \
+    | Int32ArrayMode          \
+    | Uint8ArrayMode          \
+    | Uint8ClampedArrayMode   \
+    | Uint16ArrayMode         \
+    | Uint32ArrayMode         \
+    | Float32ArrayMode        \
+    | Float64ArrayMode        \
+    )
+
 #define ALL_NON_ARRAY_ARRAY_MODES                       \
     (asArrayModes(NonArray)                             \
     | asArrayModes(NonArrayWithInt32)                   \
     | asArrayModes(NonArrayWithDouble)                  \
     | asArrayModes(NonArrayWithContiguous)              \
     | asArrayModes(NonArrayWithArrayStorage)            \
-    | asArrayModes(NonArrayWithSlowPutArrayStorage))
+    | asArrayModes(NonArrayWithSlowPutArrayStorage)     \
+    | ALL_TYPED_ARRAY_MODES)
 
 #define ALL_ARRAY_ARRAY_MODES                           \
     (asArrayModes(ArrayClass)                           \
@@ -65,6 +87,29 @@ typedef unsigned ArrayModes;
 
 inline ArrayModes arrayModeFromStructure(Structure* structure)
 {
+    switch (structure->classInfo()->typedArrayStorageType) {
+    case TypeInt8:
+        return Int8ArrayMode;
+    case TypeUint8:
+        return Uint8ArrayMode;
+    case TypeUint8Clamped:
+        return Uint8ClampedArrayMode;
+    case TypeInt16:
+        return Int16ArrayMode;
+    case TypeUint16:
+        return Uint16ArrayMode;
+    case TypeInt32:
+        return Int32ArrayMode;
+    case TypeUint32:
+        return Uint32ArrayMode;
+    case TypeFloat32:
+        return Float32ArrayMode;
+    case TypeFloat64:
+        return Float64ArrayMode;
+    case TypeDataView:
+    case NotTypedArray:
+        break;
+    }
     return asArrayModes(structure->indexingType());
 }
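// Illustrative only (editorial, not part of this patch): ArrayModes is a plain
// bitfield, so observations from different structures can simply be OR'ed.
//
//     ArrayModes modes = 0;
//     modes |= asArrayModes(ArrayWithContiguous); // an indexing-type bit (low 16 bits)
//     modes |= Float64ArrayMode;                  // a typed-array bit (bit 24)
//     // hasTwoOrMoreBitsSet(modes) is now true, which ArrayProfile's first-run
//     // pruning (in ArrayProfile.cpp above) uses to detect polymorphic sites.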
 
@@ -135,7 +180,7 @@ class ArrayProfile {
 public:
     ArrayProfile()
         : m_bytecodeOffset(std::numeric_limits<unsigned>::max())
-        , m_lastSeenStructure(0)
+        , m_lastSeenStructureID(0)
         , m_mayStoreToHole(false)
         , m_outOfBounds(false)
         , m_mayInterceptIndexedAccesses(false)
@@ -147,7 +192,7 @@ public:
     
     ArrayProfile(unsigned bytecodeOffset)
         : m_bytecodeOffset(bytecodeOffset)
-        , m_lastSeenStructure(0)
+        , m_lastSeenStructureID(0)
         , m_mayStoreToHole(false)
         , m_outOfBounds(false)
         , m_mayInterceptIndexedAccesses(false)
@@ -159,28 +204,31 @@ public:
     
     unsigned bytecodeOffset() const { return m_bytecodeOffset; }
     
-    Structure** addressOfLastSeenStructure() { return &m_lastSeenStructure; }
+    StructureID* addressOfLastSeenStructureID() { return &m_lastSeenStructureID; }
     ArrayModes* addressOfArrayModes() { return &m_observedArrayModes; }
     bool* addressOfMayStoreToHole() { return &m_mayStoreToHole; }
+
+    void setOutOfBounds() { m_outOfBounds = true; }
     bool* addressOfOutOfBounds() { return &m_outOfBounds; }
     
     void observeStructure(Structure* structure)
     {
-        m_lastSeenStructure = structure;
+        m_lastSeenStructureID = structure->id();
     }
     
-    void computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock*);
+    void computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock*);
+    void computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock*, Structure* lastSeenStructure);
     
-    ArrayModes observedArrayModes(const ConcurrentJITLocker&) const { return m_observedArrayModes; }
-    bool mayInterceptIndexedAccesses(const ConcurrentJITLocker&) const { return m_mayInterceptIndexedAccesses; }
+    ArrayModes observedArrayModes(const ConcurrentJSLocker&) const { return m_observedArrayModes; }
+    bool mayInterceptIndexedAccesses(const ConcurrentJSLocker&) const { return m_mayInterceptIndexedAccesses; }
     
-    bool mayStoreToHole(const ConcurrentJITLocker&) const { return m_mayStoreToHole; }
-    bool outOfBounds(const ConcurrentJITLocker&) const { return m_outOfBounds; }
+    bool mayStoreToHole(const ConcurrentJSLocker&) const { return m_mayStoreToHole; }
+    bool outOfBounds(const ConcurrentJSLocker&) const { return m_outOfBounds; }
     
-    bool usesOriginalArrayStructures(const ConcurrentJITLocker&) const { return m_usesOriginalArrayStructures; }
+    bool usesOriginalArrayStructures(const ConcurrentJSLocker&) const { return m_usesOriginalArrayStructures; }
     
-    CString briefDescription(const ConcurrentJITLocker&, CodeBlock*);
-    CString briefDescriptionWithoutUpdating(const ConcurrentJITLocker&);
+    CString briefDescription(const ConcurrentJSLocker&, CodeBlock*);
+    CString briefDescriptionWithoutUpdating(const ConcurrentJSLocker&);
     
 private:
     friend class LLIntOffsetsExtractor;
@@ -188,7 +236,7 @@ private:
     static Structure* polymorphicStructure() { return static_cast<Structure*>(reinterpret_cast<void*>(1)); }
     
     unsigned m_bytecodeOffset;
-    Structure* m_lastSeenStructure;
+    StructureID m_lastSeenStructureID;
     bool m_mayStoreToHole; // This flag may become overloaded to indicate other special cases that were encountered during array access, as it depends on indexing type. Since we currently have basically just one indexing type (two variants of ArrayStorage), this flag for now just means exactly what its name implies.
     bool m_outOfBounds;
     bool m_mayInterceptIndexedAccesses : 1;
@@ -197,9 +245,6 @@ private:
     ArrayModes m_observedArrayModes;
 };
 
-typedef SegmentedVector ArrayProfileVector;
+typedef SegmentedVector ArrayProfileVector;
 
 } // namespace JSC
-
-#endif // ArrayProfile_h
-
diff --git a/Source/JavaScriptCore/bytecode/ByValInfo.h b/Source/JavaScriptCore/bytecode/ByValInfo.h
index 35fae0c60..e5fa70858 100644
--- a/Source/JavaScriptCore/bytecode/ByValInfo.h
+++ b/Source/JavaScriptCore/bytecode/ByValInfo.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,26 +23,30 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ByValInfo_h
-#define ByValInfo_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(JIT)
+#pragma once
 
 #include "ClassInfo.h"
 #include "CodeLocation.h"
+#include "CodeOrigin.h"
 #include "IndexingType.h"
 #include "JITStubRoutine.h"
 #include "Structure.h"
 
 namespace JSC {
 
+class Symbol;
+
+#if ENABLE(JIT)
+
+class StructureStubInfo;
+
 enum JITArrayMode {
     JITInt32,
     JITDouble,
     JITContiguous,
     JITArrayStorage,
+    JITDirectArguments,
+    JITScopedArguments,
     JITInt8Array,
     JITInt16Array,
     JITInt32Array,
@@ -67,6 +71,17 @@ inline bool isOptimizableIndexingType(IndexingType indexingType)
     }
 }
 
+inline bool hasOptimizableIndexingForJSType(JSType type)
+{
+    switch (type) {
+    case DirectArgumentsType:
+    case ScopedArgumentsType:
+        return true;
+    default:
+        return false;
+    }
+}
+
 inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo)
 {
     return isTypedView(classInfo->typedArrayStorageType);
@@ -75,6 +90,7 @@ inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo)
 inline bool hasOptimizableIndexing(Structure* structure)
 {
     return isOptimizableIndexingType(structure->indexingType())
+        || hasOptimizableIndexingForJSType(structure->typeInfo().type())
         || hasOptimizableIndexingForClassInfo(structure->classInfo());
 }
 
@@ -95,6 +111,19 @@ inline JITArrayMode jitArrayModeForIndexingType(IndexingType indexingType)
     }
 }
 
+inline JITArrayMode jitArrayModeForJSType(JSType type)
+{
+    switch (type) {
+    case DirectArgumentsType:
+        return JITDirectArguments;
+    case ScopedArgumentsType:
+        return JITScopedArguments;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return JITContiguous;
+    }
+}
+
 inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo)
 {
     switch (classInfo->typedArrayStorageType) {
@@ -122,6 +151,19 @@ inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo)
     }
 }
 
+inline bool jitArrayModePermitsPut(JITArrayMode mode)
+{
+    switch (mode) {
+    case JITDirectArguments:
+    case JITScopedArguments:
+        // We could support put_by_val on these at some point, but it's just not that profitable
+        // at the moment.
+        return false;
+    default:
+        return true;
+    }
+}
+
 inline TypedArrayType typedArrayTypeForJITArrayMode(JITArrayMode mode)
 {
     switch (mode) {
@@ -154,30 +196,49 @@ inline JITArrayMode jitArrayModeForStructure(Structure* structure)
     if (isOptimizableIndexingType(structure->indexingType()))
         return jitArrayModeForIndexingType(structure->indexingType());
     
+    if (hasOptimizableIndexingForJSType(structure->typeInfo().type()))
+        return jitArrayModeForJSType(structure->typeInfo().type());
+    
     ASSERT(hasOptimizableIndexingForClassInfo(structure->classInfo()));
     return jitArrayModeForClassInfo(structure->classInfo());
 }
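// Illustrative only (editorial, not part of this patch): a hypothetical helper
// showing how a caller would combine the predicates above before emitting a
// fast put_by_val path.
//
//     static bool canEmitFastPutByVal(Structure* structure)
//     {
//         if (!hasOptimizableIndexing(structure))
//             return false; // no fast path of any kind for this structure
//         return jitArrayModePermitsPut(jitArrayModeForStructure(structure));
//     }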
 
 struct ByValInfo {
     ByValInfo() { }
-    
-    ByValInfo(unsigned bytecodeIndex, CodeLocationJump badTypeJump, JITArrayMode arrayMode, int16_t badTypeJumpToDone, int16_t returnAddressToSlowPath)
+
+    ByValInfo(unsigned bytecodeIndex, CodeLocationJump notIndexJump, CodeLocationJump badTypeJump, CodeLocationLabel exceptionHandler, JITArrayMode arrayMode, ArrayProfile* arrayProfile, int16_t badTypeJumpToDone, int16_t badTypeJumpToNextHotPath, int16_t returnAddressToSlowPath)
         : bytecodeIndex(bytecodeIndex)
+        , notIndexJump(notIndexJump)
         , badTypeJump(badTypeJump)
+        , exceptionHandler(exceptionHandler)
         , arrayMode(arrayMode)
+        , arrayProfile(arrayProfile)
         , badTypeJumpToDone(badTypeJumpToDone)
+        , badTypeJumpToNextHotPath(badTypeJumpToNextHotPath)
         , returnAddressToSlowPath(returnAddressToSlowPath)
         , slowPathCount(0)
+        , stubInfo(nullptr)
+        , tookSlowPath(false)
+        , seen(false)
     {
     }
-    
+
     unsigned bytecodeIndex;
+    CodeLocationJump notIndexJump;
     CodeLocationJump badTypeJump;
+    CodeLocationLabel exceptionHandler;
     JITArrayMode arrayMode; // The array mode that was baked into the inline JIT code.
+    ArrayProfile* arrayProfile;
     int16_t badTypeJumpToDone;
+    int16_t badTypeJumpToNextHotPath;
     int16_t returnAddressToSlowPath;
     unsigned slowPathCount;
     RefPtr<JITStubRoutine> stubRoutine;
+    Identifier cachedId;
+    WriteBarrier<Symbol> cachedSymbol;
+    StructureStubInfo* stubInfo;
+    bool tookSlowPath : 1;
+    bool seen : 1;
 };
 
 inline unsigned getByValInfoBytecodeIndex(ByValInfo* info)
@@ -185,9 +246,12 @@ inline unsigned getByValInfoBytecodeIndex(ByValInfo* info)
     return info->bytecodeIndex;
 }
 
-} // namespace JSC
+typedef HashMap<CodeOrigin, ByValInfo*, CodeOriginApproximateHash> ByValInfoMap;
 
-#endif // ENABLE(JIT)
+#else // ENABLE(JIT)
+
+typedef HashMap<int, void*> ByValInfoMap;
 
-#endif // ByValInfo_h
+#endif // ENABLE(JIT)
 
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
index d7489d31a..47c481d5d 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,120 +27,67 @@
 #include "BytecodeBasicBlock.h"
 
 #include "CodeBlock.h"
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
 #include "PreciseJumpTargets.h"
 
 namespace JSC {
 
-static bool isBranch(OpcodeID opcodeID)
+void BytecodeBasicBlock::shrinkToFit()
 {
-    switch (opcodeID) {
-    case op_jmp:
-    case op_jtrue:
-    case op_jfalse:
-    case op_jeq_null:
-    case op_jneq_null:
-    case op_jneq_ptr:
-    case op_jless:
-    case op_jlesseq:
-    case op_jgreater:
-    case op_jgreatereq:
-    case op_jnless:
-    case op_jnlesseq:
-    case op_jngreater:
-    case op_jngreatereq:
-    case op_switch_imm:
-    case op_switch_char:
-    case op_switch_string:
-    case op_get_pnames:
-    case op_next_pname:
-    case op_check_has_instance:
-        return true;
-    default:
-        return false;
-    }
+    m_offsets.shrinkToFit();
+    m_successors.shrinkToFit();
 }
 
-static bool isUnconditionalBranch(OpcodeID opcodeID)
+static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset)
 {
-    switch (opcodeID) {
-    case op_jmp:
+    if (opcodeID == op_catch)
         return true;
-    default:
-        return false;
-    }
-}
 
-static bool isTerminal(OpcodeID opcodeID)
-{
-    switch (opcodeID) {
-    case op_ret:
-    case op_ret_object_or_this:
-    case op_end:
-        return true;
-    default:
-        return false;
-    }
+    return std::binary_search(jumpTargets.begin(), jumpTargets.end(), bytecodeOffset);
 }
 
-static bool isThrow(OpcodeID opcodeID)
+template<typename Block, typename Instruction>
+void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
 {
-    switch (opcodeID) {
-    case op_throw:
-    case op_throw_static_error:
-        return true;
-    default:
-        return false;
-    }
-}
+    Vector<unsigned, 32> jumpTargets;
+    computePreciseJumpTargets(codeBlock, instructionsBegin, instructionCount, jumpTargets);
 
-static bool isJumpTarget(OpcodeID opcodeID, Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset)
-{
-    if (opcodeID == op_catch)
-        return true;
+    auto appendBlock = [&] (std::unique_ptr<BytecodeBasicBlock>&& block) {
+        block->m_index = basicBlocks.size();
+        basicBlocks.append(WTFMove(block));
+    };
 
-    for (unsigned i = 0; i < jumpTargets.size(); i++) {
-        if (bytecodeOffset == jumpTargets[i])
-            return true;
-    }
-    return false;
-}
+    auto linkBlocks = [&] (BytecodeBasicBlock* from, BytecodeBasicBlock* to) {
+        from->addSuccessor(to);
+    };
 
-static void linkBlocks(BytecodeBasicBlock* predecessor, BytecodeBasicBlock* successor)
-{
-    predecessor->addSuccessor(successor);
-    successor->addPredecessor(predecessor);
-}
+    // Create the entry and exit basic blocks.
+    basicBlocks.reserveCapacity(jumpTargets.size() + 2);
 
-void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
-{
-    Vector jumpTargets;
-    computePreciseJumpTargets(codeBlock, jumpTargets);
+    auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock);
+    auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0);
+    linkBlocks(entry.get(), firstBlock.get());
 
-    // Create the entry and exit basic blocks.
-    BytecodeBasicBlock* entry = new BytecodeBasicBlock(BytecodeBasicBlock::EntryBlock);
-    basicBlocks.append(adoptRef(entry));
-    BytecodeBasicBlock* exit = new BytecodeBasicBlock(BytecodeBasicBlock::ExitBlock);
+    appendBlock(WTFMove(entry));
+    BytecodeBasicBlock* current = firstBlock.get();
+    appendBlock(WTFMove(firstBlock));
 
-    // Find basic block boundaries.
-    BytecodeBasicBlock* current = new BytecodeBasicBlock(0, 0);
-    linkBlocks(entry, current);
-    basicBlocks.append(adoptRef(current));
+    auto exit = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::ExitBlock);
 
     bool nextInstructionIsLeader = false;
 
     Interpreter* interpreter = codeBlock->vm()->interpreter;
-    Instruction* instructionsBegin = codeBlock->instructions().begin();
-    unsigned instructionCount = codeBlock->instructions().size();
     for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
-        OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
+        OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
         unsigned opcodeLength = opcodeLengths[opcodeID];
 
         bool createdBlock = false;
         // If the current bytecode is a jump target, then it's the leader of its own basic block.
         if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) {
-            BytecodeBasicBlock* block = new BytecodeBasicBlock(bytecodeOffset, opcodeLength);
-            basicBlocks.append(adoptRef(block));
-            current = block;
+            auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength);
+            current = newBlock.get();
+            appendBlock(WTFMove(newBlock));
             createdBlock = true;
             nextInstructionIsLeader = false;
             bytecodeOffset += opcodeLength;
@@ -154,7 +101,7 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
-        current->addBytecodeLength(opcodeLength);
+        current->addLength(opcodeLength);
         bytecodeOffset += opcodeLength;
     }
 
@@ -166,14 +113,13 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
-        for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) {
-            const Instruction& currentInstruction = instructionsBegin[bytecodeOffset];
-            OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction.u.opcode);
+        for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
+            OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
             unsigned opcodeLength = opcodeLengths[opcodeID];
             // If we found a terminal bytecode, link to the exit block.
             if (isTerminal(opcodeID)) {
-                ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
-                linkBlocks(block, exit);
+                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
+                linkBlocks(block, exit.get());
                 fallsThrough = false;
                 break;
             }
@@ -182,16 +128,16 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
-                ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
-                HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);
+                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
+                auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);
                 fallsThrough = false;
                 if (!handler) {
-                    linkBlocks(block, exit);
+                    linkBlocks(block, exit.get());
                     break;
                 }
                 for (unsigned i = 0; i < basicBlocks.size(); i++) {
                     BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
-                    if (handler->target == otherBlock->leaderBytecodeOffset()) {
+                    if (handler->target == otherBlock->leaderOffset()) {
                         linkBlocks(block, otherBlock);
                         break;
                     }
@@ -201,15 +147,26 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
-                ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
+                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
                 Vector<unsigned, 1> bytecodeOffsetsJumpedTo;
-                findJumpTargetsForBytecodeOffset(codeBlock, bytecodeOffset, bytecodeOffsetsJumpedTo);
+                findJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, bytecodeOffsetsJumpedTo);
 
+                size_t numberOfJumpTargets = bytecodeOffsetsJumpedTo.size();
+                ASSERT(numberOfJumpTargets);
                 for (unsigned i = 0; i < basicBlocks.size(); i++) {
                     BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
-                    if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderBytecodeOffset()))
+                    if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderOffset())) {
                         linkBlocks(block, otherBlock);
+                        --numberOfJumpTargets;
+                        if (!numberOfJumpTargets)
+                            break;
+                    }
                 }
+                // numberOfJumpTargets may not be 0 here if there are multiple jumps targeting the same
+                // basic blocks (e.g. in a switch type opcode). Since we only decrement numberOfJumpTargets
+                // once per basic block, the duplicates are not accounted for. For our purpose here,
+                // that doesn't matter because we only need to link to the target block once regardless
+                // of how many ways this block can jump there.
 
                 if (isUnconditionalBranch(opcodeID))
                     fallsThrough = false;
@@ -227,7 +184,20 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
+    appendBlock(WTFMove(exit));
+
+    for (auto& basicBlock : basicBlocks)
+        basicBlock->shrinkToFit();
+}
+
+void BytecodeBasicBlock::compute(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
+{
+    computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks);
+}
+
+void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
+{
+    BytecodeBasicBlock::computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks);
 }
 
 } // namespace JSC
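// Illustrative only (editorial, not part of this patch): a client such as the
// bytecode liveness analysis would drive the computation like this, with
// 'codeBlock' standing in for a real CodeBlock*:
//
//     Vector<std::unique_ptr<BytecodeBasicBlock>> basicBlocks;
//     BytecodeBasicBlock::compute(
//         codeBlock, codeBlock->instructions().begin(),
//         codeBlock->instructions().size(), basicBlocks);
//     // basicBlocks[0] is the entry block; the exit block is appended last.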
diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
index 736ba8540..fb81650ca 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,77 +23,80 @@
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef BytecodeBasicBlock_h
-#define BytecodeBasicBlock_h
+#pragma once
 
 #include <limits.h>
 #include <wtf/FastBitVector.h>
-#include <wtf/RefCounted.h>
-#include <wtf/RefPtr.h>
 #include <wtf/Vector.h>
 
 namespace JSC {
 
 class CodeBlock;
+class UnlinkedCodeBlock;
+struct Instruction;
+struct UnlinkedInstruction;
 
-class BytecodeBasicBlock : public RefCounted<BytecodeBasicBlock> {
+class BytecodeBasicBlock {
+    WTF_MAKE_FAST_ALLOCATED;
 public:
     enum SpecialBlockType { EntryBlock, ExitBlock };
     BytecodeBasicBlock(unsigned start, unsigned length);
     BytecodeBasicBlock(SpecialBlockType);
+    void shrinkToFit();
 
-    bool isEntryBlock() { return !m_leaderBytecodeOffset && !m_totalBytecodeLength; }
-    bool isExitBlock() { return m_leaderBytecodeOffset == UINT_MAX && m_totalBytecodeLength == UINT_MAX; }
+    bool isEntryBlock() { return !m_leaderOffset && !m_totalLength; }
+    bool isExitBlock() { return m_leaderOffset == UINT_MAX && m_totalLength == UINT_MAX; }
 
-    unsigned leaderBytecodeOffset() { return m_leaderBytecodeOffset; }
-    unsigned totalBytecodeLength() { return m_totalBytecodeLength; }
+    unsigned leaderOffset() { return m_leaderOffset; }
+    unsigned totalLength() { return m_totalLength; }
 
-    Vector<unsigned>& bytecodeOffsets() { return m_bytecodeOffsets; }
-    void addBytecodeLength(unsigned);
+    const Vector<unsigned>& offsets() const { return m_offsets; }
 
-    void addPredecessor(BytecodeBasicBlock* block) { m_predecessors.append(block); }
-    void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); }
-
-    Vector<BytecodeBasicBlock*>& predecessors() { return m_predecessors; }
-    Vector<BytecodeBasicBlock*>& successors() { return m_successors; }
+    const Vector<BytecodeBasicBlock*>& successors() const { return m_successors; }
 
     FastBitVector& in() { return m_in; }
     FastBitVector& out() { return m_out; }
 
+    unsigned index() const { return m_index; }
+
+    static void compute(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&);
+    static void compute(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&);
+
 private:
-    unsigned m_leaderBytecodeOffset;
-    unsigned m_totalBytecodeLength;
+    template<typename Block, typename Instruction> static void computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks);
 
-    Vector<unsigned> m_bytecodeOffsets;
+    void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); }
 
-    Vector<BytecodeBasicBlock*> m_predecessors;
+    void addLength(unsigned);
+
+    unsigned m_leaderOffset;
+    unsigned m_totalLength;
+    unsigned m_index;
+
+    Vector<unsigned> m_offsets;
     Vector<BytecodeBasicBlock*> m_successors;
 
     FastBitVector m_in;
     FastBitVector m_out;
 };
 
-void computeBytecodeBasicBlocks(CodeBlock*, Vector<RefPtr<BytecodeBasicBlock> >&);
-
 inline BytecodeBasicBlock::BytecodeBasicBlock(unsigned start, unsigned length)
-    : m_leaderBytecodeOffset(start)
-    , m_totalBytecodeLength(length)
+    : m_leaderOffset(start)
+    , m_totalLength(length)
 {
-    m_bytecodeOffsets.append(m_leaderBytecodeOffset);
+    m_offsets.append(m_leaderOffset);
 }
 
 inline BytecodeBasicBlock::BytecodeBasicBlock(BytecodeBasicBlock::SpecialBlockType blockType)
-    : m_leaderBytecodeOffset(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
-    , m_totalBytecodeLength(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
+    : m_leaderOffset(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
+    , m_totalLength(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
 {
 }
 
-inline void BytecodeBasicBlock::addBytecodeLength(unsigned bytecodeLength)
+inline void BytecodeBasicBlock::addLength(unsigned bytecodeLength)
 {
-    m_bytecodeOffsets.append(m_leaderBytecodeOffset + m_totalBytecodeLength);
-    m_totalBytecodeLength += bytecodeLength;
+    m_offsets.append(m_leaderOffset + m_totalLength);
+    m_totalLength += bytecodeLength;
 }
 
 } // namespace JSC
-
-#endif // BytecodeBasicBlock_h
diff --git a/Source/JavaScriptCore/bytecode/BytecodeConventions.h b/Source/JavaScriptCore/bytecode/BytecodeConventions.h
index e375f263c..7781378ce 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeConventions.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeConventions.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,14 +23,10 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef BytecodeConventions_h
-#define BytecodeConventions_h
+#pragma once
 
 // Register numbers used in bytecode operations have different meaning according to their ranges:
-//      0x80000000-0xFFFFFFFF  Negative indices from the CallFrame pointer are entries in the call frame, see JSStack.h.
+//      0x80000000-0xFFFFFFFF  Negative indices from the CallFrame pointer are entries in the call frame.
 //      0x00000000-0x3FFFFFFF  Forwards indices from the CallFrame pointer are local vars and temporaries within the function's callframe.
 //      0x40000000-0x7FFFFFFF  Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
 static const int FirstConstantRegisterIndex = 0x40000000;
-
-#endif // BytecodeConventions_h
-
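As a worked example of the three ranges documented in this header, here is a minimal standalone classifier (toy code, not part of JSC's API; only FirstConstantRegisterIndex comes from the source above) that buckets a raw signed operand:

#include <cstdint>
#include <cstdio>

static const int32_t FirstConstantRegisterIndex = 0x40000000;

// Classify an operand by the documented ranges; negative values are the
// 0x80000000-0xFFFFFFFF range reinterpreted as a signed integer.
static const char* classifyOperand(int32_t operand)
{
    if (operand < 0)
        return "call frame entry";   // 0x80000000-0xFFFFFFFF
    if (operand < FirstConstantRegisterIndex)
        return "local or temporary"; // 0x00000000-0x3FFFFFFF
    return "constant pool entry";    // 0x40000000-0x7FFFFFFF
}

int main()
{
    std::printf("%s\n", classifyOperand(-1)); // call frame entry
    std::printf("%s\n", classifyOperand(5));  // local or temporary
    std::printf("%s\n", classifyOperand(FirstConstantRegisterIndex + 2)); // constant pool entry
}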
diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
new file mode 100644
index 000000000..f7e1e9a3d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeGeneratorification.h"
+
+#include "BytecodeLivenessAnalysisInlines.h"
+#include "BytecodeRewriter.h"
+#include "BytecodeUseDef.h"
+#include "IdentifierInlines.h"
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
+#include "JSCJSValueInlines.h"
+#include "JSGeneratorFunction.h"
+#include "StrongInlines.h"
+#include "UnlinkedCodeBlock.h"
+#include <wtf/Optional.h>
+
+namespace JSC {
+
+struct YieldData {
+    size_t point { 0 };
+    int argument { 0 };
+    FastBitVector liveness;
+};
+
+class BytecodeGeneratorification {
+public:
+    typedef Vector<YieldData> Yields;
+
+    BytecodeGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex)
+        : m_graph(codeBlock, instructions)
+        , m_generatorFrameSymbolTable(*codeBlock->vm(), generatorFrameSymbolTable)
+        , m_generatorFrameSymbolTableIndex(generatorFrameSymbolTableIndex)
+    {
+        for (BytecodeBasicBlock* block : m_graph) {
+            for (unsigned bytecodeOffset : block->offsets()) {
+                const UnlinkedInstruction* pc = &m_graph.instructions()[bytecodeOffset];
+                switch (pc->u.opcode) {
+                case op_enter: {
+                    m_enterPoint = bytecodeOffset;
+                    break;
+                }
+
+                case op_yield: {
+                    unsigned liveCalleeLocalsIndex = pc[2].u.index;
+                    if (liveCalleeLocalsIndex >= m_yields.size())
+                        m_yields.resize(liveCalleeLocalsIndex + 1);
+                    YieldData& data = m_yields[liveCalleeLocalsIndex];
+                    data.point = bytecodeOffset;
+                    data.argument = pc[3].u.operand;
+                    break;
+                }
+
+                default:
+                    break;
+                }
+            }
+        }
+    }
+
+    struct Storage {
+        Identifier identifier;
+        unsigned identifierIndex;
+        ScopeOffset scopeOffset;
+    };
+
+    void run();
+
+    BytecodeGraph<UnlinkedCodeBlock>& graph() { return m_graph; }
+
+    const Yields& yields() const
+    {
+        return m_yields;
+    }
+
+    Yields& yields()
+    {
+        return m_yields;
+    }
+
+    unsigned enterPoint() const
+    {
+        return m_enterPoint;
+    }
+
+private:
+    Storage storageForGeneratorLocal(unsigned index)
+    {
+        // We assign a symbol to each register, with a one-to-one correspondence between registers and symbols.
+        // By doing so, we allocate dedicated storage for saving the given register.
+        // This allows us to avoid re-saving live registers that have not been overwritten since the previous resume:
+        // a register can still be retrieved even if the immediately preceding op_save did not save it.
+
+        if (m_storages.size() <= index)
+            m_storages.resize(index + 1);
+        if (std::optional<Storage> storage = m_storages[index])
+            return *storage;
+
+        UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+        Identifier identifier = Identifier::fromUid(PrivateName());
+        unsigned identifierIndex = codeBlock->numberOfIdentifiers();
+        codeBlock->addIdentifier(identifier);
+        ScopeOffset scopeOffset = m_generatorFrameSymbolTable->takeNextScopeOffset(NoLockingNecessary);
+        m_generatorFrameSymbolTable->set(NoLockingNecessary, identifier.impl(), SymbolTableEntry(VarOffset(scopeOffset)));
+
+        Storage storage = {
+            identifier,
+            identifierIndex,
+            scopeOffset
+        };
+        m_storages[index] = storage;
+        return storage;
+    }
+
+    unsigned m_enterPoint { 0 };
+    BytecodeGraph<UnlinkedCodeBlock> m_graph;
+    Vector<std::optional<Storage>> m_storages;
+    Yields m_yields;
+    Strong m_generatorFrameSymbolTable;
+    int m_generatorFrameSymbolTableIndex;
+};
+
+class GeneratorLivenessAnalysis : public BytecodeLivenessPropagation<GeneratorLivenessAnalysis> {
+public:
+    GeneratorLivenessAnalysis(BytecodeGeneratorification& generatorification)
+        : m_generatorification(generatorification)
+    {
+    }
+
+    template<typename Functor>
+    void computeDefsForBytecodeOffset(UnlinkedCodeBlock* codeBlock, OpcodeID opcodeID, UnlinkedInstruction* instruction, FastBitVector&, const Functor& functor)
+    {
+        JSC::computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
+    }
+
+    template<typename Functor>
+    void computeUsesForBytecodeOffset(UnlinkedCodeBlock* codeBlock, OpcodeID opcodeID, UnlinkedInstruction* instruction, FastBitVector&, const Functor& functor)
+    {
+        JSC::computeUsesForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
+    }
+
+    void run()
+    {
+        // Perform a modified liveness analysis to determine which locals are live at the merge points.
+        // This produces a conservative answer to the question, "which variables should be saved and resumed?".
+
+        runLivenessFixpoint(m_generatorification.graph());
+
+        for (YieldData& data : m_generatorification.yields())
+            data.liveness = getLivenessInfoAtBytecodeOffset(m_generatorification.graph(), data.point + opcodeLength(op_yield));
+    }
+
+private:
+    BytecodeGeneratorification& m_generatorification;
+};
+
+void BytecodeGeneratorification::run()
+{
+    // We calculate the liveness at each merge point. This tells us, conservatively, which registers should be saved and resumed.
+
+    {
+        GeneratorLivenessAnalysis pass(*this);
+        pass.run();
+    }
+
+    UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+    BytecodeRewriter rewriter(m_graph);
+
+    // Set up the global switch for the generator.
+    {
+        unsigned nextToEnterPoint = enterPoint() + opcodeLength(op_enter);
+        unsigned switchTableIndex = m_graph.codeBlock()->numberOfSwitchJumpTables();
+        VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State));
+        auto& jumpTable = m_graph.codeBlock()->addSwitchJumpTable();
+        jumpTable.min = 0;
+        jumpTable.branchOffsets.resize(m_yields.size() + 1);
+        jumpTable.branchOffsets.fill(0);
+        jumpTable.add(0, nextToEnterPoint);
+        for (unsigned i = 0; i < m_yields.size(); ++i)
+            jumpTable.add(i + 1, m_yields[i].point);
+
+        rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) {
+            fragment.appendInstruction(op_switch_imm, switchTableIndex, nextToEnterPoint, state.offset());
+        });
+    }
+
+    for (const YieldData& data : m_yields) {
+        VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame));
+
+        // Emit save sequence.
+        rewriter.insertFragmentBefore(data.point, [&](BytecodeRewriter::Fragment& fragment) {
+            data.liveness.forEachSetBit([&](size_t index) {
+                VirtualRegister operand = virtualRegisterForLocal(index);
+                Storage storage = storageForGeneratorLocal(index);
+
+                fragment.appendInstruction(
+                    op_put_to_scope,
+                    scope.offset(), // scope
+                    storage.identifierIndex, // identifier
+                    operand.offset(), // value
+                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info
+                    m_generatorFrameSymbolTableIndex, // symbol table constant index
+                    storage.scopeOffset.offset() // scope offset
+                );
+            });
+
+            // Insert op_ret just after the save sequence.
+            fragment.appendInstruction(op_ret, data.argument);
+        });
+
+        // Emit resume sequence.
+        rewriter.insertFragmentAfter(data.point, [&](BytecodeRewriter::Fragment& fragment) {
+            data.liveness.forEachSetBit([&](size_t index) {
+                VirtualRegister operand = virtualRegisterForLocal(index);
+                Storage storage = storageForGeneratorLocal(index);
+
+                UnlinkedValueProfile profile = codeBlock->addValueProfile();
+                fragment.appendInstruction(
+                    op_get_from_scope,
+                    operand.offset(), // dst
+                    scope.offset(), // scope
+                    storage.identifierIndex, // identifier
+                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info
+                    0, // local scope depth
+                    storage.scopeOffset.offset(), // scope offset
+                    profile // profile
+                );
+            });
+        });
+
+        // Clip the unnecessary bytecode (the original op_yield).
+        rewriter.removeBytecode(data.point);
+    }
+
+    rewriter.execute();
+}
+
+void performGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex)
+{
+    BytecodeGeneratorification pass(codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex);
+    pass.run();
+}
+
+} // namespace JSC
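The switch that run() installs after op_enter is easier to see in isolation. Below is a minimal sketch with hypothetical types (the real jump table lives on UnlinkedCodeBlock, and the offsets here are invented): state 0 dispatches to the instruction after the prologue, and state i + 1 to the i-th yield point, matching the loop above.

#include <cstdio>
#include <vector>

struct ToyJumpTable {
    unsigned min { 0 };
    std::vector<unsigned> branchOffsets;
    void add(unsigned key, unsigned offset) { branchOffsets[key - min] = offset; }
    unsigned lookup(unsigned key) const { return branchOffsets[key - min]; }
};

int main()
{
    unsigned nextToEnterPoint = 1;                // offset just past the toy prologue
    std::vector<unsigned> yieldPoints { 12, 30 }; // toy offsets of two yield instructions

    ToyJumpTable table;
    table.branchOffsets.resize(yieldPoints.size() + 1, 0);
    table.add(0, nextToEnterPoint);               // first call runs the body from the top
    for (unsigned i = 0; i < yieldPoints.size(); ++i)
        table.add(i + 1, yieldPoints[i]);         // state i + 1 resumes at yield i

    for (unsigned state = 0; state <= yieldPoints.size(); ++state)
        std::printf("state %u -> offset %u\n", state, table.lookup(state));
}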
diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h
new file mode 100644
index 000000000..c7b613746
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class SymbolTable;
+
+void performGeneratorification(UnlinkedCodeBlock*, UnlinkedCodeBlock::UnpackedInstructions&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex);
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeGraph.h b/Source/JavaScriptCore/bytecode/BytecodeGraph.h
new file mode 100644
index 000000000..38a13c601
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeGraph.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeBasicBlock.h"
+#include <wtf/IndexedContainerIterator.h>
+#include <wtf/IteratorRange.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class BytecodeBasicBlock;
+
+template<typename Block>
+class BytecodeGraph {
+    WTF_MAKE_FAST_ALLOCATED;
+    WTF_MAKE_NONCOPYABLE(BytecodeGraph);
+public:
+    typedef Block CodeBlock;
+    typedef typename Block::Instruction Instruction;
+    typedef Vector<std::unique_ptr<BytecodeBasicBlock>> BasicBlocksVector;
+
+    typedef WTF::IndexedContainerIterator<BytecodeGraph<Block>> iterator;
+
+    inline BytecodeGraph(Block*, typename Block::UnpackedInstructions&);
+
+    Block* codeBlock() const { return m_codeBlock; }
+
+    typename Block::UnpackedInstructions& instructions() { return m_instructions; }
+
+    WTF::IteratorRange<typename BasicBlocksVector::reverse_iterator> basicBlocksInReverseOrder()
+    {
+        return WTF::makeIteratorRange(m_basicBlocks.rbegin(), m_basicBlocks.rend());
+    }
+
+    static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset)
+    {
+        unsigned leaderOffset = block->leaderOffset();
+        return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalLength();
+    }
+
+    BytecodeBasicBlock* findBasicBlockForBytecodeOffset(unsigned bytecodeOffset)
+    {
+        /*
+            for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
+                if (blockContainsBytecodeOffset(m_basicBlocks[i].get(), bytecodeOffset))
+                    return m_basicBlocks[i].get();
+            }
+            return 0;
+        */
+
+        std::unique_ptr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), bytecodeOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); });
+        // We found the block we were looking for.
+        if (blockContainsBytecodeOffset((*basicBlock).get(), bytecodeOffset))
+            return (*basicBlock).get();
+
+        // Basic block is to the left of the returned block.
+        if (bytecodeOffset < (*basicBlock)->leaderOffset()) {
+            ASSERT(basicBlock - 1 >= m_basicBlocks.data());
+            ASSERT(blockContainsBytecodeOffset(basicBlock[-1].get(), bytecodeOffset));
+            return basicBlock[-1].get();
+        }
+
+        // Basic block is to the right of the returned block.
+        ASSERT(&basicBlock[1] <= &m_basicBlocks.last());
+        ASSERT(blockContainsBytecodeOffset(basicBlock[1].get(), bytecodeOffset));
+        return basicBlock[1].get();
+    }
+
+    BytecodeBasicBlock* findBasicBlockWithLeaderOffset(unsigned leaderOffset)
+    {
+        return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), leaderOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); })).get();
+    }
+
+    unsigned size() const { return m_basicBlocks.size(); }
+    BytecodeBasicBlock* at(unsigned index) const { return m_basicBlocks[index].get(); }
+    BytecodeBasicBlock* operator[](unsigned index) const { return at(index); }
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+    BytecodeBasicBlock* first() { return at(0); }
+    BytecodeBasicBlock* last() { return at(size() - 1); }
+
+private:
+    Block* m_codeBlock;
+    BasicBlocksVector m_basicBlocks;
+    typename Block::UnpackedInstructions& m_instructions;
+};
+
+
+template<typename Block>
+BytecodeGraph<Block>::BytecodeGraph(Block* codeBlock, typename Block::UnpackedInstructions& instructions)
+    : m_codeBlock(codeBlock)
+    , m_instructions(instructions)
+{
+    ASSERT(m_codeBlock);
+    BytecodeBasicBlock::compute(m_codeBlock, instructions.begin(), instructions.size(), m_basicBlocks);
+    ASSERT(m_basicBlocks.size());
+}
+
+} // namespace JSC
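findBasicBlockForBytecodeOffset relies on the blocks being sorted by leader offset. A standalone sketch of the same lookup follows, with toy types and an exact rather than approximate binary search (the real code corrects the probe by one element either way instead):

#include <cstdio>
#include <vector>

struct ToyBlock { unsigned leader; unsigned length; };

// Find the last block whose leader is <= offset, then confirm containment.
static const ToyBlock* findBlock(const std::vector<ToyBlock>& blocks, unsigned offset)
{
    size_t lo = 0, hi = blocks.size();
    while (hi - lo > 1) {
        size_t mid = lo + (hi - lo) / 2;
        if (blocks[mid].leader <= offset)
            lo = mid;
        else
            hi = mid;
    }
    const ToyBlock& block = blocks[lo];
    if (offset >= block.leader && offset < block.leader + block.length)
        return &block;
    return nullptr; // offset falls outside every block
}

int main()
{
    std::vector<ToyBlock> blocks { { 0, 10 }, { 10, 5 }, { 15, 20 } };
    std::printf("offset 12 -> leader %u\n", findBlock(blocks, 12)->leader); // leader 10
}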
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp
new file mode 100644
index 000000000..00c9c01bb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeIntrinsicRegistry.h"
+
+#include "ArrayIteratorPrototype.h"
+#include "BuiltinNames.h"
+#include "BytecodeGenerator.h"
+#include "JSCJSValueInlines.h"
+#include "JSGeneratorFunction.h"
+#include "JSModuleLoader.h"
+#include "JSPromise.h"
+#include "Nodes.h"
+#include "StrongInlines.h"
+
+namespace JSC {
+
+#define INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET(name) m_bytecodeIntrinsicMap.add(vm.propertyNames->builtinNames().name##PrivateName().impl(), &BytecodeIntrinsicNode::emit_intrinsic_##name);
+
+BytecodeIntrinsicRegistry::BytecodeIntrinsicRegistry(VM& vm)
+    : m_vm(vm)
+    , m_bytecodeIntrinsicMap()
+{
+    JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET)
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET)
+
+    m_undefined.set(m_vm, jsUndefined());
+    m_Infinity.set(m_vm, jsDoubleNumber(std::numeric_limits<double>::infinity()));
+    m_iterationKindKey.set(m_vm, jsNumber(IterateKey));
+    m_iterationKindValue.set(m_vm, jsNumber(IterateValue));
+    m_iterationKindKeyValue.set(m_vm, jsNumber(IterateKeyValue));
+    m_MAX_ARRAY_INDEX.set(m_vm, jsNumber(MAX_ARRAY_INDEX));
+    m_MAX_STRING_LENGTH.set(m_vm, jsNumber(JSString::MaxLength));
+    m_MAX_SAFE_INTEGER.set(m_vm, jsDoubleNumber(maxSafeInteger()));
+    m_ModuleFetch.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Fetch)));
+    m_ModuleInstantiate.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Instantiate)));
+    m_ModuleSatisfy.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Satisfy)));
+    m_ModuleLink.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Link)));
+    m_ModuleReady.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Ready)));
+    m_promiseStatePending.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Pending)));
+    m_promiseStateFulfilled.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Fulfilled)));
+    m_promiseStateRejected.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Rejected)));
+    m_GeneratorResumeModeNormal.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)));
+    m_GeneratorResumeModeThrow.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)));
+    m_GeneratorResumeModeReturn.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode)));
+    m_GeneratorStateCompleted.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorState::Completed)));
+    m_GeneratorStateExecuting.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorState::Executing)));
+}
+
+BytecodeIntrinsicNode::EmitterType BytecodeIntrinsicRegistry::lookup(const Identifier& ident) const
+{
+    if (!m_vm.propertyNames->isPrivateName(ident))
+        return nullptr;
+    auto iterator = m_bytecodeIntrinsicMap.find(ident.impl());
+    if (iterator == m_bytecodeIntrinsicMap.end())
+        return nullptr;
+    return iterator->value;
+}
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) \
+    JSValue BytecodeIntrinsicRegistry::name##Value(BytecodeGenerator&) \
+    { \
+        return m_##name.get(); \
+    }
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h
new file mode 100644
index 000000000..0259bc652
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Identifier.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+class CommonIdentifiers;
+class BytecodeGenerator;
+class BytecodeIntrinsicNode;
+class RegisterID;
+class Identifier;
+
+#define JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(macro) \
+    macro(argument) \
+    macro(argumentCount) \
+    macro(assert) \
+    macro(isObject) \
+    macro(isJSArray) \
+    macro(isProxyObject) \
+    macro(isDerivedArray) \
+    macro(isRegExpObject) \
+    macro(isMap) \
+    macro(isSet) \
+    macro(tailCallForwardArguments) \
+    macro(throwTypeError) \
+    macro(throwRangeError) \
+    macro(throwOutOfMemoryError) \
+    macro(tryGetById) \
+    macro(putByValDirect) \
+    macro(toNumber) \
+    macro(toString) \
+    macro(newArrayWithSize) \
+
+#define JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(macro) \
+    macro(undefined) \
+    macro(Infinity) \
+    macro(iterationKindKey) \
+    macro(iterationKindValue) \
+    macro(iterationKindKeyValue) \
+    macro(MAX_ARRAY_INDEX) \
+    macro(MAX_STRING_LENGTH) \
+    macro(MAX_SAFE_INTEGER) \
+    macro(ModuleFetch) \
+    macro(ModuleTranslate) \
+    macro(ModuleInstantiate) \
+    macro(ModuleSatisfy) \
+    macro(ModuleLink) \
+    macro(ModuleReady) \
+    macro(promiseStatePending) \
+    macro(promiseStateFulfilled) \
+    macro(promiseStateRejected) \
+    macro(GeneratorResumeModeNormal) \
+    macro(GeneratorResumeModeThrow) \
+    macro(GeneratorResumeModeReturn) \
+    macro(GeneratorStateCompleted) \
+    macro(GeneratorStateExecuting) \
+
+
+class BytecodeIntrinsicRegistry {
+    WTF_MAKE_NONCOPYABLE(BytecodeIntrinsicRegistry);
+public:
+    explicit BytecodeIntrinsicRegistry(VM&);
+
+    typedef RegisterID* (BytecodeIntrinsicNode::* EmitterType)(BytecodeGenerator&, RegisterID*);
+
+    EmitterType lookup(const Identifier&) const;
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) JSValue name##Value(BytecodeGenerator&);
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+
+private:
+    VM& m_vm;
+    HashMap<RefPtr<UniquedStringImpl>, EmitterType, IdentifierRepHash> m_bytecodeIntrinsicMap;
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) Strong<Unknown> m_##name;
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+};
+
+} // namespace JSC
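The registry above is driven by an X-macro: a single name list expands into both the emitter declarations and the map initialization. A self-contained toy version of the pattern (hypothetical names, not JSC's macros or emitters):

#include <cstdio>
#include <map>
#include <string>

#define TOY_INTRINSICS_EACH_NAME(macro) \
    macro(isObject) \
    macro(toNumber)

// First expansion: one emitter function per listed name.
#define TOY_DECLARE(name) \
    static const char* emit_##name() { return "emitted " #name; }
TOY_INTRINSICS_EACH_NAME(TOY_DECLARE)
#undef TOY_DECLARE

int main()
{
    // Second expansion of the same list: populate the lookup table.
    std::map<std::string, const char* (*)()> registry;
#define TOY_REGISTER(name) registry["@" #name] = emit_##name;
    TOY_INTRINSICS_EACH_NAME(TOY_REGISTER)
#undef TOY_REGISTER

    std::printf("%s\n", registry["@isObject"]()); // prints "emitted isObject"
}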
diff --git a/Source/JavaScriptCore/bytecode/BytecodeKills.h b/Source/JavaScriptCore/bytecode/BytecodeKills.h
new file mode 100644
index 000000000..dbdd44d7a
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeKills.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include <wtf/FastBitVector.h>
+
+namespace JSC {
+
+class BytecodeLivenessAnalysis;
+
+class BytecodeKills {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    BytecodeKills()
+        : m_codeBlock(nullptr)
+    {
+    }
+    
+    // By convention, we say that non-local operands are never killed.
+    bool operandIsKilled(unsigned bytecodeIndex, int operand) const
+    {
+        ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size());
+        VirtualRegister reg(operand);
+        if (reg.isLocal())
+            return m_killSets[bytecodeIndex].contains(operand);
+        return false;
+    }
+    
+    bool operandIsKilled(Instruction* instruction, int operand) const
+    {
+        return operandIsKilled(instruction - m_codeBlock->instructions().begin(), operand);
+    }
+    
+    template<typename Functor>
+    void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const
+    {
+        ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size());
+        m_killSets[bytecodeIndex].forEachLocal(
+            [&] (unsigned local) {
+                functor(virtualRegisterForLocal(local));
+            });
+    }
+    
+    template<typename Functor>
+    void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const
+    {
+        forEachOperandKilledAt(pc - m_codeBlock->instructions().begin(), functor);
+    }
+    
+private:
+    friend class BytecodeLivenessAnalysis;
+
+    class KillSet {
+    public:
+        KillSet()
+            : m_word(0)
+        {
+        }
+
+        ~KillSet()
+        {
+            if (hasVector())
+                delete vector();
+        }
+        
+        void add(unsigned local)
+        {
+            if (isEmpty()) {
+                setOneItem(local);
+                return;
+            }
+            if (hasOneItem()) {
+                ASSERT(oneItem() != local);
+                Vector<unsigned>* vector = new Vector<unsigned>();
+                vector->append(oneItem());
+                vector->append(local);
+                setVector(vector);
+                return;
+            }
+            ASSERT(!vector()->contains(local));
+            vector()->append(local);
+        }
+        
+        template<typename Functor>
+        void forEachLocal(const Functor& functor)
+        {
+            if (isEmpty())
+                return;
+            if (hasOneItem()) {
+                functor(oneItem());
+                return;
+            }
+            for (unsigned local : *vector())
+                functor(local);
+        }
+        
+        bool contains(unsigned expectedLocal)
+        {
+            if (isEmpty())
+                return false;
+            if (hasOneItem())
+                return oneItem() == expectedLocal;
+            for (unsigned local : *vector()) {
+                if (local == expectedLocal)
+                    return true;
+            }
+            return false;
+        }
+        
+    private:
+        bool isEmpty() const
+        {
+            return !m_word;
+        }
+        
+        bool hasOneItem() const
+        {
+            return m_word & 1;
+        }
+        
+        unsigned oneItem() const
+        {
+            return m_word >> 1;
+        }
+        
+        void setOneItem(unsigned value)
+        {
+            m_word = (value << 1) | 1;
+        }
+        
+        bool hasVector() const
+        {
+            return !isEmpty() && !hasOneItem();
+        }
+        
+        Vector<unsigned>* vector()
+        {
+            return bitwise_cast<Vector<unsigned>*>(m_word);
+        }
+        
+        void setVector(Vector<unsigned>* value)
+        {
+            m_word = bitwise_cast<uintptr_t>(value);
+        }
+        
+        uintptr_t m_word;
+    };
+    
+    CodeBlock* m_codeBlock;
+    std::unique_ptr<KillSet[]> m_killSets;
+};
+
+} // namespace JSC
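KillSet's one-word encoding also reads well in isolation. Below is a toy re-creation (not JSC's class) of the tagged-word trick: the low bit distinguishes an inline item from a heap vector pointer, which works because heap allocations are at least two-byte aligned.

#include <cassert>
#include <cstdint>
#include <vector>

class ToyKillSet {
public:
    ~ToyKillSet() { if (hasVector()) delete vector(); }

    void add(unsigned local)
    {
        if (!m_word) { // empty: store the item inline, tagged with the low bit
            m_word = (static_cast<std::uintptr_t>(local) << 1) | 1;
            return;
        }
        if (m_word & 1) { // one inline item: spill both items to a heap vector
            auto* spilled = new std::vector<unsigned> { static_cast<unsigned>(m_word >> 1), local };
            m_word = reinterpret_cast<std::uintptr_t>(spilled);
            return;
        }
        vector()->push_back(local);
    }

    bool contains(unsigned local) const
    {
        if (!m_word)
            return false;
        if (m_word & 1)
            return static_cast<unsigned>(m_word >> 1) == local;
        for (unsigned candidate : *vector())
            if (candidate == local)
                return true;
        return false;
    }

private:
    bool hasVector() const { return m_word && !(m_word & 1); }
    std::vector<unsigned>* vector() const { return reinterpret_cast<std::vector<unsigned>*>(m_word); }
    std::uintptr_t m_word { 0 };
};

int main()
{
    ToyKillSet set;
    set.add(3);
    set.add(7); // forces the spill from inline item to vector
    assert(set.contains(3) && set.contains(7) && !set.contains(9));
}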
diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.json b/Source/JavaScriptCore/bytecode/BytecodeList.json
new file mode 100644
index 000000000..ada4429f7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeList.json
@@ -0,0 +1,200 @@
+[
+    {
+        "section" : "Bytecodes", "emitInHFile" : true, "emitInASMFile" : true, 
+        "macroNameComponent" : "BYTECODE", "asmPrefix" : "llint_", 
+        "bytecodes" : [
+            { "name" : "op_enter", "length" : 1 },
+            { "name" : "op_get_scope", "length" : 2 },
+            { "name" : "op_create_direct_arguments", "length" : 2 },
+            { "name" : "op_create_scoped_arguments", "length" : 3 },
+            { "name" : "op_create_cloned_arguments", "length" : 2 },
+            { "name" : "op_create_this", "length" : 5 },
+            { "name" : "op_get_argument", "length" : 4 },
+            { "name" : "op_argument_count", "length" : 2 },
+            { "name" : "op_to_this", "length" : 4 },
+            { "name" : "op_check_tdz", "length" : 2 },
+            { "name" : "op_new_object", "length" : 4 },
+            { "name" : "op_new_array", "length" : 5 },
+            { "name" : "op_new_array_with_size", "length" : 4 },
+            { "name" : "op_new_array_with_spread", "length" : 5 },
+            { "name" : "op_spread", "length" : 3 },
+            { "name" : "op_new_array_buffer", "length" : 5 },
+            { "name" : "op_new_regexp", "length" : 3 },
+            { "name" : "op_mov", "length" : 3 },
+            { "name" : "op_not", "length" : 3 },
+            { "name" : "op_eq", "length" : 4 },
+            { "name" : "op_eq_null", "length" : 3 },
+            { "name" : "op_neq", "length" : 4 },
+            { "name" : "op_neq_null", "length" : 3 },
+            { "name" : "op_stricteq", "length" : 4 },
+            { "name" : "op_nstricteq", "length" : 4 },
+            { "name" : "op_less", "length" : 4 },
+            { "name" : "op_lesseq", "length" : 4 },
+            { "name" : "op_greater", "length" : 4 },
+            { "name" : "op_greatereq", "length" : 4 },
+            { "name" : "op_inc", "length" : 2 },
+            { "name" : "op_dec", "length" : 2 },
+            { "name" : "op_to_number", "length" : 4 },
+            { "name" : "op_to_string", "length" : 3 },
+            { "name" : "op_negate", "length" : 4 },
+            { "name" : "op_add", "length" : 5 },
+            { "name" : "op_mul", "length" : 5 },
+            { "name" : "op_div", "length" : 5 },
+            { "name" : "op_mod", "length" : 4 },
+            { "name" : "op_sub", "length" : 5 },
+            { "name" : "op_pow", "length" : 4 },
+            { "name" : "op_lshift", "length" : 4 },
+            { "name" : "op_rshift", "length" : 4 },
+            { "name" : "op_urshift", "length" : 4 },
+            { "name" : "op_unsigned", "length" : 3 },
+            { "name" : "op_bitand", "length" : 5 },
+            { "name" : "op_bitxor", "length" : 5 },
+            { "name" : "op_bitor", "length" : 5 },
+            { "name" : "op_overrides_has_instance", "length" : 4 },
+            { "name" : "op_instanceof", "length" : 4 },
+            { "name" : "op_instanceof_custom", "length" : 5 },
+            { "name" : "op_typeof", "length" : 3 },
+            { "name" : "op_is_empty", "length" : 3 },
+            { "name" : "op_is_undefined", "length" : 3 },
+            { "name" : "op_is_boolean", "length" : 3 },
+            { "name" : "op_is_number", "length" : 3 },
+            { "name" : "op_is_object", "length" : 3 },
+            { "name" : "op_is_object_or_null", "length" : 3 },
+            { "name" : "op_is_function", "length" : 3 },
+            { "name" : "op_is_cell_with_type", "length" : 4 },
+            { "name" : "op_in", "length" : 5 },
+            { "name" : "op_get_array_length", "length" : 9 },
+            { "name" : "op_get_by_id", "length" : 9  },
+            { "name" : "op_get_by_id_proto_load", "length" : 9 },
+            { "name" : "op_get_by_id_unset", "length" : 9 },
+            { "name" : "op_get_by_id_with_this", "length" : 6 },
+            { "name" : "op_get_by_val_with_this", "length" : 6 },
+            { "name" : "op_try_get_by_id", "length" : 5 },
+            { "name" : "op_put_by_id", "length" : 9 },
+            { "name" : "op_put_by_id_with_this", "length" : 5 },
+            { "name" : "op_del_by_id", "length" : 4 },
+            { "name" : "op_get_by_val", "length" : 6 },
+            { "name" : "op_put_by_val", "length" : 5 },
+            { "name" : "op_put_by_val_with_this", "length" : 5 },
+            { "name" : "op_put_by_val_direct", "length" : 5 },
+            { "name" : "op_del_by_val", "length" : 4 },
+            { "name" : "op_put_by_index", "length" : 4 },
+            { "name" : "op_put_getter_by_id", "length" : 5 },
+            { "name" : "op_put_setter_by_id", "length" : 5 },
+            { "name" : "op_put_getter_setter_by_id", "length" : 6 },
+            { "name" : "op_put_getter_by_val", "length" : 5 },
+            { "name" : "op_put_setter_by_val", "length" : 5 },
+            { "name" : "op_define_data_property", "length" : 5 },
+            { "name" : "op_define_accessor_property", "length" : 6 },
+            { "name" : "op_jmp", "length" : 2 },
+            { "name" : "op_jtrue", "length" : 3 },
+            { "name" : "op_jfalse", "length" : 3 },
+            { "name" : "op_jeq_null", "length" : 3 },
+            { "name" : "op_jneq_null", "length" : 3 },
+            { "name" : "op_jneq_ptr", "length" : 5 },
+            { "name" : "op_jless", "length" : 4 },
+            { "name" : "op_jlesseq", "length" : 4 },
+            { "name" : "op_jgreater", "length" : 4 },
+            { "name" : "op_jgreatereq", "length" : 4 },
+            { "name" : "op_jnless", "length" : 4 },
+            { "name" : "op_jnlesseq", "length" : 4 },
+            { "name" : "op_jngreater", "length" : 4 },
+            { "name" : "op_jngreatereq", "length" : 4 },
+            { "name" : "op_loop_hint", "length" : 1 },
+            { "name" : "op_switch_imm", "length" : 4 },
+            { "name" : "op_switch_char", "length" : 4 },
+            { "name" : "op_switch_string", "length" : 4 },
+            { "name" : "op_new_func", "length" : 4 },
+            { "name" : "op_new_func_exp", "length" : 4 },
+            { "name" : "op_new_generator_func", "length" : 4 },
+            { "name" : "op_new_generator_func_exp", "length" : 4 },
+            { "name" : "op_new_async_func", "length" : 4 },
+            { "name" : "op_new_async_func_exp", "length" : 4 },
+            { "name" : "op_set_function_name", "length" : 3 },
+            { "name" : "op_call", "length" : 9 },
+            { "name" : "op_tail_call", "length" : 9 },
+            { "name" : "op_call_eval", "length" : 9 },
+            { "name" : "op_call_varargs", "length" : 9 },
+            { "name" : "op_tail_call_varargs", "length" : 9 },
+            { "name" : "op_tail_call_forward_arguments", "length" : 9 },
+            { "name" : "op_ret", "length" : 2 },
+            { "name" : "op_construct", "length" : 9 },
+            { "name" : "op_construct_varargs", "length" : 9 },
+            { "name" : "op_strcat", "length" : 4 },
+            { "name" : "op_to_primitive", "length" : 3 },
+            { "name" : "op_resolve_scope", "length" : 7 },
+            { "name" : "op_get_from_scope", "length" : 8 },
+            { "name" : "op_put_to_scope", "length" : 7 },
+            { "name" : "op_get_from_arguments", "length" : 5 },
+            { "name" : "op_put_to_arguments", "length" : 4 },
+            { "name" : "op_push_with_scope", "length" : 4 },
+            { "name" : "op_create_lexical_environment", "length" : 5 },
+            { "name" : "op_get_parent_scope", "length" : 3 },
+            { "name" : "op_catch", "length" : 3 },
+            { "name" : "op_throw", "length" : 2 },
+            { "name" : "op_throw_static_error", "length" : 3 },
+            { "name" : "op_debug", "length" : 3 },
+            { "name" : "op_end", "length" : 2 },
+            { "name" : "op_profile_type", "length" : 6 },
+            { "name" : "op_profile_control_flow", "length" : 2 },
+            { "name" : "op_get_enumerable_length", "length" : 3 },
+            { "name" : "op_has_indexed_property", "length" : 5 },
+            { "name" : "op_has_structure_property", "length" : 5 },
+            { "name" : "op_has_generic_property", "length" : 4 },
+            { "name" : "op_get_direct_pname", "length" : 7 },
+            { "name" : "op_get_property_enumerator", "length" : 3 },
+            { "name" : "op_enumerator_structure_pname", "length" : 4 },
+            { "name" : "op_enumerator_generic_pname", "length" : 4 },
+            { "name" : "op_to_index_string", "length" : 3 },
+            { "name" : "op_assert", "length" : 3 },
+            { "name" : "op_create_rest", "length": 4 },
+            { "name" : "op_get_rest_length", "length": 3 },
+            { "name" : "op_yield", "length" : 4 },
+            { "name" : "op_watchdog", "length" : 1 },
+            { "name" : "op_log_shadow_chicken_prologue", "length" : 2},
+            { "name" : "op_log_shadow_chicken_tail", "length" : 3}
+        ]
+    },
+    {
+        "section" : "CLoopHelpers", "emitInHFile" : true, "emitInASMFile" : false, "defaultLength" : 1,
+        "macroNameComponent" : "CLOOP_BYTECODE_HELPER",
+        "bytecodes" : [
+            { "name" : "llint_entry" },
+            { "name" : "getHostCallReturnValue" },
+            { "name" : "llint_return_to_host" },
+            { "name" : "llint_vm_entry_to_javascript" },
+            { "name" : "llint_vm_entry_to_native" },
+            { "name" : "llint_cloop_did_return_from_js_1" },
+            { "name" : "llint_cloop_did_return_from_js_2" },
+            { "name" : "llint_cloop_did_return_from_js_3" },
+            { "name" : "llint_cloop_did_return_from_js_4" },
+            { "name" : "llint_cloop_did_return_from_js_5" },
+            { "name" : "llint_cloop_did_return_from_js_6" },
+            { "name" : "llint_cloop_did_return_from_js_7" },
+            { "name" : "llint_cloop_did_return_from_js_8" },
+            { "name" : "llint_cloop_did_return_from_js_9" },
+            { "name" : "llint_cloop_did_return_from_js_10" },
+            { "name" : "llint_cloop_did_return_from_js_11" },
+            { "name" : "llint_cloop_did_return_from_js_12" }
+        ]
+    },
+    {
+        "section" : "NativeHelpers", "emitInHFile" : true, "emitInASMFile" : true, "defaultLength" : 1,
+        "macroNameComponent" : "BYTECODE_HELPER",
+        "bytecodes" : [
+            { "name" : "llint_program_prologue" },
+            { "name" : "llint_eval_prologue" },
+            { "name" : "llint_module_program_prologue" },
+            { "name" : "llint_function_for_call_prologue" },
+            { "name" : "llint_function_for_construct_prologue" },
+            { "name" : "llint_function_for_call_arity_check" },
+            { "name" : "llint_function_for_construct_arity_check" },
+            { "name" : "llint_generic_return_point" },
+            { "name" : "llint_throw_from_slow_path_trampoline" },
+            { "name" : "llint_throw_during_call_trampoline" },
+            { "name" : "llint_native_call_trampoline" },
+            { "name" : "llint_native_construct_trampoline" },
+            { "name" : "handleUncaughtException" }
+        ]
+    }
+]
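The "length" fields above are what let a scanner walk the instruction stream without decoding operands: each opcode advances the cursor by its fixed slot count. A toy traversal under that assumption (hypothetical opcodes and lengths, not JSC's table):

#include <cstddef>
#include <cstdio>
#include <vector>

enum ToyOpcode : unsigned { toy_enter, toy_mov, toy_ret };

// Fixed slot count per opcode, playing the role of the JSON "length" fields.
static unsigned toyOpcodeLength(ToyOpcode opcode)
{
    switch (opcode) {
    case toy_enter: return 1; // opcode only
    case toy_mov:   return 3; // opcode, dst, src
    case toy_ret:   return 2; // opcode, value
    }
    return 1;
}

int main()
{
    std::vector<unsigned> stream { toy_enter, toy_mov, 0, 1, toy_ret, 0 };
    for (std::size_t offset = 0; offset < stream.size(); offset += toyOpcodeLength(static_cast<ToyOpcode>(stream[offset])))
        std::printf("instruction boundary at offset %zu\n", offset); // 0, 1, 4
}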
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
index 926334c44..60eeb7174 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,299 +26,159 @@
 #include "config.h"
 #include "BytecodeLivenessAnalysis.h"
 
+#include "BytecodeKills.h"
 #include "BytecodeLivenessAnalysisInlines.h"
 #include "BytecodeUseDef.h"
 #include "CodeBlock.h"
 #include "FullBytecodeLiveness.h"
+#include "HeapInlines.h"
+#include "InterpreterInlines.h"
 #include "PreciseJumpTargets.h"
 
 namespace JSC {
 
 BytecodeLivenessAnalysis::BytecodeLivenessAnalysis(CodeBlock* codeBlock)
-    : m_codeBlock(codeBlock)
+    : m_graph(codeBlock, codeBlock->instructions())
 {
-    ASSERT(m_codeBlock);
     compute();
 }
 
-static bool isValidRegisterForLiveness(CodeBlock* codeBlock, int operand)
+template<typename Functor>
+void BytecodeLivenessAnalysis::computeDefsForBytecodeOffset(CodeBlock* codeBlock, OpcodeID opcodeID, Instruction* instruction, FastBitVector&, const Functor& functor)
 {
-    if (codeBlock->isConstantRegisterIndex(operand))
-        return false;
-    
-    VirtualRegister virtualReg(operand);
-    if (!virtualReg.isLocal())
-        return false;
-    
-    if (codeBlock->captureCount()
-        && operand <= codeBlock->captureStart()
-        && operand > codeBlock->captureEnd())
-        return false;
-    
-    return true;
-}
-
-static void setForOperand(CodeBlock* codeBlock, FastBitVector& bits, int operand)
-{
-    ASSERT(isValidRegisterForLiveness(codeBlock, operand));
-    VirtualRegister virtualReg(operand);
-    if (virtualReg.offset() > codeBlock->captureStart())
-        bits.set(virtualReg.toLocal());
-    else
-        bits.set(virtualReg.toLocal() - codeBlock->captureCount());
-}
-
-namespace {
-
-class SetBit {
-public:
-    SetBit(FastBitVector& bits)
-        : m_bits(bits)
-    {
-    }
-    
-    void operator()(CodeBlock* codeBlock, Instruction*, OpcodeID, int operand)
-    {
-        if (isValidRegisterForLiveness(codeBlock, operand))
-            setForOperand(codeBlock, m_bits, operand);
-    }
-    
-private:
-    FastBitVector& m_bits;
-};
-
-} // anonymous namespace
-
-static unsigned getLeaderOffsetForBasicBlock(RefPtr<BytecodeBasicBlock>* basicBlock)
-{
-    return (*basicBlock)->leaderBytecodeOffset();
-}
-
-static BytecodeBasicBlock* findBasicBlockWithLeaderOffset(Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned leaderOffset)
-{
-    return (*tryBinarySearch<RefPtr<BytecodeBasicBlock>, unsigned>(basicBlocks, basicBlocks.size(), leaderOffset, getLeaderOffsetForBasicBlock)).get();
-}
-
-static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset)
-{
-    unsigned leaderOffset = block->leaderBytecodeOffset();
-    return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalBytecodeLength();
+    JSC::computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
 }
 
-static BytecodeBasicBlock* findBasicBlockForBytecodeOffset(Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned bytecodeOffset)
+template<typename Functor>
+void BytecodeLivenessAnalysis::computeUsesForBytecodeOffset(CodeBlock* codeBlock, OpcodeID opcodeID, Instruction* instruction, FastBitVector&, const Functor& functor)
 {
-/*
-    for (unsigned i = 0; i < basicBlocks.size(); i++) {
-        if (blockContainsBytecodeOffset(basicBlocks[i].get(), bytecodeOffset))
-            return basicBlocks[i].get();
-    }
-    return 0;
-*/
-    RefPtr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<RefPtr<BytecodeBasicBlock>, unsigned>(
-        basicBlocks, basicBlocks.size(), bytecodeOffset, getLeaderOffsetForBasicBlock);
-    // We found the block we were looking for.
-    if (blockContainsBytecodeOffset((*basicBlock).get(), bytecodeOffset))
-        return (*basicBlock).get();
-
-    // Basic block is to the left of the returned block.
-    if (bytecodeOffset < (*basicBlock)->leaderBytecodeOffset()) {
-        ASSERT(basicBlock - 1 >= basicBlocks.data());
-        ASSERT(blockContainsBytecodeOffset(basicBlock[-1].get(), bytecodeOffset));
-        return basicBlock[-1].get();
-    }
-
-    // Basic block is to the right of the returned block.
-    ASSERT(&basicBlock[1] <= &basicBlocks.last());
-    ASSERT(blockContainsBytecodeOffset(basicBlock[1].get(), bytecodeOffset));
-    return basicBlock[1].get();
-}
-
-static void stepOverInstruction(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, FastBitVector& uses, FastBitVector& defs, FastBitVector& out)
-{
-    uses.clearAll();
-    defs.clearAll();
-    
-    SetBit setUses(uses);
-    SetBit setDefs(defs);
-    computeUsesForBytecodeOffset(codeBlock, bytecodeOffset, setUses);
-    computeDefsForBytecodeOffset(codeBlock, bytecodeOffset, setDefs);
-    
-    out.exclude(defs);
-    out.merge(uses);
-    
-    // If we have an exception handler, we want the live-in variables of the 
-    // exception handler block to be included in the live-in of this particular bytecode.
-    if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
-        BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target);
-        ASSERT(handlerBlock);
-        out.merge(handlerBlock->in());
-    }
+    JSC::computeUsesForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
 }
 
-static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned targetOffset, FastBitVector& result)
+void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result)
 {
-    ASSERT(!block->isExitBlock());
-    ASSERT(!block->isEntryBlock());
-
-    FastBitVector out = block->out();
-
-    FastBitVector uses;
-    FastBitVector defs;
-    uses.resize(out.numBits());
-    defs.resize(out.numBits());
-
-    for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) {
-        unsigned bytecodeOffset = block->bytecodeOffsets()[i];
-        if (targetOffset > bytecodeOffset)
-            break;
-        
-        stepOverInstruction(codeBlock, basicBlocks, bytecodeOffset, uses, defs, out);
-    }
-
-    result.set(out);
-}
-
-static void computeLocalLivenessForBlock(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
-{
-    if (block->isExitBlock() || block->isEntryBlock())
-        return;
-    computeLocalLivenessForBytecodeOffset(codeBlock, block, basicBlocks, block->leaderBytecodeOffset(), block->in());
-}
-
-void BytecodeLivenessAnalysis::runLivenessFixpoint()
-{
-    UnlinkedCodeBlock* unlinkedCodeBlock = m_codeBlock->unlinkedCodeBlock();
-    unsigned numberOfVariables =
-        unlinkedCodeBlock->m_numCalleeRegisters - m_codeBlock->captureCount();
-
-    for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
-        BytecodeBasicBlock* block = m_basicBlocks[i].get();
-        block->in().resize(numberOfVariables);
-        block->out().resize(numberOfVariables);
-    }
-
-    bool changed;
-    m_basicBlocks.last()->in().clearAll();
-    m_basicBlocks.last()->out().clearAll();
-    FastBitVector newOut;
-    newOut.resize(m_basicBlocks.last()->out().numBits());
-    do {
-        changed = false;
-        for (int i = m_basicBlocks.size() - 2; i >= 0; i--) {
-            BytecodeBasicBlock* block = m_basicBlocks[i].get();
-            newOut.clearAll();
-            for (unsigned j = 0; j < block->successors().size(); j++)
-                newOut.merge(block->successors()[j]->in());
-            bool outDidChange = block->out().setAndCheck(newOut);
-            computeLocalLivenessForBlock(m_codeBlock, block, m_basicBlocks);
-            changed |= outDidChange;
-        }
-    } while (changed);
-}
-
-void BytecodeLivenessAnalysis::getLivenessInfoForNonCapturedVarsAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result)
-{
-    BytecodeBasicBlock* block = findBasicBlockForBytecodeOffset(m_basicBlocks, bytecodeOffset);
+    BytecodeBasicBlock* block = m_graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
     ASSERT(block);
     ASSERT(!block->isEntryBlock());
     ASSERT(!block->isExitBlock());
     result.resize(block->out().numBits());
-    computeLocalLivenessForBytecodeOffset(m_codeBlock, block, m_basicBlocks, bytecodeOffset, result);
+    computeLocalLivenessForBytecodeOffset(m_graph, block, bytecodeOffset, result);
 }
 
 bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset)
 {
-    if (operandIsAlwaysLive(m_codeBlock, operand))
+    if (operandIsAlwaysLive(operand))
         return true;
     FastBitVector result;
-    getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, result);
-    return operandThatIsNotAlwaysLiveIsLive(m_codeBlock, result, operand);
+    getLivenessInfoAtBytecodeOffset(bytecodeOffset, result);
+    return operandThatIsNotAlwaysLiveIsLive(result, operand);
 }
 
-FastBitVector getLivenessInfo(CodeBlock* codeBlock, const FastBitVector& out)
+FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
 {
-    FastBitVector result;
-
-    unsigned numCapturedVars = codeBlock->captureCount();
-    if (numCapturedVars) {
-        int firstCapturedLocal = VirtualRegister(codeBlock->captureStart()).toLocal();
-        result.resize(out.numBits() + numCapturedVars);
-        for (unsigned i = 0; i < numCapturedVars; ++i)
-            result.set(firstCapturedLocal + i);
-    } else
-        result.resize(out.numBits());
-
-    int outLength = out.numBits();
-    ASSERT(outLength >= 0);
-    for (int i = 0; i < outLength; i++) {
-        if (!out.get(i))
-            continue;
-
-        if (!numCapturedVars) {
-            result.set(i);
-            continue;
-        }
-
-        if (virtualRegisterForLocal(i).offset() > codeBlock->captureStart())
-            result.set(i);
-        else 
-            result.set(numCapturedVars + i);
-    }
-    return result;
+    FastBitVector out;
+    getLivenessInfoAtBytecodeOffset(bytecodeOffset, out);
+    return out;
 }
 
-FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
+void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
 {
     FastBitVector out;
-    getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, out);
-    return getLivenessInfo(m_codeBlock, out);
+    CodeBlock* codeBlock = m_graph.codeBlock();
+    
+    result.m_map.resize(codeBlock->instructions().size());
+    
+    for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
+        if (block->isEntryBlock() || block->isExitBlock())
+            continue;
+        
+        out = block->out();
+        
+        for (unsigned i = block->offsets().size(); i--;) {
+            unsigned bytecodeOffset = block->offsets()[i];
+            stepOverInstruction(m_graph, bytecodeOffset, out);
+            result.m_map[bytecodeOffset] = out;
+        }
+    }
 }
 
-void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
+void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
 {
     FastBitVector out;
-    FastBitVector uses;
-    FastBitVector defs;
     
-    result.m_codeBlock = m_codeBlock;
-    result.m_map.clear();
+    CodeBlock* codeBlock = m_graph.codeBlock();
+    result.m_codeBlock = codeBlock;
+    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(codeBlock->instructions().size());
     
-    for (unsigned i = m_basicBlocks.size(); i--;) {
-        BytecodeBasicBlock* block = m_basicBlocks[i].get();
+    for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
         if (block->isEntryBlock() || block->isExitBlock())
             continue;
         
         out = block->out();
-        uses.resize(out.numBits());
-        defs.resize(out.numBits());
         
-        for (unsigned i = block->bytecodeOffsets().size(); i--;) {
-            unsigned bytecodeOffset = block->bytecodeOffsets()[i];
-            stepOverInstruction(m_codeBlock, m_basicBlocks, bytecodeOffset, uses, defs, out);
-            result.m_map.add(bytecodeOffset, out);
+        for (unsigned i = block->offsets().size(); i--;) {
+            unsigned bytecodeOffset = block->offsets()[i];
+            stepOverInstruction(
+                m_graph, bytecodeOffset, out,
+                [&] (unsigned index) {
+                    // Use functor: a variable that is used here but dead
+                    // below this point has its last use at this bytecode,
+                    // so record it as killed here.
+                    if (out[index])
+                        return;
+                    result.m_killSets[bytecodeOffset].add(index);
+                    out[index] = true;
+                },
+                [&] (unsigned index) {
+                    // Def functor: the variable is overwritten here, so it
+                    // is not live above this bytecode.
+                    out[index] = false;
+                });
         }
     }
 }
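
For illustration only (not part of the patch): the kill-set logic above can be read as follows. During the backward walk, a use of a variable whose liveness bit is still clear means nothing later reads it, so in forward execution order this bytecode is that variable's last use, i.e. its kill point. A minimal standalone C++ sketch with toy types (ToyInsn, kills, and live are illustrative names, not the patch's API):

    #include <cstdio>
    #include <set>
    #include <vector>

    struct ToyInsn { std::vector<int> uses; std::vector<int> defs; };

    int main()
    {
        std::vector<ToyInsn> insns = {
            { /* uses */ {0}, /* defs */ {1} },  // insn 0: reads v0, writes v1
            { /* uses */ {1}, /* defs */ {2} },  // insn 1: last use of v1
        };
        std::set<int> live;                            // live-after the last insn: empty
        std::vector<std::set<int>> kills(insns.size());
        for (size_t i = insns.size(); i--;) {
            for (int d : insns[i].defs) live.erase(d);    // defs first (backward walk)
            for (int u : insns[i].uses) {
                if (!live.count(u)) kills[i].insert(u);   // dead below => killed here
                live.insert(u);
            }
        }
        std::printf("insn 1 kills v1: %d\n", (int)kills[1].count(1)); // prints 1
        return 0;
    }
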
 
 void BytecodeLivenessAnalysis::dumpResults()
 {
-    Interpreter* interpreter = m_codeBlock->vm()->interpreter;
-    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
-    for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
-        BytecodeBasicBlock* block = m_basicBlocks[i].get();
-        dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i, block, block->leaderBytecodeOffset(), block->totalBytecodeLength());
-        dataLogF("Predecessors: ");
-        for (unsigned j = 0; j < block->predecessors().size(); j++) {
-            BytecodeBasicBlock* predecessor = block->predecessors()[j];
-            dataLogF("%p ", predecessor);
+    CodeBlock* codeBlock = m_graph.codeBlock();
+    dataLog("\nDumping bytecode liveness for ", *codeBlock, ":\n");
+    Interpreter* interpreter = codeBlock->vm()->interpreter;
+    Instruction* instructionsBegin = codeBlock->instructions().begin();
+    unsigned i = 0;
+
+    unsigned numberOfBlocks = m_graph.size();
+    Vector<FastBitVector> predecessors(numberOfBlocks);
+    for (BytecodeBasicBlock* block : m_graph)
+        predecessors[block->index()].resize(numberOfBlocks);
+    for (BytecodeBasicBlock* block : m_graph) {
+        for (unsigned j = 0; j < block->successors().size(); j++) {
+            unsigned blockIndex = block->index();
+            unsigned successorIndex = block->successors()[j]->index();
+            predecessors[successorIndex][blockIndex] = true;
+        }
+    }
+
+    auto dumpBitVector = [] (FastBitVector& bits) {
+        for (unsigned j = 0; j < bits.numBits(); j++) {
+            if (bits[j])
+                dataLogF(" %u", j);
         }
+    };
+
+    for (BytecodeBasicBlock* block : m_graph) {
+        dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i++, block, block->leaderOffset(), block->totalLength());
+
+        dataLogF("Predecessors:");
+        dumpBitVector(predecessors[block->index()]);
         dataLogF("\n");
-        dataLogF("Successors: ");
+
+        dataLogF("Successors:");
+        FastBitVector successors;
+        successors.resize(numberOfBlocks);
         for (unsigned j = 0; j < block->successors().size(); j++) {
             BytecodeBasicBlock* successor = block->successors()[j];
-            dataLogF("%p ", successor);
+            successors[successor->index()] = true;
         }
+        dumpBitVector(successors); // Dump in sorted order.
         dataLogF("\n");
+
         if (block->isEntryBlock()) {
             dataLogF("Entry block %p\n", block);
             continue;
@@ -327,38 +187,30 @@ void BytecodeLivenessAnalysis::dumpResults()
             dataLogF("Exit block: %p\n", block);
             continue;
         }
-        for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) {
+        for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
             const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset];
 
-            dataLogF("Live variables: ");
+            dataLogF("Live variables:");
             FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(bytecodeOffset);
-            for (unsigned j = 0; j < liveBefore.numBits(); j++) {
-                if (liveBefore.get(j))
-                    dataLogF("%u ", j);
-            }
+            dumpBitVector(liveBefore);
             dataLogF("\n");
-            m_codeBlock->dumpBytecode(WTF::dataFile(), m_codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction);
+            codeBlock->dumpBytecode(WTF::dataFile(), codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction);
 
             OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
             unsigned opcodeLength = opcodeLengths[opcodeID];
             bytecodeOffset += opcodeLength;
         }
 
-        dataLogF("Live variables: ");
+        dataLogF("Live variables:");
         FastBitVector liveAfter = block->out();
-        for (unsigned j = 0; j < liveAfter.numBits(); j++) {
-            if (liveAfter.get(j))
-                dataLogF("%u ", j);
-        }
+        dumpBitVector(liveAfter);
         dataLogF("\n");
     }
 }
 
 void BytecodeLivenessAnalysis::compute()
 {
-    computeBytecodeBasicBlocks(m_codeBlock, m_basicBlocks);
-    ASSERT(m_basicBlocks.size());
-    runLivenessFixpoint();
+    runLivenessFixpoint(m_graph);
 
     if (Options::dumpBytecodeLivenessResults())
         dumpResults();
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
index 349912175..e12cd8edc 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,45 +23,64 @@
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef BytecodeLivenessAnalysis_h
-#define BytecodeLivenessAnalysis_h
+#pragma once
 
 #include "BytecodeBasicBlock.h"
+#include "BytecodeGraph.h"
+#include "CodeBlock.h"
 #include <wtf/FastBitVector.h>
 #include <wtf/HashMap.h>
 #include <wtf/Vector.h>
 
 namespace JSC {
 
-class CodeBlock;
+class BytecodeKills;
 class FullBytecodeLiveness;
 
-class BytecodeLivenessAnalysis {
+template<typename DerivedAnalysis>
+class BytecodeLivenessPropagation {
+protected:
+    template<typename Graph, typename UseFunctor, typename DefFunctor> void stepOverInstruction(Graph&, unsigned bytecodeOffset, FastBitVector& out, const UseFunctor&, const DefFunctor&);
+
+    template<typename Graph> void stepOverInstruction(Graph&, unsigned bytecodeOffset, FastBitVector& out);
+
+    template<typename Graph> bool computeLocalLivenessForBytecodeOffset(Graph&, BytecodeBasicBlock*, unsigned targetOffset, FastBitVector& result);
+
+    template<typename Graph> bool computeLocalLivenessForBlock(Graph&, BytecodeBasicBlock*);
+
+    template<typename Graph> FastBitVector getLivenessInfoAtBytecodeOffset(Graph&, unsigned bytecodeOffset);
+
+    template<typename Graph> void runLivenessFixpoint(Graph&);
+};
+
+class BytecodeLivenessAnalysis : private BytecodeLivenessPropagation<BytecodeLivenessAnalysis> {
+    WTF_MAKE_FAST_ALLOCATED;
+    WTF_MAKE_NONCOPYABLE(BytecodeLivenessAnalysis);
 public:
+    friend class BytecodeLivenessPropagation<BytecodeLivenessAnalysis>;
     BytecodeLivenessAnalysis(CodeBlock*);
     
     bool operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset);
     FastBitVector getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset);
     
     void computeFullLiveness(FullBytecodeLiveness& result);
+    void computeKills(BytecodeKills& result);
 
 private:
     void compute();
-    void runLivenessFixpoint();
     void dumpResults();
 
-    void getLivenessInfoForNonCapturedVarsAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&);
+    void getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&);
 
-    CodeBlock* m_codeBlock;
-    Vector<RefPtr<BytecodeBasicBlock> > m_basicBlocks;
-};
+    template<typename Functor> void computeDefsForBytecodeOffset(CodeBlock*, OpcodeID, Instruction*, FastBitVector&, const Functor&);
+    template<typename Functor> void computeUsesForBytecodeOffset(CodeBlock*, OpcodeID, Instruction*, FastBitVector&, const Functor&);
 
-inline bool operandIsAlwaysLive(CodeBlock*, int operand);
-inline bool operandThatIsNotAlwaysLiveIsLive(CodeBlock*, const FastBitVector& out, int operand);
-inline bool operandIsLive(CodeBlock*, const FastBitVector& out, int operand);
+    BytecodeGraph<CodeBlock> m_graph;
+};
 
-FastBitVector getLivenessInfo(CodeBlock*, const FastBitVector& out);
+inline bool operandIsAlwaysLive(int operand);
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand);
+inline bool operandIsLive(const FastBitVector& out, int operand);
+inline bool isValidRegisterForLiveness(int operand);
 
 } // namespace JSC
-
-#endif // BytecodeLivenessAnalysis_h
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
index 8824bd85c..3371237b8 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,38 +23,179 @@
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef BytecodeLivenessAnalysisInlines_h
-#define BytecodeLivenessAnalysisInlines_h
+#pragma once
 
+#include "BytecodeGraph.h"
 #include "BytecodeLivenessAnalysis.h"
 #include "CodeBlock.h"
+#include "Interpreter.h"
+#include "Operations.h"
 
 namespace JSC {
 
-inline bool operandIsAlwaysLive(CodeBlock* codeBlock, int operand)
+inline bool operandIsAlwaysLive(int operand)
 {
-    if (VirtualRegister(operand).isArgument())
-        return true;
-    return operand <= codeBlock->captureStart() && operand > codeBlock->captureEnd();
+    return !VirtualRegister(operand).isLocal();
 }
 
-inline bool operandThatIsNotAlwaysLiveIsLive(CodeBlock* codeBlock, const FastBitVector& out, int operand)
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand)
+{
+    unsigned local = VirtualRegister(operand).toLocal();
+    if (local >= out.numBits())
+        return false;
+    return out[local];
+}
+
+inline bool operandIsLive(const FastBitVector& out, int operand)
+{
+    return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand);
+}
+
+inline bool isValidRegisterForLiveness(int operand)
 {
     VirtualRegister virtualReg(operand);
-    if (virtualReg.offset() > codeBlock->captureStart())
-        return out.get(virtualReg.toLocal());
-    size_t index = virtualReg.toLocal() - codeBlock->captureCount();
-    if (index >= out.numBits())
+    if (virtualReg.isConstant())
         return false;
-    return out.get(index);
+    return virtualReg.isLocal();
+}
+
+// Simplified interface to bytecode use/def, which determines defs first and then uses, and includes
+// exception handlers in the uses.
+template<typename DerivedAnalysis>
+template<typename Graph, typename UseFunctor, typename DefFunctor>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::stepOverInstruction(Graph& graph, unsigned bytecodeOffset, FastBitVector& out, const UseFunctor& use, const DefFunctor& def)
+{
+    // This abstractly executes the instruction in reverse. Instructions logically first use operands and
+    // then define operands. This logical ordering is necessary for operations that use and def the same
+    // operand, like:
+    //
+    //     op_add loc1, loc1, loc2
+    //
+    // The use of loc1 happens before the def of loc1. That's a semantic requirement since the add
+    // operation cannot travel forward in time to read the value that it will produce after reading that
+    // value. Since we are executing in reverse, this means that we must do defs before uses (reverse of
+    // uses before defs).
+    //
+    // Since this is a liveness analysis, this ordering ends up being particularly important: if we did
+    // uses before defs, then the add operation above would appear to not have loc1 live, since we'd
+    // first add it to the out set (the use), and then we'd remove it (the def).
+
+    auto* codeBlock = graph.codeBlock();
+    Interpreter* interpreter = codeBlock->vm()->interpreter;
+    auto* instructionsBegin = graph.instructions().begin();
+    auto* instruction = &instructionsBegin[bytecodeOffset];
+    OpcodeID opcodeID = interpreter->getOpcodeID(*instruction);
+
+    static_cast<DerivedAnalysis*>(this)->computeDefsForBytecodeOffset(
+        codeBlock, opcodeID, instruction, out,
+        [&] (typename Graph::CodeBlock*, typename Graph::Instruction*, OpcodeID, int operand) {
+            if (isValidRegisterForLiveness(operand))
+                def(VirtualRegister(operand).toLocal());
+        });
+
+    static_cast<DerivedAnalysis*>(this)->computeUsesForBytecodeOffset(
+        codeBlock, opcodeID, instruction, out,
+        [&] (typename Graph::CodeBlock*, typename Graph::Instruction*, OpcodeID, int operand) {
+            if (isValidRegisterForLiveness(operand))
+                use(VirtualRegister(operand).toLocal());
+        });
+
+    // If we have an exception handler, we want the live-in variables of the 
+    // exception handler block to be included in the live-in of this particular bytecode.
+    if (auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
+        BytecodeBasicBlock* handlerBlock = graph.findBasicBlockWithLeaderOffset(handler->target);
+        ASSERT(handlerBlock);
+        handlerBlock->in().forEachSetBit(use);
+    }
 }
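
For illustration only (not part of the patch): a minimal standalone sketch of the def-before-use ordering described in the comment above, with std::bitset standing in for FastBitVector, applied to the "op_add loc1, loc1, loc2" example:

    #include <bitset>
    #include <cassert>

    int main()
    {
        // Backward walk over "op_add loc1, loc1, loc2".
        // Bit 0 = loc1, bit 1 = loc2; assume both are dead after the add.
        std::bitset<2> out;
        out.reset(0); // def of loc1 first: clear the defined value...
        out.set(0);   // ...then use of loc1: it must be live before the add
        out.set(1);   // use of loc2
        assert(out.test(0) && out.test(1)); // both live-in, as expected
        return 0;
    }
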
 
-inline bool operandIsLive(CodeBlock* codeBlock, const FastBitVector& out, int operand)
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::stepOverInstruction(Graph& graph, unsigned bytecodeOffset, FastBitVector& out)
 {
-    return operandIsAlwaysLive(codeBlock, operand) || operandThatIsNotAlwaysLiveIsLive(codeBlock, out, operand);
+    stepOverInstruction(
+        graph, bytecodeOffset, out,
+        [&] (unsigned bitIndex) {
+            // This is the use functor, so we set the bit.
+            out[bitIndex] = true;
+        },
+        [&] (unsigned bitIndex) {
+            // This is the def functor, so we clear the bit.
+            out[bitIndex] = false;
+        });
 }
 
-} // namespace JSC
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline bool BytecodeLivenessPropagation<DerivedAnalysis>::computeLocalLivenessForBytecodeOffset(Graph& graph, BytecodeBasicBlock* block, unsigned targetOffset, FastBitVector& result)
+{
+    ASSERT(!block->isExitBlock());
+    ASSERT(!block->isEntryBlock());
 
-#endif // BytecodeLivenessAnalysisInlines_h
+    FastBitVector out = block->out();
 
+    for (int i = block->offsets().size() - 1; i >= 0; i--) {
+        unsigned bytecodeOffset = block->offsets()[i];
+        if (targetOffset > bytecodeOffset)
+            break;
+        stepOverInstruction(graph, bytecodeOffset, out);
+    }
+
+    return result.setAndCheck(out);
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline bool BytecodeLivenessPropagation<DerivedAnalysis>::computeLocalLivenessForBlock(Graph& graph, BytecodeBasicBlock* block)
+{
+    if (block->isExitBlock() || block->isEntryBlock())
+        return false;
+    return computeLocalLivenessForBytecodeOffset(graph, block, block->leaderOffset(), block->in());
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline FastBitVector BytecodeLivenessPropagation<DerivedAnalysis>::getLivenessInfoAtBytecodeOffset(Graph& graph, unsigned bytecodeOffset)
+{
+    BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
+    ASSERT(block);
+    ASSERT(!block->isEntryBlock());
+    ASSERT(!block->isExitBlock());
+    FastBitVector out;
+    out.resize(block->out().numBits());
+    computeLocalLivenessForBytecodeOffset(graph, block, bytecodeOffset, out);
+    return out;
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::runLivenessFixpoint(Graph& graph)
+{
+    auto* codeBlock = graph.codeBlock();
+    unsigned numberOfVariables = codeBlock->numCalleeLocals();
+    for (BytecodeBasicBlock* block : graph) {
+        block->in().resize(numberOfVariables);
+        block->out().resize(numberOfVariables);
+        block->in().clearAll();
+        block->out().clearAll();
+    }
+
+    bool changed;
+    BytecodeBasicBlock* lastBlock = graph.last();
+    lastBlock->in().clearAll();
+    lastBlock->out().clearAll();
+    FastBitVector newOut;
+    newOut.resize(lastBlock->out().numBits());
+    do {
+        changed = false;
+        for (std::unique_ptr<BytecodeBasicBlock>& block : graph.basicBlocksInReverseOrder()) {
+            newOut.clearAll();
+            for (BytecodeBasicBlock* successor : block->successors())
+                newOut |= successor->in();
+            block->out() = newOut;
+            changed |= computeLocalLivenessForBlock(graph, block.get());
+        }
+    } while (changed);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp b/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp
new file mode 100644
index 000000000..6dadb6e74
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeRewriter.h"
+
+#include "HeapInlines.h"
+#include "PreciseJumpTargetsInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC {
+
+void BytecodeRewriter::applyModification()
+{
+    for (size_t insertionIndex = m_insertions.size(); insertionIndex--;) {
+        Insertion& insertion = m_insertions[insertionIndex];
+        if (insertion.type == Insertion::Type::Remove)
+            m_graph.instructions().remove(insertion.index.bytecodeOffset, insertion.length());
+        else {
+            if (insertion.includeBranch == IncludeBranch::Yes) {
+                int finalOffset = insertion.index.bytecodeOffset + calculateDifference(m_insertions.begin(), m_insertions.begin() + insertionIndex);
+                adjustJumpTargetsInFragment(finalOffset, insertion);
+            }
+            m_graph.instructions().insertVector(insertion.index.bytecodeOffset, insertion.instructions);
+        }
+    }
+    m_insertions.clear();
+}
+
+void BytecodeRewriter::execute()
+{
+    WTF::bubbleSort(m_insertions.begin(), m_insertions.end(), [] (const Insertion& lhs, const Insertion& rhs) {
+        return lhs.index < rhs.index;
+    });
+
+    UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+    codeBlock->applyModification(*this);
+}
+
+void BytecodeRewriter::adjustJumpTargetsInFragment(unsigned finalOffset, Insertion& insertion)
+{
+    auto& fragment = insertion.instructions;
+    UnlinkedInstruction* instructionsBegin = fragment.data();
+    for (unsigned fragmentOffset = 0, fragmentCount = fragment.size(); fragmentOffset < fragmentCount;) {
+        UnlinkedInstruction& instruction = fragment[fragmentOffset];
+        OpcodeID opcodeID = instruction.u.opcode;
+        if (isBranch(opcodeID)) {
+            unsigned bytecodeOffset = finalOffset + fragmentOffset;
+            UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+            extractStoredJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, fragmentOffset, [&](int32_t& label) {
+                int absoluteOffset = adjustAbsoluteOffset(label);
+                label = absoluteOffset - static_cast(bytecodeOffset);
+            });
+        }
+        fragmentOffset += opcodeLength(opcodeID);
+    }
+}
+
+void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, Vector<UnlinkedInstruction>&& fragment)
+{
+    ASSERT(insertionPoint.position == Position::Before || insertionPoint.position == Position::After);
+    m_insertions.append(Insertion {
+        insertionPoint,
+        Insertion::Type::Insert,
+        includeBranch,
+        0,
+        WTFMove(fragment)
+    });
+}
+
+int BytecodeRewriter::adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint)
+{
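+    // Editorial note, not in the original patch: the lower_bound searches
+    // below assume m_insertions is already sorted by InsertionPoint
+    // (execute() bubble-sorts it before modifications are applied). The
+    // adjusted jump distance is the original distance plus the net length
+    // of the insertions and removals lying between the two points, as
+    // summed by calculateDifference().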
+    if (startPoint < jumpTargetPoint) {
+        int jumpTarget = jumpTargetPoint.bytecodeOffset;
+        auto start = std::lower_bound(m_insertions.begin(), m_insertions.end(), startPoint, [&] (const Insertion& insertion, InsertionPoint startPoint) {
+            return insertion.index < startPoint;
+        });
+        if (start != m_insertions.end()) {
+            auto end = std::lower_bound(m_insertions.begin(), m_insertions.end(), jumpTargetPoint, [&] (const Insertion& insertion, InsertionPoint jumpTargetPoint) {
+                return insertion.index < jumpTargetPoint;
+            });
+            jumpTarget += calculateDifference(start, end);
+        }
+        return jumpTarget - startPoint.bytecodeOffset;
+    }
+
+    if (startPoint == jumpTargetPoint)
+        return 0;
+
+    return -adjustJumpTarget(jumpTargetPoint, startPoint);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeRewriter.h b/Source/JavaScriptCore/bytecode/BytecodeRewriter.h
new file mode 100644
index 000000000..035f900a7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeRewriter.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeGraph.h"
+#include "Bytecodes.h"
+#include "Opcode.h"
+#include "UnlinkedCodeBlock.h"
+#include 
+
+namespace JSC {
+
+// BytecodeRewriter offers the ability to insert and remove bytecodes, including jump operations.
+//
+// We use the original bytecode offsets as labels. When you emit jumps, you can specify the jump target by
+// using the original bytecode offsets; the rewriter later converts these offsets to the appropriate values.
+// We also use the labels to represent the positions where new bytecodes are inserted.
+//
+//                      |  [bytecode]  |  [bytecode]  |
+//   offsets            A              B              C
+//
+// We can use the above "A", "B", and "C" offsets as labels, and the rewriter can insert bytecode fragments
+// before and after a label. For example, if you insert a fragment after "B", the layout becomes the following.
+//
+//                      |  [bytecode]  |  [fragment]  [bytecode]  |
+//   offsets            A              B                          C
+//
+//  Even if you remove some original bytecodes, the offsets remain valid as labels. For example, when you remove A's bytecode,
+//  the layout becomes the following.
+//
+//                      |              |  [bytecode]  |
+//   offsets            A              B              C
+//
+//  You can still insert fragments before and after "A".
+//
+//                      |  [fragment]  |  [bytecode]  |
+//   offsets            A              B              C
+//
+//   We can insert bytecode fragments "Before" and "After" the labels. The chosen position, either "Before" or "After",
+//   matters when the label is involved in jumps. For example, when you have a jump to the position "B",
+//
+//                      |  [bytecode]  |  [bytecode]  |
+//   offsets            A              B              C
+//                                     ^
+//                                     jump to here.
+//
+//  and you insert bytecode before/after "B",
+//
+//                      |  [bytecode] [before]  |  [after] [bytecode]  |
+//   offsets            A                       B              C
+//                                              ^
+//                                              jump to here.
+//
+//  as you can see, execution that jumps to "B" does not execute the [before] code.
+class BytecodeRewriter {
+WTF_MAKE_NONCOPYABLE(BytecodeRewriter);
+public:
+    enum class Position : int8_t {
+        EntryPoint = -2,
+        Before = -1,
+        LabelPoint = 0,
+        After = 1,
+        OriginalBytecodePoint = 2,
+    };
+
+    enum class IncludeBranch : uint8_t {
+        No = 0,
+        Yes = 1,
+    };
+
+    struct InsertionPoint {
+        int bytecodeOffset;
+        Position position;
+
+        InsertionPoint(int offset, Position pos)
+            : bytecodeOffset(offset)
+            , position(pos)
+        {
+        }
+
+        bool operator<(const InsertionPoint& other) const
+        {
+            if (bytecodeOffset == other.bytecodeOffset)
+                return position < other.position;
+            return bytecodeOffset < other.bytecodeOffset;
+        }
+
+        bool operator==(const InsertionPoint& other) const
+        {
+            return bytecodeOffset == other.bytecodeOffset && position == other.position;
+        }
+    };
+
+private:
+    struct Insertion {
+        enum class Type : uint8_t { Insert = 0, Remove = 1, };
+
+        size_t length() const
+        {
+            if (type == Type::Remove)
+                return removeLength;
+            return instructions.size();
+        }
+
+        InsertionPoint index;
+        Type type;
+        IncludeBranch includeBranch;
+        size_t removeLength;
+        Vector<UnlinkedInstruction> instructions;
+    };
+
+public:
+    class Fragment {
+    WTF_MAKE_NONCOPYABLE(Fragment);
+    public:
+        Fragment(Vector<UnlinkedInstruction>& fragment, IncludeBranch& includeBranch)
+            : m_fragment(fragment)
+            , m_includeBranch(includeBranch)
+        {
+        }
+
+        template<typename... Args>
+        void appendInstruction(OpcodeID opcodeID, Args... args)
+        {
+            if (isBranch(opcodeID))
+                m_includeBranch = IncludeBranch::Yes;
+
+            UnlinkedInstruction instructions[sizeof...(args) + 1] = {
+                UnlinkedInstruction(opcodeID),
+                UnlinkedInstruction(args)...
+            };
+            m_fragment.append(instructions, sizeof...(args) + 1);
+        }
+
+    private:
+        Vector<UnlinkedInstruction>& m_fragment;
+        IncludeBranch& m_includeBranch;
+    };
+
+    BytecodeRewriter(BytecodeGraph<UnlinkedCodeBlock>& graph)
+        : m_graph(graph)
+    {
+    }
+
+    template<typename Function>
+    void insertFragmentBefore(unsigned bytecodeOffset, Function function)
+    {
+        IncludeBranch includeBranch = IncludeBranch::No;
+        Vector<UnlinkedInstruction> instructions;
+        Fragment fragment(instructions, includeBranch);
+        function(fragment);
+        insertImpl(InsertionPoint(bytecodeOffset, Position::Before), includeBranch, WTFMove(instructions));
+    }
+
+    template<typename Function>
+    void insertFragmentAfter(unsigned bytecodeOffset, Function function)
+    {
+        IncludeBranch includeBranch = IncludeBranch::No;
+        Vector<UnlinkedInstruction> instructions;
+        Fragment fragment(instructions, includeBranch);
+        function(fragment);
+        insertImpl(InsertionPoint(bytecodeOffset, Position::After), includeBranch, WTFMove(instructions));
+    }
+
+    void removeBytecode(unsigned bytecodeOffset)
+    {
+        m_insertions.append(Insertion { InsertionPoint(bytecodeOffset, Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, opcodeLength(m_graph.instructions()[bytecodeOffset].u.opcode), { } });
+    }
+
+    void execute();
+
+    BytecodeGraph<UnlinkedCodeBlock>& graph() { return m_graph; }
+
+    int adjustAbsoluteOffset(int absoluteOffset)
+    {
+        return adjustJumpTarget(InsertionPoint(0, Position::EntryPoint), InsertionPoint(absoluteOffset, Position::LabelPoint));
+    }
+
+    int adjustJumpTarget(int originalBytecodeOffset, int originalJumpTarget)
+    {
+        return adjustJumpTarget(InsertionPoint(originalBytecodeOffset, Position::LabelPoint), InsertionPoint(originalJumpTarget, Position::LabelPoint));
+    }
+
+private:
+    void insertImpl(InsertionPoint, IncludeBranch, Vector<UnlinkedInstruction>&& fragment);
+
+    friend class UnlinkedCodeBlock;
+    void applyModification();
+    void adjustJumpTargetsInFragment(unsigned finalOffset, Insertion&);
+
+    int adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint);
+    template<typename Iterator> int calculateDifference(Iterator begin, Iterator end);
+
+    BytecodeGraph<UnlinkedCodeBlock>& m_graph;
+    Vector m_insertions;
+};
+
+template<typename Iterator>
+inline int BytecodeRewriter::calculateDifference(Iterator begin, Iterator end)
+{
+    int result = 0;
+    for (; begin != end; ++begin) {
+        if (begin->type == Insertion::Type::Remove)
+            result -= begin->length();
+        else
+            result += begin->length();
+    }
+    return result;
+}
+
+} // namespace JSC
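
For illustration only (not part of the patch): a hypothetical use of the rewriter API above. The offsets 0 and 8 are assumed label points in some existing instruction stream, and op_nop is used as a stand-in opcode:

    #include "BytecodeRewriter.h"

    void insertStubs(JSC::BytecodeRewriter& rewriter)
    {
        // A fragment placed Before offset 8: jumps targeting offset 8 skip it.
        rewriter.insertFragmentBefore(8, [&] (JSC::BytecodeRewriter::Fragment& fragment) {
            fragment.appendInstruction(JSC::op_nop); // stand-in opcode
        });

        // A fragment placed After offset 0: jumps targeting offset 0 execute it.
        rewriter.insertFragmentAfter(0, [&] (JSC::BytecodeRewriter::Fragment& fragment) {
            fragment.appendInstruction(JSC::op_nop);
        });

        rewriter.execute(); // sorts the insertions, then applies them to the graph
    }
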
diff --git a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
index 45cb91a1c..99b939403 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,50 +23,45 @@
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef BytecodeUseDef_h
-#define BytecodeUseDef_h
+#pragma once
 
 #include "CodeBlock.h"
+#include "Interpreter.h"
 
 namespace JSC {
 
-template<typename Functor>
-void computeUsesForBytecodeOffset(
-    CodeBlock* codeBlock, unsigned bytecodeOffset, Functor& functor)
+template<typename Block, typename Instruction, typename Functor>
+void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor)
 {
-    Interpreter* interpreter = codeBlock->vm()->interpreter;
-    Instruction* instructionsBegin = codeBlock->instructions().begin();
-    Instruction* instruction = &instructionsBegin[bytecodeOffset];
-    OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
+    if (opcodeID != op_enter && codeBlock->wasCompiledWithDebuggingOpcodes() && codeBlock->scopeRegister().isValid())
+        functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset());
+
     switch (opcodeID) {
     // No uses.
     case op_new_regexp:
     case op_new_array_buffer:
     case op_throw_static_error:
     case op_debug:
-    case op_resolve_scope:
-    case op_pop_scope:
     case op_jneq_ptr:
-    case op_new_func_exp:
     case op_loop_hint:
     case op_jmp:
     case op_new_object:
-    case op_init_lazy_reg:
-    case op_get_callee:
     case op_enter:
+    case op_argument_count:
     case op_catch:
-    case op_touch_entry:
+    case op_profile_control_flow:
+    case op_create_direct_arguments:
+    case op_create_cloned_arguments:
+    case op_get_rest_length:
+    case op_watchdog:
+    case op_get_argument:
         return;
-    case op_new_func:
-    case op_new_captured_func:
-    case op_create_activation: 
-    case op_create_arguments:
+    case op_assert:
+    case op_get_scope:
     case op_to_this:
-    case op_tear_off_activation:
-    case op_profile_will_call:
-    case op_profile_did_call:
+    case op_check_tdz:
+    case op_profile_type:
     case op_throw:
-    case op_push_with_scope:
     case op_end:
     case op_ret:
     case op_jtrue:
@@ -74,11 +69,12 @@ void computeUsesForBytecodeOffset(
     case op_jeq_null:
     case op_jneq_null:
     case op_dec:
-    case op_inc: {
+    case op_inc:
+    case op_log_shadow_chicken_prologue: {
+        ASSERT(opcodeLengths[opcodeID] > 1);
         functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
         return;
     }
-    case op_ret_object_or_this:
     case op_jlesseq:
     case op_jgreater:
     case op_jgreatereq:
@@ -86,91 +82,146 @@ void computeUsesForBytecodeOffset(
     case op_jnlesseq:
     case op_jngreater:
     case op_jngreatereq:
-    case op_jless: {
+    case op_jless:
+    case op_set_function_name:
+    case op_log_shadow_chicken_tail: {
+        ASSERT(opcodeLengths[opcodeID] > 2);
         functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         return;
     }
     case op_put_by_val_direct:
     case op_put_by_val: {
+        ASSERT(opcodeLengths[opcodeID] > 3);
         functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         return;
     }
     case op_put_by_index:
-    case op_put_by_id_replace:
-    case op_put_by_id_transition:
-    case op_put_by_id_transition_direct:
-    case op_put_by_id_transition_direct_out_of_line:
-    case op_put_by_id_transition_normal:
-    case op_put_by_id_transition_normal_out_of_line:
-    case op_put_by_id_generic:
-    case op_put_by_id_out_of_line:
     case op_put_by_id:
-    case op_put_to_scope: {
+    case op_put_to_scope:
+    case op_put_to_arguments: {
+        ASSERT(opcodeLengths[opcodeID] > 3);
         functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         return;
     }
-    case op_put_getter_setter: {
+    case op_put_by_id_with_this: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
         functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_put_by_val_with_this: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
         return;
     }
-    case op_init_global_const_nop:
-    case op_init_global_const:
-    case op_push_name_scope:
+    case op_put_getter_by_id:
+    case op_put_setter_by_id: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_put_getter_setter_by_id: {
+        ASSERT(opcodeLengths[opcodeID] > 5);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
+        return;
+    }
+    case op_put_getter_by_val:
+    case op_put_setter_by_val: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_define_data_property: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_define_accessor_property: {
+        ASSERT(opcodeLengths[opcodeID] > 5);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
+        return;
+    }
+    case op_spread:
+    case op_get_property_enumerator:
+    case op_get_enumerable_length:
+    case op_new_func_exp:
+    case op_new_generator_func_exp:
+    case op_new_async_func_exp:
+    case op_to_index_string:
+    case op_create_lexical_environment:
+    case op_resolve_scope:
     case op_get_from_scope:
     case op_to_primitive:
+    case op_try_get_by_id:
     case op_get_by_id:
-    case op_get_by_id_out_of_line:
-    case op_get_by_id_self:
-    case op_get_by_id_proto:
-    case op_get_by_id_chain:
-    case op_get_by_id_getter_self:
-    case op_get_by_id_getter_proto:
-    case op_get_by_id_getter_chain:
-    case op_get_by_id_custom_self:
-    case op_get_by_id_custom_proto:
-    case op_get_by_id_custom_chain:
-    case op_get_by_id_generic:
+    case op_get_by_id_proto_load:
+    case op_get_by_id_unset:
     case op_get_array_length:
-    case op_get_string_length:
-    case op_get_arguments_length:
     case op_typeof:
+    case op_is_empty:
     case op_is_undefined:
     case op_is_boolean:
     case op_is_number:
-    case op_is_string:
     case op_is_object:
+    case op_is_object_or_null:
+    case op_is_cell_with_type:
     case op_is_function:
     case op_to_number:
+    case op_to_string:
     case op_negate:
     case op_neq_null:
     case op_eq_null:
     case op_not:
     case op_mov:
-    case op_captured_mov:
     case op_new_array_with_size:
     case op_create_this:
-    case op_get_pnames:
     case op_del_by_id:
-    case op_unsigned: {
+    case op_unsigned:
+    case op_new_func:
+    case op_new_generator_func:
+    case op_new_async_func:
+    case op_get_parent_scope:
+    case op_create_scoped_arguments:
+    case op_create_rest:
+    case op_get_from_arguments: {
+        ASSERT(opcodeLengths[opcodeID] > 2);
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         return;
     }
+    case op_has_generic_property:
+    case op_has_indexed_property:
+    case op_enumerator_structure_pname:
+    case op_enumerator_generic_pname:
     case op_get_by_val:
-    case op_get_argument_by_val:
     case op_in:
+    case op_overrides_has_instance:
     case op_instanceof:
-    case op_check_has_instance:
     case op_add:
     case op_mul:
     case op_div:
     case op_mod:
     case op_sub:
+    case op_pow:
     case op_lshift:
     case op_rshift:
     case op_urshift:
@@ -185,38 +236,49 @@ void computeUsesForBytecodeOffset(
     case op_stricteq:
     case op_neq:
     case op_eq:
-    case op_del_by_val: {
+    case op_push_with_scope:
+    case op_get_by_id_with_this:
+    case op_del_by_val:
+    case op_tail_call_forward_arguments: {
+        ASSERT(opcodeLengths[opcodeID] > 3);
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         return;
     }
-    case op_call_varargs: {
+    case op_get_by_val_with_this: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
         return;
     }
-    case op_next_pname: {
+    case op_instanceof_custom:
+    case op_has_structure_property:
+    case op_construct_varargs:
+    case op_call_varargs:
+    case op_tail_call_varargs: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
-        functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
         return;
     }
-    case op_get_by_pname: {
+    case op_get_direct_pname: {
+        ASSERT(opcodeLengths[opcodeID] > 5);
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
-        functor(codeBlock, instruction, opcodeID, instruction[6].u.operand);
         return;
     }
     case op_switch_string:
     case op_switch_char:
     case op_switch_imm: {
+        ASSERT(opcodeLengths[opcodeID] > 3);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         return;
     }
+    case op_new_array_with_spread:
     case op_new_array:
     case op_strcat: {
         int base = instruction[2].u.operand;
@@ -227,19 +289,21 @@ void computeUsesForBytecodeOffset(
     }
     case op_construct:
     case op_call_eval:
-    case op_call: {
+    case op_call:
+    case op_tail_call: {
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         int argCount = instruction[3].u.operand;
         int registerOffset = -instruction[4].u.operand;
         int lastArg = registerOffset + CallFrame::thisArgumentOffset();
-        for (int i = opcodeID == op_construct ? 1 : 0; i < argCount; i++)
+        for (int i = 0; i < argCount; i++)
             functor(codeBlock, instruction, opcodeID, lastArg + i);
+        if (opcodeID == op_call_eval)
+            functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset());
         return;
     }
-    case op_tear_off_arguments: {
+    case op_yield: {
         functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
-        functor(codeBlock, instruction, opcodeID, unmodifiedArgumentsRegister(VirtualRegister(instruction[1].u.operand)).offset());
-        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         return;
     }
     default:
@@ -248,29 +312,18 @@ void computeUsesForBytecodeOffset(
     }
 }
 
-template<typename Functor>
-void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, Functor& functor)
+template<typename Block, typename Instruction, typename Functor>
+void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor)
 {
-    Interpreter* interpreter = codeBlock->vm()->interpreter;
-    Instruction* instructionsBegin = codeBlock->instructions().begin();
-    Instruction* instruction = &instructionsBegin[bytecodeOffset];
-    OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
     switch (opcodeID) {
     // These don't define anything.
-    case op_init_global_const:
-    case op_init_global_const_nop:
-    case op_push_name_scope:
-    case op_push_with_scope:
     case op_put_to_scope:
-    case op_pop_scope:
     case op_end:
-    case op_profile_will_call:
-    case op_profile_did_call:
     case op_throw:
     case op_throw_static_error:
+    case op_assert:
     case op_debug:
     case op_ret:
-    case op_ret_object_or_this:
     case op_jmp:
     case op_jtrue:
     case op_jfalse:
@@ -290,79 +343,99 @@ void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset,
     case op_switch_char:
     case op_switch_string:
     case op_put_by_id:
-    case op_put_by_id_out_of_line:
-    case op_put_by_id_replace:
-    case op_put_by_id_transition:
-    case op_put_by_id_transition_direct:
-    case op_put_by_id_transition_direct_out_of_line:
-    case op_put_by_id_transition_normal:
-    case op_put_by_id_transition_normal_out_of_line:
-    case op_put_by_id_generic:
-    case op_put_getter_setter:
+    case op_put_by_id_with_this:
+    case op_put_by_val_with_this:
+    case op_put_getter_by_id:
+    case op_put_setter_by_id:
+    case op_put_getter_setter_by_id:
+    case op_put_getter_by_val:
+    case op_put_setter_by_val:
     case op_put_by_val:
     case op_put_by_val_direct:
     case op_put_by_index:
-    case op_tear_off_arguments:
-    case op_touch_entry:
+    case op_define_data_property:
+    case op_define_accessor_property:
+    case op_profile_type:
+    case op_profile_control_flow:
+    case op_put_to_arguments:
+    case op_set_function_name:
+    case op_watchdog:
+    case op_log_shadow_chicken_prologue:
+    case op_log_shadow_chicken_tail:
+    case op_yield:
 #define LLINT_HELPER_OPCODES(opcode, length) case opcode:
         FOR_EACH_LLINT_OPCODE_EXTENSION(LLINT_HELPER_OPCODES);
 #undef LLINT_HELPER_OPCODES
         return;
     // These all have a single destination for the first argument.
-    case op_next_pname:
+    case op_argument_count:
+    case op_to_index_string:
+    case op_get_enumerable_length:
+    case op_has_indexed_property:
+    case op_has_structure_property:
+    case op_has_generic_property:
+    case op_get_direct_pname:
+    case op_get_property_enumerator:
+    case op_enumerator_structure_pname:
+    case op_enumerator_generic_pname:
+    case op_get_parent_scope:
+    case op_push_with_scope:
+    case op_create_lexical_environment:
     case op_resolve_scope:
     case op_strcat:
-    case op_tear_off_activation:
     case op_to_primitive:
-    case op_catch:
     case op_create_this:
     case op_new_array:
+    case op_new_array_with_spread:
+    case op_spread:
     case op_new_array_buffer:
     case op_new_array_with_size:
     case op_new_regexp:
     case op_new_func:
-    case op_new_captured_func:
     case op_new_func_exp:
+    case op_new_generator_func:
+    case op_new_generator_func_exp:
+    case op_new_async_func:
+    case op_new_async_func_exp:
     case op_call_varargs:
+    case op_tail_call_varargs:
+    case op_tail_call_forward_arguments:
+    case op_construct_varargs:
     case op_get_from_scope:
     case op_call:
+    case op_tail_call:
     case op_call_eval:
     case op_construct:
+    case op_try_get_by_id:
     case op_get_by_id:
-    case op_get_by_id_out_of_line:
-    case op_get_by_id_self:
-    case op_get_by_id_proto:
-    case op_get_by_id_chain:
-    case op_get_by_id_getter_self:
-    case op_get_by_id_getter_proto:
-    case op_get_by_id_getter_chain:
-    case op_get_by_id_custom_self:
-    case op_get_by_id_custom_proto:
-    case op_get_by_id_custom_chain:
-    case op_get_by_id_generic:
+    case op_get_by_id_proto_load:
+    case op_get_by_id_unset:
+    case op_get_by_id_with_this:
+    case op_get_by_val_with_this:
     case op_get_array_length:
-    case op_get_string_length:
-    case op_check_has_instance:
+    case op_overrides_has_instance:
     case op_instanceof:
+    case op_instanceof_custom:
     case op_get_by_val:
-    case op_get_argument_by_val:
-    case op_get_by_pname:
-    case op_get_arguments_length:
     case op_typeof:
+    case op_is_empty:
     case op_is_undefined:
     case op_is_boolean:
     case op_is_number:
-    case op_is_string:
     case op_is_object:
+    case op_is_object_or_null:
+    case op_is_cell_with_type:
     case op_is_function:
     case op_in:
     case op_to_number:
+    case op_to_string:
     case op_negate:
     case op_add:
     case op_mul:
     case op_div:
     case op_mod:
     case op_sub:
+    case op_pow:
     case op_lshift:
     case op_rshift:
     case op_urshift:
@@ -383,33 +456,36 @@ void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset,
     case op_eq_null:
     case op_not:
     case op_mov:
-    case op_captured_mov:
     case op_new_object:
     case op_to_this:
-    case op_get_callee:
-    case op_init_lazy_reg:
-    case op_create_activation:
-    case op_create_arguments:
+    case op_check_tdz:
+    case op_get_scope:
+    case op_create_direct_arguments:
+    case op_create_scoped_arguments:
+    case op_create_cloned_arguments:
     case op_del_by_id:
     case op_del_by_val:
-    case op_unsigned: {
+    case op_unsigned:
+    case op_get_from_arguments: 
+    case op_get_argument:
+    case op_create_rest:
+    case op_get_rest_length: {
+        ASSERT(opcodeLengths[opcodeID] > 1);
         functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
         return;
     }
-    case op_get_pnames: {
+    case op_catch: {
+        ASSERT(opcodeLengths[opcodeID] > 2);
         functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
-        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
-        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         return;
     }
     case op_enter: {
         for (unsigned i = codeBlock->m_numVars; i--;)
             functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset());
         return;
-    } }
+    }
+    }
 }
 
 } // namespace JSC
-
-#endif // BytecodeUseDef_h
-
diff --git a/Source/JavaScriptCore/bytecode/CallEdge.cpp b/Source/JavaScriptCore/bytecode/CallEdge.cpp
new file mode 100644
index 000000000..dffff6dfd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallEdge.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallEdge.h"
+
+namespace JSC {
+
+void CallEdge::dump(PrintStream& out) const
+{
+    out.print("<", m_callee, ", count: ", m_count, ">");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/CallEdge.h b/Source/JavaScriptCore/bytecode/CallEdge.h
new file mode 100644
index 000000000..8c7abbcb8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallEdge.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallVariant.h"
+
+namespace JSC {
+
+class CallEdge {
+public:
+    CallEdge();
+    CallEdge(CallVariant, uint32_t);
+    
+    bool operator!() const { return !m_callee; }
+    
+    CallVariant callee() const { return m_callee; }
+    uint32_t count() const { return m_count; }
+    
+    CallEdge despecifiedClosure() const
+    {
+        return CallEdge(m_callee.despecifiedClosure(), m_count);
+    }
+    
+    void dump(PrintStream&) const;
+    
+private:
+    CallVariant m_callee;
+    uint32_t m_count;
+};
+
+inline CallEdge::CallEdge(CallVariant callee, uint32_t count)
+    : m_callee(callee)
+    , m_count(count)
+{
+}
+
+inline CallEdge::CallEdge()
+    : CallEdge(CallVariant(), 0)
+{
+}
+
+typedef Vector CallEdgeList;
+
+} // namespace JSC
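
For orientation: CallEdge is a small value type pairing a callee (CallVariant) with an observed call count, so profiling code can rank callees by hotness. A minimal usage sketch, not part of the patch; the two cell pointers are hypothetical placeholders.

    CallEdgeList edges;
    edges.append(CallEdge(CallVariant(calleeCellA), 120)); // calleeCellA: hypothetical JSCell*
    edges.append(CallEdge(CallVariant(calleeCellB), 3));   // calleeCellB: hypothetical JSCell*
    std::sort(edges.begin(), edges.end(),
        [] (const CallEdge& a, const CallEdge& b) { return a.count() > b.count(); });
    dataLog(listDump(edges), "\n"); // each edge prints as <callee, count: N>
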
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
index a4baa6100..7ffda05f4 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2014, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,35 +26,248 @@
 #include "config.h"
 #include "CallLinkInfo.h"
 
+#include "CallFrameShuffleData.h"
 #include "DFGOperations.h"
 #include "DFGThunks.h"
-#include "RepatchBuffer.h"
+#include "FunctionCodeBlock.h"
+#include "JSCInlines.h"
+#include "MacroAssembler.h"
+#include "Opcode.h"
+#include "Repatch.h"
+#include <wtf/ListDump.h>
 
 #if ENABLE(JIT)
 namespace JSC {
 
-void CallLinkInfo::unlink(VM& vm, RepatchBuffer& repatchBuffer)
+CallLinkInfo::CallType CallLinkInfo::callTypeFor(OpcodeID opcodeID)
 {
-    ASSERT(isLinked());
+    if (opcodeID == op_call || opcodeID == op_call_eval)
+        return Call;
+    if (opcodeID == op_call_varargs)
+        return CallVarargs;
+    if (opcodeID == op_construct)
+        return Construct;
+    if (opcodeID == op_construct_varargs)
+        return ConstructVarargs;
+    if (opcodeID == op_tail_call)
+        return TailCall;
+    ASSERT(opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments);
+    return TailCallVarargs;
+}
+
+CallLinkInfo::CallLinkInfo()
+    : m_hasSeenShouldRepatch(false)
+    , m_hasSeenClosure(false)
+    , m_clearedByGC(false)
+    , m_allowStubs(true)
+    , m_isLinked(false)
+    , m_callType(None)
+    , m_calleeGPR(255)
+    , m_maxNumArguments(0)
+    , m_slowPathCount(0)
+{
+}
+
+CallLinkInfo::~CallLinkInfo()
+{
+    clearStub();
     
-    repatchBuffer.revertJumpReplacementToBranchPtrWithPatch(RepatchBuffer::startOfBranchPtrWithPatchOnRegister(hotPathBegin), static_cast<MacroAssembler::RegisterID>(calleeGPR), 0);
-    if (isDFG) {
-#if ENABLE(DFG_JIT)
-        repatchBuffer.relink(callReturnLocation, (callType == Construct ? vm.getCTIStub(linkConstructThunkGenerator) : vm.getCTIStub(linkCallThunkGenerator)).code());
-#else
-        RELEASE_ASSERT_NOT_REACHED();
-#endif
-    } else
-        repatchBuffer.relink(callReturnLocation, callType == Construct ? vm.getCTIStub(linkConstructThunkGenerator).code() : vm.getCTIStub(linkCallThunkGenerator).code());
-    hasSeenShouldRepatch = false;
-    callee.clear();
-    stub.clear();
-
-    // It will be on a list if the callee has a code block.
     if (isOnList())
         remove();
 }
 
+void CallLinkInfo::clearStub()
+{
+    if (!stub())
+        return;
+
+    m_stub->clearCallNodesFor(this);
+    m_stub = nullptr;
+}
+
+void CallLinkInfo::unlink(VM& vm)
+{
+    // We could be called even if we're not linked anymore because of how polymorphic calls
+    // work. Each callsite within the polymorphic call stub may separately ask us to unlink().
+    if (isLinked())
+        unlinkFor(vm, *this);
+
+    // Either we were unlinked, in which case we should not have been on any list, or we unlinked
+    // ourselves so that we're not on any list anymore.
+    RELEASE_ASSERT(!isOnList());
+}
+
+CodeLocationNearCall CallLinkInfo::callReturnLocation()
+{
+    RELEASE_ASSERT(!isDirect());
+    return CodeLocationNearCall(m_callReturnLocationOrPatchableJump, Regular);
+}
+
+CodeLocationJump CallLinkInfo::patchableJump()
+{
+    RELEASE_ASSERT(callType() == DirectTailCall);
+    return CodeLocationJump(m_callReturnLocationOrPatchableJump);
+}
+
+CodeLocationDataLabelPtr CallLinkInfo::hotPathBegin()
+{
+    RELEASE_ASSERT(!isDirect());
+    return CodeLocationDataLabelPtr(m_hotPathBeginOrSlowPathStart);
+}
+
+CodeLocationLabel CallLinkInfo::slowPathStart()
+{
+    RELEASE_ASSERT(isDirect());
+    return m_hotPathBeginOrSlowPathStart;
+}
+
+void CallLinkInfo::setCallee(VM& vm, JSCell* owner, JSFunction* callee)
+{
+    RELEASE_ASSERT(!isDirect());
+    MacroAssembler::repatchPointer(hotPathBegin(), callee);
+    m_calleeOrCodeBlock.set(vm, owner, callee);
+    m_isLinked = true;
+}
+
+void CallLinkInfo::clearCallee()
+{
+    RELEASE_ASSERT(!isDirect());
+    MacroAssembler::repatchPointer(hotPathBegin(), nullptr);
+    m_calleeOrCodeBlock.clear();
+    m_isLinked = false;
+}
+
+JSFunction* CallLinkInfo::callee()
+{
+    RELEASE_ASSERT(!isDirect());
+    return jsCast<JSFunction*>(m_calleeOrCodeBlock.get());
+}
+
+void CallLinkInfo::setCodeBlock(VM& vm, JSCell* owner, FunctionCodeBlock* codeBlock)
+{
+    RELEASE_ASSERT(isDirect());
+    m_calleeOrCodeBlock.setMayBeNull(vm, owner, codeBlock);
+    m_isLinked = true;
+}
+
+void CallLinkInfo::clearCodeBlock()
+{
+    RELEASE_ASSERT(isDirect());
+    m_calleeOrCodeBlock.clear();
+    m_isLinked = false;
+}
+
+FunctionCodeBlock* CallLinkInfo::codeBlock()
+{
+    RELEASE_ASSERT(isDirect());
+    return jsCast<FunctionCodeBlock*>(m_calleeOrCodeBlock.get());
+}
+
+void CallLinkInfo::setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee)
+{
+    RELEASE_ASSERT(!isDirect());
+    m_lastSeenCalleeOrExecutable.set(vm, owner, callee);
+}
+
+void CallLinkInfo::clearLastSeenCallee()
+{
+    RELEASE_ASSERT(!isDirect());
+    m_lastSeenCalleeOrExecutable.clear();
+}
+
+JSFunction* CallLinkInfo::lastSeenCallee()
+{
+    RELEASE_ASSERT(!isDirect());
+    return jsCast<JSFunction*>(m_lastSeenCalleeOrExecutable.get());
+}
+
+bool CallLinkInfo::haveLastSeenCallee()
+{
+    RELEASE_ASSERT(!isDirect());
+    return !!m_lastSeenCalleeOrExecutable;
+}
+
+void CallLinkInfo::setExecutableDuringCompilation(ExecutableBase* executable)
+{
+    RELEASE_ASSERT(isDirect());
+    m_lastSeenCalleeOrExecutable.setWithoutWriteBarrier(executable);
+}
+
+ExecutableBase* CallLinkInfo::executable()
+{
+    RELEASE_ASSERT(isDirect());
+    return jsCast<ExecutableBase*>(m_lastSeenCalleeOrExecutable.get());
+}
+
+void CallLinkInfo::setMaxNumArguments(unsigned value)
+{
+    RELEASE_ASSERT(isDirect());
+    RELEASE_ASSERT(value);
+    m_maxNumArguments = value;
+}
+
+void CallLinkInfo::visitWeak(VM& vm)
+{
+    auto handleSpecificCallee = [&] (JSFunction* callee) {
+        if (Heap::isMarked(callee->executable()))
+            m_hasSeenClosure = true;
+        else
+            m_clearedByGC = true;
+    };
+    
+    if (isLinked()) {
+        if (stub()) {
+            if (!stub()->visitWeak(vm)) {
+                if (Options::verboseOSR()) {
+                    dataLog(
+                        "Clearing closure call to ",
+                        listDump(stub()->variants()), ", stub routine ", RawPointer(stub()),
+                        ".\n");
+                }
+                unlink(vm);
+                m_clearedByGC = true;
+            }
+        } else if (!Heap::isMarked(m_calleeOrCodeBlock.get())) {
+            if (isDirect()) {
+                if (Options::verboseOSR()) {
+                    dataLog(
+                        "Clearing call to ", RawPointer(codeBlock()), " (",
+                        pointerDump(codeBlock()), ").\n");
+                }
+            } else {
+                if (Options::verboseOSR()) {
+                    dataLog(
+                        "Clearing call to ",
+                        RawPointer(callee()), " (",
+                        callee()->executable()->hashFor(specializationKind()),
+                        ").\n");
+                }
+                handleSpecificCallee(callee());
+            }
+            unlink(vm);
+        } else if (isDirect() && !Heap::isMarked(m_lastSeenCalleeOrExecutable.get())) {
+            if (Options::verboseOSR()) {
+                dataLog(
+                    "Clearing call to ", RawPointer(executable()),
+                    " because the executable is dead.\n");
+            }
+            unlink(vm);
+            // We should only get here once the owning CodeBlock is dying, since the executable must
+            // already be in the owner's weak references.
+            m_lastSeenCalleeOrExecutable.clear();
+        }
+    }
+    if (!isDirect() && haveLastSeenCallee() && !Heap::isMarked(lastSeenCallee())) {
+        handleSpecificCallee(lastSeenCallee());
+        clearLastSeenCallee();
+    }
+}
+
+void CallLinkInfo::setFrameShuffleData(const CallFrameShuffleData& shuffleData)
+{
+    m_frameShuffleData = std::make_unique<CallFrameShuffleData>(shuffleData);
+}
+
 } // namespace JSC
 #endif // ENABLE(JIT)
 
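
Note the contract the new unlink() establishes above: calling it on an already-unlinked CallLinkInfo is legal, because each call site inside a polymorphic stub may request unlinking independently. A hedged sketch of a caller leaning on that idempotence (names hypothetical, not part of the patch):

    // Detach every site; callers need no is-linked check of their own.
    void detachAll(VM& vm, Vector<CallLinkInfo*>& sites)
    {
        for (CallLinkInfo* site : sites)
            site->unlink(vm); // no-op when already unlinked; asserts off-list after
    }
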
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.h b/Source/JavaScriptCore/bytecode/CallLinkInfo.h
index 0244497df..91d3dd8f7 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,94 +23,341 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef CallLinkInfo_h
-#define CallLinkInfo_h
+#pragma once
 
-#include "ClosureCallStubRoutine.h"
+#include "CallMode.h"
 #include "CodeLocation.h"
 #include "CodeSpecializationKind.h"
-#include "JITWriteBarrier.h"
-#include "JSFunction.h"
-#include "Opcode.h"
+#include "PolymorphicCallStubRoutine.h"
 #include "WriteBarrier.h"
-#include <wtf/OwnPtr.h>
 #include <wtf/SentinelLinkedList.h>
 
 namespace JSC {
 
 #if ENABLE(JIT)
 
-class RepatchBuffer;
+class FunctionCodeBlock;
+class JSFunction;
+enum OpcodeID : unsigned;
+struct CallFrameShuffleData;
 
-struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
-    enum CallType { None, Call, CallVarargs, Construct };
-    static CallType callTypeFor(OpcodeID opcodeID)
+class CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
+public:
+    enum CallType {
+        None,
+        Call,
+        CallVarargs,
+        Construct,
+        ConstructVarargs,
+        TailCall,
+        TailCallVarargs,
+        DirectCall,
+        DirectConstruct,
+        DirectTailCall
+    };
+    
+    static CallType callTypeFor(OpcodeID opcodeID);
+
+    static bool isVarargsCallType(CallType callType)
     {
-        if (opcodeID == op_call || opcodeID == op_call_eval)
-            return Call;
-        if (opcodeID == op_construct)
-            return Construct;
-        ASSERT(opcodeID == op_call_varargs);
-        return CallVarargs;
+        switch (callType) {
+        case CallVarargs:
+        case ConstructVarargs:
+        case TailCallVarargs:
+            return true;
+
+        default:
+            return false;
+        }
     }
+
+    CallLinkInfo();
         
-    CallLinkInfo()
-        : hasSeenShouldRepatch(false)
-        , isDFG(false)
-        , hasSeenClosure(false)
-        , callType(None)
+    ~CallLinkInfo();
+    
+    static CodeSpecializationKind specializationKindFor(CallType callType)
     {
+        return specializationFromIsConstruct(callType == Construct || callType == ConstructVarargs || callType == DirectConstruct);
     }
-        
-    ~CallLinkInfo()
+    CodeSpecializationKind specializationKind() const
     {
-        if (isOnList())
-            remove();
+        return specializationKindFor(static_cast<CallType>(m_callType));
     }
     
-    CodeSpecializationKind specializationKind() const
+    static CallMode callModeFor(CallType callType)
+    {
+        switch (callType) {
+        case Call:
+        case CallVarargs:
+        case DirectCall:
+            return CallMode::Regular;
+        case TailCall:
+        case TailCallVarargs:
+        case DirectTailCall:
+            return CallMode::Tail;
+        case Construct:
+        case ConstructVarargs:
+        case DirectConstruct:
+            return CallMode::Construct;
+        case None:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    static bool isDirect(CallType callType)
+    {
+        switch (callType) {
+        case DirectCall:
+        case DirectTailCall:
+        case DirectConstruct:
+            return true;
+        case Call:
+        case CallVarargs:
+        case TailCall:
+        case TailCallVarargs:
+        case Construct:
+        case ConstructVarargs:
+            return false;
+        case None:
+            RELEASE_ASSERT_NOT_REACHED();
+            return false;
+        }
+
+        RELEASE_ASSERT_NOT_REACHED();
+        return false;
+    }
+    
+    CallMode callMode() const
+    {
+        return callModeFor(static_cast<CallType>(m_callType));
+    }
+
+    bool isDirect()
+    {
+        return isDirect(static_cast<CallType>(m_callType));
+    }
+
+    bool isTailCall() const
+    {
+        return callMode() == CallMode::Tail;
+    }
+    
+    NearCallMode nearCallMode() const
+    {
+        return isTailCall() ? Tail : Regular;
+    }
+
+    bool isVarargs() const
+    {
+        return isVarargsCallType(static_cast<CallType>(m_callType));
+    }
+
+    bool isLinked() { return m_stub || m_calleeOrCodeBlock; }
+    void unlink(VM&);
+
+    void setUpCall(CallType callType, CodeOrigin codeOrigin, unsigned calleeGPR)
+    {
+        m_callType = callType;
+        m_codeOrigin = codeOrigin;
+        m_calleeGPR = calleeGPR;
+    }
+
+    void setCallLocations(
+        CodeLocationLabel callReturnLocationOrPatchableJump,
+        CodeLocationLabel hotPathBeginOrSlowPathStart,
+        CodeLocationNearCall hotPathOther)
     {
-        return specializationFromIsConstruct(callType == Construct);
+        m_callReturnLocationOrPatchableJump = callReturnLocationOrPatchableJump;
+        m_hotPathBeginOrSlowPathStart = hotPathBeginOrSlowPathStart;
+        m_hotPathOther = hotPathOther;
     }
 
-    CodeLocationNearCall callReturnLocation;
-    CodeLocationDataLabelPtr hotPathBegin;
-    CodeLocationNearCall hotPathOther;
-    JITWriteBarrier<JSFunction> callee;
-    WriteBarrier<JSFunction> lastSeenCallee;
-    RefPtr<ClosureCallStubRoutine> stub;
-    bool hasSeenShouldRepatch : 1;
-    bool isDFG : 1;
-    bool hasSeenClosure : 1;
-    unsigned callType : 5; // CallType
-    unsigned calleeGPR : 8;
-    CodeOrigin codeOrigin;
+    bool allowStubs() const { return m_allowStubs; }
+
+    void disallowStubs()
+    {
+        m_allowStubs = false;
+    }
+
+    CodeLocationNearCall callReturnLocation();
+    CodeLocationJump patchableJump();
+    CodeLocationDataLabelPtr hotPathBegin();
+    CodeLocationLabel slowPathStart();
+
+    CodeLocationNearCall hotPathOther()
+    {
+        return m_hotPathOther;
+    }
+
+    void setCallee(VM&, JSCell*, JSFunction* callee);
+    void clearCallee();
+    JSFunction* callee();
+
+    void setCodeBlock(VM&, JSCell*, FunctionCodeBlock*);
+    void clearCodeBlock();
+    FunctionCodeBlock* codeBlock();
+
+    void setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee);
+    void clearLastSeenCallee();
+    JSFunction* lastSeenCallee();
+    bool haveLastSeenCallee();
+    
+    void setExecutableDuringCompilation(ExecutableBase*);
+    ExecutableBase* executable();
+    
+    void setStub(Ref<PolymorphicCallStubRoutine>&& newStub)
+    {
+        clearStub();
+        m_stub = WTFMove(newStub);
+    }
+
+    void clearStub();
+
+    PolymorphicCallStubRoutine* stub()
+    {
+        return m_stub.get();
+    }
+
+    void setSlowStub(Ref<JITStubRoutine>&& newSlowStub)
+    {
+        m_slowStub = WTFMove(newSlowStub);
+    }
+
+    void clearSlowStub()
+    {
+        m_slowStub = nullptr;
+    }
 
-    bool isLinked() { return stub || callee; }
-    void unlink(VM&, RepatchBuffer&);
+    JITStubRoutine* slowStub()
+    {
+        return m_slowStub.get();
+    }
 
     bool seenOnce()
     {
-        return hasSeenShouldRepatch;
+        return m_hasSeenShouldRepatch;
+    }
+
+    void clearSeen()
+    {
+        m_hasSeenShouldRepatch = false;
     }
 
     void setSeen()
     {
-        hasSeenShouldRepatch = true;
+        m_hasSeenShouldRepatch = true;
+    }
+
+    bool hasSeenClosure()
+    {
+        return m_hasSeenClosure;
+    }
+
+    void setHasSeenClosure()
+    {
+        m_hasSeenClosure = true;
+    }
+
+    bool clearedByGC()
+    {
+        return m_clearedByGC;
+    }
+
+    void setCallType(CallType callType)
+    {
+        m_callType = callType;
+    }
+
+    CallType callType()
+    {
+        return static_cast<CallType>(m_callType);
+    }
+
+    uint32_t* addressOfMaxNumArguments()
+    {
+        return &m_maxNumArguments;
+    }
+
+    uint32_t maxNumArguments()
+    {
+        return m_maxNumArguments;
+    }
+    
+    void setMaxNumArguments(unsigned);
+
+    static ptrdiff_t offsetOfSlowPathCount()
+    {
+        return OBJECT_OFFSETOF(CallLinkInfo, m_slowPathCount);
+    }
+
+    void setCalleeGPR(unsigned calleeGPR)
+    {
+        m_calleeGPR = calleeGPR;
+    }
+
+    unsigned calleeGPR()
+    {
+        return m_calleeGPR;
     }
+
+    uint32_t slowPathCount()
+    {
+        return m_slowPathCount;
+    }
+
+    void setCodeOrigin(CodeOrigin codeOrigin)
+    {
+        m_codeOrigin = codeOrigin;
+    }
+
+    CodeOrigin codeOrigin()
+    {
+        return m_codeOrigin;
+    }
+
+    void visitWeak(VM&);
+
+    void setFrameShuffleData(const CallFrameShuffleData&);
+
+    const CallFrameShuffleData* frameShuffleData()
+    {
+        return m_frameShuffleData.get();
+    }
+
+private:
+    CodeLocationLabel m_callReturnLocationOrPatchableJump;
+    CodeLocationLabel m_hotPathBeginOrSlowPathStart;
+    CodeLocationNearCall m_hotPathOther;
+    WriteBarrier<JSCell> m_calleeOrCodeBlock;
+    WriteBarrier<JSCell> m_lastSeenCalleeOrExecutable;
+    RefPtr<PolymorphicCallStubRoutine> m_stub;
+    RefPtr<JITStubRoutine> m_slowStub;
+    std::unique_ptr<CallFrameShuffleData> m_frameShuffleData;
+    bool m_hasSeenShouldRepatch : 1;
+    bool m_hasSeenClosure : 1;
+    bool m_clearedByGC : 1;
+    bool m_allowStubs : 1;
+    bool m_isLinked : 1;
+    unsigned m_callType : 4; // CallType
+    unsigned m_calleeGPR : 8;
+    uint32_t m_maxNumArguments; // For varargs: the profiled maximum number of arguments. For direct: the number of stack slots allocated for arguments.
+    uint32_t m_slowPathCount;
+    CodeOrigin m_codeOrigin;
 };
 
-inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo)
+inline CodeOrigin getCallLinkInfoCodeOrigin(CallLinkInfo& callLinkInfo)
 {
-    return callLinkInfo->callReturnLocation.executableAddress();
+    return callLinkInfo.codeOrigin();
 }
 
-inline unsigned getCallLinkInfoBytecodeIndex(CallLinkInfo* callLinkInfo)
-{
-    return callLinkInfo->codeOrigin.bytecodeIndex;
-}
+typedef HashMap<CodeOrigin, CallLinkInfo*, CodeOriginApproximateHash> CallLinkInfoMap;
+
+#else // ENABLE(JIT)
+
+typedef HashMap<int, void*> CallLinkInfoMap;
+
 #endif // ENABLE(JIT)
 
 } // namespace JSC
-
-#endif // CallLinkInfo_h
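
The header overlays two states on each WriteBarrier member: for indirect calls m_calleeOrCodeBlock holds the linked JSFunction and m_lastSeenCalleeOrExecutable the profiling hint, while for direct calls they hold the FunctionCodeBlock and the ExecutableBase. The isDirect() predicate gates every accessor, as in this illustrative fragment (a populated 'info' is assumed; not part of the patch):

    if (info.isDirect())
        dataLog("direct target code block: ", RawPointer(info.codeBlock()), "\n");
    else if (info.isLinked())
        dataLog("linked callee: ", RawPointer(info.callee()), "\n");
    // Calling info.callee() on a direct call would trip its RELEASE_ASSERT.
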
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
index b64c967e9..cbc555df1 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,103 +26,312 @@
 #include "config.h"
 #include "CallLinkStatus.h"
 
+#include "CallLinkInfo.h"
 #include "CodeBlock.h"
+#include "DFGJITCode.h"
+#include "InlineCallFrame.h"
+#include "Interpreter.h"
 #include "LLIntCallLinkInfo.h"
-#include "Operations.h"
+#include "JSCInlines.h"
 #include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
 
 namespace JSC {
 
+static const bool verbose = false;
+
 CallLinkStatus::CallLinkStatus(JSValue value)
-    : m_callTarget(value)
-    , m_executable(0)
-    , m_structure(0)
-    , m_couldTakeSlowPath(false)
+    : m_couldTakeSlowPath(false)
     , m_isProved(false)
 {
-    if (!value || !value.isCell())
-        return;
-    
-    m_structure = value.asCell()->structure();
-    
-    if (!value.asCell()->inherits(JSFunction::info()))
+    if (!value || !value.isCell()) {
+        m_couldTakeSlowPath = true;
         return;
+    }
     
-    m_executable = jsCast<JSFunction*>(value.asCell())->executable();
+    m_variants.append(CallVariant(value.asCell()));
 }
 
-JSFunction* CallLinkStatus::function() const
+CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
 {
-    if (!m_callTarget || !m_callTarget.isCell())
-        return 0;
-    
-    if (!m_callTarget.asCell()->inherits(JSFunction::info()))
-        return 0;
-    
-    return jsCast<JSFunction*>(m_callTarget.asCell());
-}
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+#if ENABLE(DFG_JIT)
+    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) {
+        // We could force this to be a closure call, but instead we'll just assume that it
+        // takes slow path.
+        return takesSlowPath();
+    }
+#else
+    UNUSED_PARAM(locker);
+#endif
 
-InternalFunction* CallLinkStatus::internalFunction() const
-{
-    if (!m_callTarget || !m_callTarget.isCell())
-        return 0;
+    VM& vm = *profiledBlock->vm();
     
-    if (!m_callTarget.asCell()->inherits(InternalFunction::info()))
-        return 0;
+    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+    OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
+    if (op != op_call && op != op_construct && op != op_tail_call)
+        return CallLinkStatus();
     
-    return jsCast<InternalFunction*>(m_callTarget.asCell());
-}
-
-Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const
-{
-    if (!m_executable)
-        return NoIntrinsic;
+    LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
     
-    return m_executable->intrinsicFor(kind);
+    return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
 }
 
-CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus CallLinkStatus::computeFor(
+    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
 {
+    ConcurrentJSLocker locker(profiledBlock->m_lock);
+    
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(LLINT)
-    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
-    LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
+    UNUSED_PARAM(map);
+#if ENABLE(DFG_JIT)
+    ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
     
-    return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
+    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
+    if (!callLinkInfo) {
+        if (exitSiteData.takesSlowPath)
+            return takesSlowPath();
+        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
+    }
+    
+    return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
 #else
     return CallLinkStatus();
 #endif
 }
 
-CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
 {
-    ConcurrentJITLocker locker(profiledBlock->m_lock);
+    ExitSiteData exitSiteData;
     
+#if ENABLE(DFG_JIT)
+    exitSiteData.takesSlowPath =
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable));
+    exitSiteData.badFunction =
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell));
+#else
+    UNUSED_PARAM(locker);
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
+#endif
+    
+    return exitSiteData;
+}
+
 #if ENABLE(JIT)
-    if (!profiledBlock->hasBaselineJITProfiling())
-        return computeFromLLInt(profiledBlock, bytecodeIndex);
+CallLinkStatus CallLinkStatus::computeFor(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
+{
+    // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
+    UNUSED_PARAM(profiledBlock);
     
-    if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
-        return CallLinkStatus::takesSlowPath();
+    CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo);
+    result.m_maxNumArguments = callLinkInfo.maxNumArguments();
+    return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
+    const ConcurrentJSLocker&, CallLinkInfo& callLinkInfo)
+{
+    if (callLinkInfo.clearedByGC())
+        return takesSlowPath();
+    
+    // Note that despite requiring that the locker is held, this code is racy with respect
+    // to the CallLinkInfo: it may get cleared while this code runs! This is because
+    // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
+    // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
+    // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
+    // itself to figure out which lock to lock.
+    //
+    // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
+    // path count, the stub, and the target - can all be asked racily. Stubs and targets can
+    // only be deleted at next GC, so if we load a non-null one, then it must contain data
+    // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
+    // is probably OK for now.
     
-    CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex);
-    if (callLinkInfo.stub)
-        return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure());
+    // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
+    // until next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list is
+    // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative
+    // fencing in place to make sure that we see the variants list after construction.
+    if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) {
+        WTF::loadLoadFence();
+        
+        CallEdgeList edges = stub->edges();
+        
+        // Now that we've loaded the edges list, there are no further concurrency concerns. We will
+        // just manipulate and prune this list to our liking - mostly removing entries that are too
+        // infrequent and ensuring that it's sorted in descending order of frequency.
+        
+        RELEASE_ASSERT(edges.size());
+        
+        std::sort(
+            edges.begin(), edges.end(),
+            [] (CallEdge a, CallEdge b) {
+                return a.count() > b.count();
+            });
+        RELEASE_ASSERT(edges.first().count() >= edges.last().count());
+        
+        double totalCallsToKnown = 0;
+        double totalCallsToUnknown = callLinkInfo.slowPathCount();
+        CallVariantList variants;
+        for (size_t i = 0; i < edges.size(); ++i) {
+            CallEdge edge = edges[i];
+            // If the call is at the tail of the distribution, then we don't optimize it and we
+            // treat it as if it was a call to something unknown. We define the tail as being either
+            // a call that doesn't belong to the N most frequent callees (N =
+            // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too
+            // small.
+            if (i >= Options::maxPolymorphicCallVariantsForInlining()
+                || edge.count() < Options::frequentCallThreshold())
+                totalCallsToUnknown += edge.count();
+            else {
+                totalCallsToKnown += edge.count();
+                variants.append(edge.callee());
+            }
+        }
+        
+        // Bail if we didn't find any calls that qualified.
+        RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size());
+        if (variants.isEmpty())
+            return takesSlowPath();
+        
+        // We require that the distribution of callees is skewed towards a handful of common ones.
+        if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate())
+            return takesSlowPath();
+        
+        RELEASE_ASSERT(totalCallsToKnown);
+        RELEASE_ASSERT(variants.size());
+        
+        CallLinkStatus result;
+        result.m_variants = variants;
+        result.m_couldTakeSlowPath = !!totalCallsToUnknown;
+        result.m_isBasedOnStub = true;
+        return result;
+    }
     
-    JSFunction* target = callLinkInfo.lastSeenCallee.get();
-    if (!target)
-        return computeFromLLInt(profiledBlock, bytecodeIndex);
+    CallLinkStatus result;
+    
+    if (JSFunction* target = callLinkInfo.lastSeenCallee()) {
+        CallVariant variant(target);
+        if (callLinkInfo.hasSeenClosure())
+            variant = variant.despecifiedClosure();
+        result.m_variants.append(variant);
+    }
     
-    if (callLinkInfo.hasSeenClosure)
-        return CallLinkStatus(target->executable(), target->structure());
+    result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount();
 
-    return CallLinkStatus(target);
-#else
-    return CallLinkStatus();
+    return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo,
+    ExitSiteData exitSiteData)
+{
+    CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo);
+    if (exitSiteData.badFunction) {
+        if (result.isBasedOnStub()) {
+            // If we have a polymorphic stub, then having an exit site is not quite so useful. In
+            // most cases, the information in the stub has higher fidelity.
+            result.makeClosureCall();
+        } else {
+            // We might not have a polymorphic stub for any number of reasons. When this happens, we
+            // are in less certain territory, so exit sites mean a lot.
+            result.m_couldTakeSlowPath = true;
+        }
+    }
+    if (exitSiteData.takesSlowPath)
+        result.m_couldTakeSlowPath = true;
+    
+    return result;
+}
 #endif
+
+void CallLinkStatus::computeDFGStatuses(
+    CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
+{
+#if ENABLE(DFG_JIT)
+    RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
+    CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
+    for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
+        CallLinkInfo& info = **iter;
+        if (info.isDirect()) {
+            // If the DFG was able to get a direct call then probably so will we. However, there is
+            // a remote chance that it's bad news to lose information about what the DFG did. We'd
+            // ideally like to just know that the DFG had emitted a DirectCall.
+            continue;
+        }
+        CodeOrigin codeOrigin = info.codeOrigin();
+        
+        // Check if we had already previously made a terrible mistake in the FTL for this
+        // code origin. Note that this is approximate because we could have a monovariant
+        // inline in the FTL that ended up failing. We should fix that at some point by
+        // having data structures to track the context of frequent exits. This is currently
+        // challenging because it would require creating a CodeOrigin-based database in
+        // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
+        // InlineCallFrames.
+        CodeBlock* currentBaseline =
+            baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
+        ExitSiteData exitSiteData;
+        {
+            ConcurrentJSLocker locker(currentBaseline->m_lock);
+            exitSiteData = computeExitSiteData(
+                locker, currentBaseline, codeOrigin.bytecodeIndex);
+        }
+        
+        {
+            ConcurrentJSLocker locker(dfgCodeBlock->m_lock);
+            map.add(info.codeOrigin(), computeFor(locker, dfgCodeBlock, info, exitSiteData));
+        }
+    }
+#else
+    UNUSED_PARAM(dfgCodeBlock);
+#endif // ENABLE(DFG_JIT)
+    
+    if (verbose) {
+        dataLog("Context map:\n");
+        ContextMap::iterator iter = map.begin();
+        ContextMap::iterator end = map.end();
+        for (; iter != end; ++iter) {
+            dataLog("    ", iter->key, ":\n");
+            dataLog("        ", iter->value, "\n");
+        }
+    }
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+    CodeBlock* profiledBlock, CodeOrigin codeOrigin,
+    const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap)
+{
+    auto iter = dfgMap.find(codeOrigin);
+    if (iter != dfgMap.end())
+        return iter->value;
+    
+    return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap);
+}
+
+void CallLinkStatus::setProvenConstantCallee(CallVariant variant)
+{
+    m_variants = CallVariantList{ variant };
+    m_couldTakeSlowPath = false;
+    m_isProved = true;
+}
+
+bool CallLinkStatus::isClosureCall() const
+{
+    for (unsigned i = m_variants.size(); i--;) {
+        if (m_variants[i].isClosureCall())
+            return true;
+    }
+    return false;
+}
+
+void CallLinkStatus::makeClosureCall()
+{
+    m_variants = despecifiedVariantList(m_variants);
 }
 
 void CallLinkStatus::dump(PrintStream& out) const
@@ -140,17 +349,14 @@ void CallLinkStatus::dump(PrintStream& out) const
     if (m_couldTakeSlowPath)
         out.print(comma, "Could Take Slow Path");
     
-    if (m_callTarget)
-        out.print(comma, "Known target: ", m_callTarget);
+    if (m_isBasedOnStub)
+        out.print(comma, "Based On Stub");
     
-    if (m_executable) {
-        out.print(comma, "Executable/CallHash: ", RawPointer(m_executable));
-        if (!isCompilationThread())
-            out.print("/", m_executable->hashFor(CodeForCall));
-    }
+    if (!m_variants.isEmpty())
+        out.print(comma, listDump(m_variants));
     
-    if (m_structure)
-        out.print(comma, "Structure: ", RawPointer(m_structure));
+    if (m_maxNumArguments)
+        out.print(comma, "maxNumArguments = ", m_maxNumArguments);
 }
 
 } // namespace JSC
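
To make the stub-pruning heuristic in computeFromCallLinkInfo() concrete, consider a hypothetical profile: sorted edge counts (1000, 40, 2) and slowPathCount = 10, with assumed option values frequentCallThreshold = 100 and minimumCallToKnownRate = 5 (the shipped defaults may differ). Only the first edge clears the threshold, so totalCallsToKnown = 1000 while totalCallsToUnknown = 10 + 40 + 2 = 52. Since 1000 / 52 (about 19.2) exceeds the assumed minimum rate, the resulting status keeps one variant and still sets m_couldTakeSlowPath, because 52 observed calls went to callees the compiler will not specialize for.
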
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.h b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
index 51965fe4a..353deaaf8 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,28 +23,29 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef CallLinkStatus_h
-#define CallLinkStatus_h
+#pragma once
 
+#include "CallLinkInfo.h"
+#include "CallVariant.h"
+#include "CodeOrigin.h"
 #include "CodeSpecializationKind.h"
+#include "ConcurrentJSLock.h"
+#include "ExitingJITType.h"
 #include "Intrinsic.h"
 #include "JSCJSValue.h"
 
 namespace JSC {
 
 class CodeBlock;
-class ExecutableBase;
 class InternalFunction;
 class JSFunction;
 class Structure;
+class CallLinkInfo;
 
 class CallLinkStatus {
+    WTF_MAKE_FAST_ALLOCATED;
 public:
     CallLinkStatus()
-        : m_executable(0)
-        , m_structure(0)
-        , m_couldTakeSlowPath(false)
-        , m_isProved(false)
     {
     }
     
@@ -57,78 +58,77 @@ public:
     
     explicit CallLinkStatus(JSValue);
     
-    CallLinkStatus(ExecutableBase* executable, Structure* structure)
-        : m_executable(executable)
-        , m_structure(structure)
-        , m_couldTakeSlowPath(false)
-        , m_isProved(false)
+    CallLinkStatus(CallVariant variant)
+        : m_variants(1, variant)
     {
-        ASSERT(!!executable == !!structure);
     }
     
-    CallLinkStatus& setIsProved(bool isProved)
-    {
-        m_isProved = isProved;
-        return *this;
-    }
+    static CallLinkStatus computeFor(
+        CodeBlock*, unsigned bytecodeIndex, const CallLinkInfoMap&);
+
+    struct ExitSiteData {
+        bool takesSlowPath { false };
+        bool badFunction { false };
+    };
+    static ExitSiteData computeExitSiteData(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
     
-    static CallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex);
+#if ENABLE(JIT)
+    // Computes the status assuming that we never took slow path and never previously
+    // exited.
+    static CallLinkStatus computeFor(const ConcurrentJSLocker&, CodeBlock*, CallLinkInfo&);
+    static CallLinkStatus computeFor(
+        const ConcurrentJSLocker&, CodeBlock*, CallLinkInfo&, ExitSiteData);
+#endif
     
-    CallLinkStatus& setHasBadFunctionExitSite(bool didHaveExitSite)
-    {
-        ASSERT(!m_isProved);
-        if (didHaveExitSite) {
-            // Turn this into a closure call.
-            m_callTarget = JSValue();
-        }
-        return *this;
-    }
+    typedef HashMap<CodeOrigin, CallLinkStatus, CodeOriginApproximateHash> ContextMap;
     
-    CallLinkStatus& setHasBadCacheExitSite(bool didHaveExitSite)
-    {
-        ASSERT(!m_isProved);
-        if (didHaveExitSite)
-            *this = takesSlowPath();
-        return *this;
-    }
+    // Computes all of the statuses of the DFG code block. Doesn't include statuses that had
+    // no information. Currently we use this when compiling FTL code, to enable polyvariant
+    // inlining.
+    static void computeDFGStatuses(CodeBlock* dfgCodeBlock, ContextMap&);
     
-    CallLinkStatus& setHasBadExecutableExitSite(bool didHaveExitSite)
-    {
-        ASSERT(!m_isProved);
-        if (didHaveExitSite)
-            *this = takesSlowPath();
-        return *this;
-    }
+    // Helper that first consults the ContextMap and then does computeFor().
+    static CallLinkStatus computeFor(
+        CodeBlock*, CodeOrigin, const CallLinkInfoMap&, const ContextMap&);
     
-    bool isSet() const { return m_callTarget || m_executable || m_couldTakeSlowPath; }
+    void setProvenConstantCallee(CallVariant);
+    
+    bool isSet() const { return !m_variants.isEmpty() || m_couldTakeSlowPath; }
     
     bool operator!() const { return !isSet(); }
     
     bool couldTakeSlowPath() const { return m_couldTakeSlowPath; }
-    bool isClosureCall() const { return m_executable && !m_callTarget; }
-    
-    JSValue callTarget() const { return m_callTarget; }
-    JSFunction* function() const;
-    InternalFunction* internalFunction() const;
-    Intrinsic intrinsicFor(CodeSpecializationKind) const;
-    ExecutableBase* executable() const { return m_executable; }
-    Structure* structure() const { return m_structure; }
+    
+    void setCouldTakeSlowPath(bool value) { m_couldTakeSlowPath = value; }
+    
+    CallVariantList variants() const { return m_variants; }
+    unsigned size() const { return m_variants.size(); }
+    CallVariant at(unsigned i) const { return m_variants[i]; }
+    CallVariant operator[](unsigned i) const { return at(i); }
     bool isProved() const { return m_isProved; }
-    bool canOptimize() const { return (m_callTarget || m_executable) && !m_couldTakeSlowPath; }
+    bool isBasedOnStub() const { return m_isBasedOnStub; }
+    bool canOptimize() const { return !m_variants.isEmpty(); }
+
+    bool isClosureCall() const; // Returns true if any callee is a closure call.
+    
+    unsigned maxNumArguments() const { return m_maxNumArguments; }
     
     void dump(PrintStream&) const;
     
 private:
-    static CallLinkStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex);
+    void makeClosureCall();
+    
+    static CallLinkStatus computeFromLLInt(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#if ENABLE(JIT)
+    static CallLinkStatus computeFromCallLinkInfo(
+        const ConcurrentJSLocker&, CallLinkInfo&);
+#endif
     
-    JSValue m_callTarget;
-    ExecutableBase* m_executable;
-    Structure* m_structure;
-    bool m_couldTakeSlowPath;
-    bool m_isProved;
+    CallVariantList m_variants;
+    bool m_couldTakeSlowPath { false };
+    bool m_isProved { false };
+    bool m_isBasedOnStub { false };
+    unsigned m_maxNumArguments { 0 };
 };
 
 } // namespace JSC
-
-#endif // CallLinkStatus_h
-
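
A sketch of how a compiler tier might consume this interface, loosely modeled on the comments above (the surrounding variables and inlineMonomorphicCall are hypothetical; not part of the patch):

    CallLinkStatus status = CallLinkStatus::computeFor(profiledBlock, bytecodeIndex, callLinkInfoMap);
    if (!status)
        return; // nothing was ever profiled at this call site
    if (status.canOptimize() && !status.couldTakeSlowPath() && status.size() == 1)
        inlineMonomorphicCall(status[0]); // one known variant, no slow-path traffic
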
diff --git a/Source/JavaScriptCore/bytecode/CallMode.cpp b/Source/JavaScriptCore/bytecode/CallMode.cpp
new file mode 100644
index 000000000..5757b1850
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallMode.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallMode.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::CallMode callMode)
+{
+    switch (callMode) {
+    case JSC::CallMode::Tail:
+        out.print("TailCall");
+        return;
+    case JSC::CallMode::Regular:
+        out.print("Call");
+        return;
+    case JSC::CallMode::Construct:
+        out.print("Construct");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/CallMode.h b/Source/JavaScriptCore/bytecode/CallMode.h
new file mode 100644
index 000000000..02d90e1a0
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallMode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeSpecializationKind.h"
+
+namespace JSC {
+
+enum class CallMode { Regular, Tail, Construct };
+
+enum FrameAction { KeepTheFrame = 0, ReuseTheFrame };
+
+inline CodeSpecializationKind specializationKindFor(CallMode callMode)
+{
+    if (callMode == CallMode::Construct)
+        return CodeForConstruct;
+
+    return CodeForCall;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::CallMode);
+
+} // namespace WTF
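
The collapse from three call modes to two specialization kinds is deliberate: a tail call runs the same specialized code as a regular call, so only Construct is distinguished. Illustrative checks (assert-style, not part of the patch):

    ASSERT(JSC::specializationKindFor(JSC::CallMode::Regular) == JSC::CodeForCall);
    ASSERT(JSC::specializationKindFor(JSC::CallMode::Tail) == JSC::CodeForCall);
    ASSERT(JSC::specializationKindFor(JSC::CallMode::Construct) == JSC::CodeForConstruct);
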
diff --git a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
index 3a7448efd..2d1b00cbe 100644
--- a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
+++ b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
@@ -23,10 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef CallReturnOffsetToBytecodeOffset_h
-#define CallReturnOffsetToBytecodeOffset_h
-
-#include <wtf/Platform.h>
+#pragma once
 
 namespace JSC {
 
@@ -55,6 +52,3 @@ inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeOffset* pc)
 #endif
 
 } // namespace JSC
-
-#endif // CallReturnOffsetToBytecodeOffset_h
-
diff --git a/Source/JavaScriptCore/bytecode/CallVariant.cpp b/Source/JavaScriptCore/bytecode/CallVariant.cpp
new file mode 100644
index 000000000..9745dde2b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallVariant.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallVariant.h"
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+void CallVariant::dump(PrintStream& out) const
+{
+    if (!*this) {
+        out.print("null");
+        return;
+    }
+    
+    if (InternalFunction* internalFunction = this->internalFunction()) {
+        out.print("InternalFunction: ", JSValue(internalFunction));
+        return;
+    }
+    
+    if (JSFunction* function = this->function()) {
+        out.print("(Function: ", JSValue(function), "; Executable: ", *executable(), ")");
+        return;
+    }
+    
+    out.print("Executable: ", *executable());
+}
+
+CallVariantList variantListWithVariant(const CallVariantList& list, CallVariant variantToAdd)
+{
+    ASSERT(variantToAdd);
+    CallVariantList result;
+    for (CallVariant variant : list) {
+        ASSERT(variant);
+        if (!!variantToAdd) {
+            if (variant == variantToAdd)
+                variantToAdd = CallVariant();
+            else if (variant.despecifiedClosure() == variantToAdd.despecifiedClosure()) {
+                variant = variant.despecifiedClosure();
+                variantToAdd = CallVariant();
+            }
+        }
+        result.append(variant);
+    }
+    if (!!variantToAdd)
+        result.append(variantToAdd);
+    
+    if (!ASSERT_DISABLED) {
+        for (unsigned i = 0; i < result.size(); ++i) {
+            for (unsigned j = i + 1; j < result.size(); ++j) {
+                if (result[i] != result[j])
+                    continue;
+                
+                dataLog("variantListWithVariant(", listDump(list), ", ", variantToAdd, ") failed: got duplicates in result: ", listDump(result), "\n");
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+    }
+    
+    return result;
+}
+
+CallVariantList despecifiedVariantList(const CallVariantList& list)
+{
+    CallVariantList result;
+    for (CallVariant variant : list)
+        result = variantListWithVariant(result, variant.despecifiedClosure());
+    return result;
+}
+
+} // namespace JSC
+
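
The merge rule in variantListWithVariant() is easiest to see on a worked example. Suppose functionA and functionA2 are distinct JSFunctions sharing one executable, and functionB is unrelated (all three hypothetical):

    CallVariantList list;
    list = variantListWithVariant(list, CallVariant(functionA));  // [A]
    list = variantListWithVariant(list, CallVariant(functionB));  // [A, B]
    list = variantListWithVariant(list, CallVariant(functionA2)); // [A's executable, B]

The third call neither duplicates A nor appends A2: it despecifies the existing entry to the shared executable, which is the same normalization that despecifiedVariantList() applies to a whole list.
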
diff --git a/Source/JavaScriptCore/bytecode/CallVariant.h b/Source/JavaScriptCore/bytecode/CallVariant.h
new file mode 100644
index 000000000..94e72bb32
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallVariant.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "FunctionExecutable.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "NativeExecutable.h"
+
+namespace JSC {
+
+// The CallVariant class is meant to encapsulate a callee in a way that is useful for call linking
+// and inlining. Because JavaScript has closures, and because JSC implements the notion of internal
+// non-function objects that nevertheless provide call traps, the call machinery wants to see a
+// callee in one of the following four forms:
+//
+// JSFunction callee: This means that we expect the callsite to always call a particular function
+//     instance, that is associated with a particular lexical environment. This pinpoints not
+//     just the code that will be called (i.e. the executable) but also the scope within which
+//     the code runs.
+//
+// Executable callee: This corresponds to a call to a closure. In this case, we know that the
+//     callsite will call a JSFunction, but we do not know which particular JSFunction. We do know
+//     what code will be called - i.e. we know the executable.
+//
+// InternalFunction callee: JSC supports a special kind of native functions that support bizarre
+//     semantics. These are always singletons. If we know that the callee is an InternalFunction
+//     then we know both the code that will be called and the scope; in fact the "scope" is really
+//     just the InternalFunction itself.
+//
+// Something else: It's possible to call all manner of rubbish in JavaScript. This implicitly supports
+//     bizarre object callees, but it can't really tell you anything interesting about them other
+//     than the fact that they don't fall into any of the above categories.
+//
+// This class serves as a kind of union over these four things. It does so by just holding a
+// JSCell*. We determine which of the modes it's in by doing type checks on the cell. Note that we
+// cannot use WriteBarrier<> here because this gets used inside the compiler.
+
+class CallVariant {
+public:
+    explicit CallVariant(JSCell* callee = nullptr)
+        : m_callee(callee)
+    {
+    }
+    
+    CallVariant(WTF::HashTableDeletedValueType)
+        : m_callee(deletedToken())
+    {
+    }
+    
+    bool operator!() const { return !m_callee; }
+    
+    // If this variant refers to a function, change it to refer to its executable.
+    ALWAYS_INLINE CallVariant despecifiedClosure() const
+    {
+        if (m_callee->type() == JSFunctionType)
+            return CallVariant(jsCast<JSFunction*>(m_callee)->executable());
+        return *this;
+    }
+    
+    JSCell* rawCalleeCell() const { return m_callee; }
+    
+    InternalFunction* internalFunction() const
+    {
+        return jsDynamicCast<InternalFunction*>(*m_callee->vm(), m_callee);
+    }
+    
+    JSFunction* function() const
+    {
+        return jsDynamicCast<JSFunction*>(*m_callee->vm(), m_callee);
+    }
+    
+    bool isClosureCall() const { return !!jsDynamicCast<ExecutableBase*>(*m_callee->vm(), m_callee); }
+    
+    ExecutableBase* executable() const
+    {
+        if (JSFunction* function = this->function())
+            return function->executable();
+        return jsDynamicCast<ExecutableBase*>(*m_callee->vm(), m_callee);
+    }
+    
+    JSCell* nonExecutableCallee() const
+    {
+        RELEASE_ASSERT(!isClosureCall());
+        return m_callee;
+    }
+    
+    Intrinsic intrinsicFor(CodeSpecializationKind kind) const
+    {
+        if (ExecutableBase* executable = this->executable())
+            return executable->intrinsicFor(kind);
+        return NoIntrinsic;
+    }
+    
+    FunctionExecutable* functionExecutable() const
+    {
+        if (ExecutableBase* executable = this->executable())
+            return jsDynamicCast<FunctionExecutable*>(*m_callee->vm(), executable);
+        return nullptr;
+    }
+
+    NativeExecutable* nativeExecutable() const
+    {
+        if (ExecutableBase* executable = this->executable())
+            return jsDynamicCast<NativeExecutable*>(*m_callee->vm(), executable);
+        return nullptr;
+    }
+
+    const DOMJIT::Signature* signatureFor(CodeSpecializationKind kind) const
+    {
+        if (NativeExecutable* nativeExecutable = this->nativeExecutable())
+            return nativeExecutable->signatureFor(kind);
+        return nullptr;
+    }
+    
+    void dump(PrintStream& out) const;
+    
+    bool isHashTableDeletedValue() const
+    {
+        return m_callee == deletedToken();
+    }
+    
+    bool operator==(const CallVariant& other) const
+    {
+        return m_callee == other.m_callee;
+    }
+    
+    bool operator!=(const CallVariant& other) const
+    {
+        return !(*this == other);
+    }
+    
+    bool operator<(const CallVariant& other) const
+    {
+        return m_callee < other.m_callee;
+    }
+    
+    bool operator>(const CallVariant& other) const
+    {
+        return other < *this;
+    }
+    
+    bool operator<=(const CallVariant& other) const
+    {
+        return !(*this < other);
+    }
+    
+    bool operator>=(const CallVariant& other) const
+    {
+        return other <= *this;
+    }
+    
+    unsigned hash() const
+    {
+        return WTF::PtrHash<JSCell*>::hash(m_callee);
+    }
+    
+private:
+    static JSCell* deletedToken() { return bitwise_cast<JSCell*>(static_cast<uintptr_t>(1)); }
+    
+    JSCell* m_callee;
+};
+
+struct CallVariantHash {
+    static unsigned hash(const CallVariant& key) { return key.hash(); }
+    static bool equal(const CallVariant& a, const CallVariant& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+typedef Vector<CallVariant, 1> CallVariantList;
+
+// Returns a new variant list by attempting to either append the given variant or merge it with one
+// of the variants we already have by despecifying closures.
+CallVariantList variantListWithVariant(const CallVariantList&, CallVariant);
+
+// Returns a new list where every element is despecified, and the list is deduplicated.
+CallVariantList despecifiedVariantList(const CallVariantList&);
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::CallVariant> {
+    typedef JSC::CallVariantHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::CallVariant> : SimpleClassHashTraits<JSC::CallVariant> { };
+
+} // namespace WTF
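
To make the four-way classification above concrete, here is a minimal illustrative sketch (not part of the patch; it assumes JSC's internal headers, and classifyCallee is a hypothetical helper name) that interrogates a CallVariant using only the accessors declared in this header:

// Illustrative sketch only: assumes JSC internal headers; classifyCallee is
// a hypothetical name, not a JSC function.
static void classifyCallee(JSC::JSCell* cell, WTF::PrintStream& out)
{
    JSC::CallVariant variant(cell);
    if (!variant)
        return; // The call site has not observed a callee yet.

    if (JSC::JSFunction* function = variant.function()) {
        // Pinned to one function instance: both the code and the scope are known.
        out.print("JSFunction callee, executable ", WTF::RawPointer(function->executable()), "\n");
    } else if (variant.isClosureCall()) {
        // The cell is an ExecutableBase: the code is known, the scope is not.
        out.print("closure call, executable ", WTF::RawPointer(variant.executable()), "\n");
    } else if (JSC::InternalFunction* internal = variant.internalFunction()) {
        // Singleton native function: the "scope" is the InternalFunction itself.
        out.print("InternalFunction callee ", WTF::RawPointer(internal), "\n");
    } else {
        // "Something else": e.g. a non-function object with a call trap.
        out.print("unclassified callee ", WTF::RawPointer(variant.nonExecutableCallee()), "\n");
    }

    // Two call sites that saw different JSFunction instances of the same
    // executable can have their profiles merged by despecifying the closure:
    JSC::CallVariant despecified = variant.despecifiedClosure();
    ASSERT(despecified.executable() == variant.executable());
}

Because DefaultHash and HashTraits are specialized above, CallVariant also works directly as a key in WTF hash tables, which the call-link machinery can use when deduplicating variant lists via variantListWithVariant() and despecifiedVariantList().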
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index eec5b7076..44d7f83da 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2010, 2012-2017 Apple Inc. All rights reserved.
  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -30,35 +30,60 @@
 #include "config.h"
 #include "CodeBlock.h"
 
+#include "ArithProfile.h"
+#include "BasicBlockLocation.h"
 #include "BytecodeGenerator.h"
+#include "BytecodeLivenessAnalysis.h"
 #include "BytecodeUseDef.h"
 #include "CallLinkStatus.h"
+#include "CodeBlockSet.h"
 #include "DFGCapabilities.h"
 #include "DFGCommon.h"
 #include "DFGDriver.h"
-#include "DFGNode.h"
+#include "DFGJITCode.h"
 #include "DFGWorklist.h"
 #include "Debugger.h"
+#include "EvalCodeBlock.h"
+#include "FunctionCodeBlock.h"
+#include "FunctionExecutableDump.h"
+#include "GetPutInfo.h"
+#include "InlineCallFrame.h"
 #include "Interpreter.h"
 #include "JIT.h"
-#include "JITStubs.h"
-#include "JSActivation.h"
+#include "JITMathIC.h"
+#include "JSCInlines.h"
 #include "JSCJSValue.h"
 #include "JSFunction.h"
-#include "JSNameScope.h"
+#include "JSLexicalEnvironment.h"
+#include "JSModuleEnvironment.h"
+#include "LLIntData.h"
 #include "LLIntEntrypoint.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
 #include "LowLevelInterpreter.h"
-#include "Operations.h"
-#include "PolymorphicPutByIdList.h"
+#include "ModuleProgramCodeBlock.h"
+#include "PCToCodeOriginMap.h"
+#include "PolymorphicAccess.h"
+#include "ProfilerDatabase.h"
+#include "ProgramCodeBlock.h"
 #include "ReduceWhitespace.h"
 #include "Repatch.h"
-#include "RepatchBuffer.h"
 #include "SlotVisitorInlines.h"
+#include "StackVisitor.h"
+#include "StructureStubInfo.h"
+#include "TypeLocationCache.h"
+#include "TypeProfiler.h"
 #include "UnlinkedInstructionStream.h"
+#include "VMInlines.h"
 #include <wtf/BagToHashMap.h>
 #include <wtf/CommaPrinter.h>
+#include <wtf/SimpleStats.h>
 #include <wtf/StringExtras.h>
 #include <wtf/StringPrintStream.h>
+#include <wtf/text/UniquedStringImpl.h>
+
+#if ENABLE(JIT)
+#include "RegisterAtOffsetList.h"
+#endif
 
 #if ENABLE(DFG_JIT)
 #include "DFGOperations.h"
@@ -70,6 +95,11 @@
 
 namespace JSC {
 
+const ClassInfo CodeBlock::s_info = {
+    "CodeBlock", 0, 0,
+    CREATE_METHOD_TABLE(CodeBlock)
+};
+
 CString CodeBlock::inferredName() const
 {
     switch (codeType()) {
@@ -79,6 +109,8 @@ CString CodeBlock::inferredName() const
         return "";
     case FunctionCode:
         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
+    case ModuleCode:
+        return "";
     default:
         CRASH();
         return CString("", 0);
@@ -99,7 +131,7 @@ CodeBlockHash CodeBlock::hash() const
 {
     if (!m_hash) {
         RELEASE_ASSERT(isSafeToComputeHash());
-        m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
+        m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
     }
     return m_hash;
 }
@@ -107,7 +139,7 @@ CodeBlockHash CodeBlock::hash() const
 CString CodeBlock::sourceCodeForTools() const
 {
     if (codeType() != FunctionCode)
-        return ownerExecutable()->source().toUTF8();
+        return ownerScriptExecutable()->source().toUTF8();
     
     SourceProvider* provider = source();
     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
@@ -119,7 +151,7 @@ CString CodeBlock::sourceCodeForTools() const
     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
     return toCString(
         "function ",
-        provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
+        provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
 }
 
 CString CodeBlock::sourceCodeOnOneLine() const
@@ -127,22 +159,42 @@ CString CodeBlock::sourceCodeOnOneLine() const
     return reduceWhitespace(sourceCodeForTools());
 }
 
-void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+CString CodeBlock::hashAsStringIfPossible() const
 {
     if (hasHash() || isSafeToComputeHash())
-        out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
-    else
-        out.print(inferredName(), "#:[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
+        return toCString(hash());
+    return "";
+}
+
+void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+{
+    out.print(inferredName(), "#", hashAsStringIfPossible());
+    out.print(":[", RawPointer(this), "->");
+    if (!!m_alternative)
+        out.print(RawPointer(alternative()), "->");
+    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
 
     if (codeType() == FunctionCode)
         out.print(specializationKind());
     out.print(", ", instructionCount());
     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
-        out.print(" (SABI)");
-    if (ownerExecutable()->neverInline())
+        out.print(" (ShouldAlwaysBeInlined)");
+    if (ownerScriptExecutable()->neverInline())
         out.print(" (NeverInline)");
-    if (ownerExecutable()->isStrictMode())
+    if (ownerScriptExecutable()->neverOptimize())
+        out.print(" (NeverOptimize)");
+    else if (ownerScriptExecutable()->neverFTLOptimize())
+        out.print(" (NeverFTLOptimize)");
+    if (ownerScriptExecutable()->didTryToEnterInLoop())
+        out.print(" (DidTryToEnterInLoop)");
+    if (ownerScriptExecutable()->isStrictMode())
         out.print(" (StrictMode)");
+    if (m_didFailJITCompilation)
+        out.print(" (JITFail)");
+    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
+        out.print(" (FTLFail)");
+    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
+        out.print(" (HadFTLReplacement)");
     out.print("]");
 }
 
@@ -151,11 +203,6 @@ void CodeBlock::dump(PrintStream& out) const
     dumpAssumingJITType(out, jitType());
 }
 
-static CString constantName(int k, JSValue value)
-{
-    return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
-}
-
 static CString idName(int id0, const Identifier& ident)
 {
     return toCString(ident.impl(), "(@id", id0, ")");
@@ -163,19 +210,16 @@ static CString idName(int id0, const Identifier& ident)
 
 CString CodeBlock::registerName(int r) const
 {
-    if (r == missingThisObjectMarker())
-        return "";
-
     if (isConstantRegisterIndex(r))
-        return constantName(r, getConstant(r));
+        return constantName(r);
 
-    if (operandIsArgument(r)) {
-        if (!VirtualRegister(r).toArgument())
-            return "this";
-        return toCString("arg", VirtualRegister(r).toArgument());
-    }
+    return toCString(VirtualRegister(r));
+}
 
-    return toCString("loc", VirtualRegister(r).toLocal());
+CString CodeBlock::constantName(int index) const
+{
+    JSValue value = getConstant(index);
+    return toCString(value, "(", VirtualRegister(index), ")");
 }
 
 static CString regexpToSourceString(RegExp* regExp)
@@ -188,6 +232,10 @@ static CString regexpToSourceString(RegExp* regExp)
         postfix[index++] = 'i';
     if (regExp->multiline())
         postfix[index++] = 'm';
+    if (regExp->sticky())
+        postfix[index++] = 'y';
+    if (regExp->unicode())
+        postfix[index++] = 'u';
 
     return toCString("/", regExp->pattern().impl(), postfix);
 }
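
The suffix built above gains one character per set flag, in the fixed order g, i, m, y, u. A standalone sketch of the same scheme (plain C++, independent of JSC; all names here are illustrative):

#include <cstdio>
#include <string>

// Mirrors regexpToSourceString's flag-suffix scheme.
struct RegExpFlags { bool global, ignoreCase, multiline, sticky, unicode; };

static std::string flagSuffix(RegExpFlags f)
{
    std::string suffix;
    if (f.global) suffix += 'g';
    if (f.ignoreCase) suffix += 'i';
    if (f.multiline) suffix += 'm';
    if (f.sticky) suffix += 'y';
    if (f.unicode) suffix += 'u';
    return suffix;
}

int main()
{
    // A global, sticky, Unicode pattern dumps as /ab+c/gyu.
    std::printf("/ab+c/%s\n", flagSuffix({ true, false, false, true, true }).c_str());
    return 0;
}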
@@ -197,15 +245,17 @@ static CString regexpName(int re, RegExp* regexp)
     return toCString(regexpToSourceString(regexp), "(@re", re, ")");
 }
 
-NEVER_INLINE static const char* debugHookName(int debugHookID)
+NEVER_INLINE static const char* debugHookName(int debugHookType)
 {
-    switch (static_cast<DebugHookID>(debugHookID)) {
+    switch (static_cast<DebugHookType>(debugHookType)) {
         case DidEnterCallFrame:
             return "didEnterCallFrame";
         case WillLeaveCallFrame:
             return "willLeaveCallFrame";
         case WillExecuteStatement:
             return "willExecuteStatement";
+        case WillExecuteExpression:
+            return "willExecuteExpression";
         case WillExecuteProgram:
             return "willExecuteProgram";
         case DidExecuteProgram:
@@ -251,48 +301,20 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location,
     case op_get_by_id:
         op = "get_by_id";
         break;
-    case op_get_by_id_out_of_line:
-        op = "get_by_id_out_of_line";
-        break;
-    case op_get_by_id_self:
-        op = "get_by_id_self";
-        break;
-    case op_get_by_id_proto:
-        op = "get_by_id_proto";
-        break;
-    case op_get_by_id_chain:
-        op = "get_by_id_chain";
-        break;
-    case op_get_by_id_getter_self:
-        op = "get_by_id_getter_self";
-        break;
-    case op_get_by_id_getter_proto:
-        op = "get_by_id_getter_proto";
-        break;
-    case op_get_by_id_getter_chain:
-        op = "get_by_id_getter_chain";
+    case op_get_by_id_proto_load:
+        op = "get_by_id_proto_load";
         break;
-    case op_get_by_id_custom_self:
-        op = "get_by_id_custom_self";
-        break;
-    case op_get_by_id_custom_proto:
-        op = "get_by_id_custom_proto";
-        break;
-    case op_get_by_id_custom_chain:
-        op = "get_by_id_custom_chain";
-        break;
-    case op_get_by_id_generic:
-        op = "get_by_id_generic";
+    case op_get_by_id_unset:
+        op = "get_by_id_unset";
         break;
     case op_get_array_length:
         op = "array_length";
         break;
-    case op_get_string_length:
-        op = "string_length";
-        break;
     default:
         RELEASE_ASSERT_NOT_REACHED();
+#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
         op = 0;
+#endif
     }
     int r0 = (++it)->u.operand;
     int r1 = (++it)->u.operand;
@@ -302,22 +324,19 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location,
     it += 4; // Increment up to the value profiler.
 }
 
-#if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
-static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
+static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
 {
     if (!structure)
         return;
     
     out.printf("%s = %p", name, structure);
     
-    PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
+    PropertyOffset offset = structure->getConcurrently(ident.impl());
     if (offset != invalidOffset)
         out.printf(" (offset = %d)", offset);
 }
-#endif
 
-#if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
-static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
+static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
 {
     out.printf("chain = %p: [", chain);
     bool first = true;
@@ -328,11 +347,10 @@ static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain,
             first = false;
         else
             out.printf(", ");
-        dumpStructure(out, "struct", exec, currentStructure->get(), ident);
+        dumpStructure(out, "struct", currentStructure->get(), ident);
     }
     out.printf("]");
 }
-#endif
 
 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
 {
@@ -342,125 +360,131 @@ void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int l
     
     UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
     
-#if ENABLE(LLINT)
     if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
         out.printf(" llint(array_length)");
-    else if (Structure* structure = instruction[4].u.structure.get()) {
+    else if (StructureID structureID = instruction[4].u.structureID) {
+        Structure* structure = m_vm->heap.structureIDTable().get(structureID);
         out.printf(" llint(");
-        dumpStructure(out, "struct", exec, structure, ident);
+        dumpStructure(out, "struct", structure, ident);
         out.printf(")");
+        if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_by_id_proto_load)
+            out.printf(" proto(%p)", instruction[6].u.pointer);
     }
-#endif
 
 #if ENABLE(JIT)
     if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
         StructureStubInfo& stubInfo = *stubPtr;
-        if (stubInfo.seen) {
-            out.printf(" jit(");
-            
-            Structure* baseStructure = 0;
-            Structure* prototypeStructure = 0;
-            StructureChain* chain = 0;
-            PolymorphicAccessStructureList* structureList = 0;
-            int listSize = 0;
-            
-            switch (stubInfo.accessType) {
-            case access_get_by_id_self:
-                out.printf("self");
-                baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
-                break;
-            case access_get_by_id_proto:
-                out.printf("proto");
-                baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
-                prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
-                break;
-            case access_get_by_id_chain:
-                out.printf("chain");
-                baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
-                chain = stubInfo.u.getByIdChain.chain.get();
-                break;
-            case access_get_by_id_self_list:
-                out.printf("self_list");
-                structureList = stubInfo.u.getByIdSelfList.structureList;
-                listSize = stubInfo.u.getByIdSelfList.listSize;
-                break;
-            case access_get_by_id_proto_list:
-                out.printf("proto_list");
-                structureList = stubInfo.u.getByIdProtoList.structureList;
-                listSize = stubInfo.u.getByIdProtoList.listSize;
-                break;
-            case access_unset:
-                out.printf("unset");
-                break;
-            case access_get_by_id_generic:
-                out.printf("generic");
-                break;
-            case access_get_array_length:
-                out.printf("array_length");
-                break;
-            case access_get_string_length:
-                out.printf("string_length");
-                break;
-            default:
-                RELEASE_ASSERT_NOT_REACHED();
-                break;
-            }
-            
-            if (baseStructure) {
-                out.printf(", ");
-                dumpStructure(out, "struct", exec, baseStructure, ident);
-            }
+        if (stubInfo.resetByGC)
+            out.print(" (Reset By GC)");
+        
+        out.printf(" jit(");
             
-            if (prototypeStructure) {
-                out.printf(", ");
-                dumpStructure(out, "prototypeStruct", exec, baseStructure, ident);
-            }
+        Structure* baseStructure = nullptr;
+        PolymorphicAccess* stub = nullptr;
             
-            if (chain) {
-                out.printf(", ");
-                dumpChain(out, exec, chain, ident);
-            }
+        switch (stubInfo.cacheType) {
+        case CacheType::GetByIdSelf:
+            out.printf("self");
+            baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get();
+            break;
+        case CacheType::Stub:
+            out.printf("stub");
+            stub = stubInfo.u.stub;
+            break;
+        case CacheType::Unset:
+            out.printf("unset");
+            break;
+        case CacheType::ArrayLength:
+            out.printf("ArrayLength");
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
             
-            if (structureList) {
-                out.printf(", list = %p: [", structureList);
-                for (int i = 0; i < listSize; ++i) {
-                    if (i)
-                        out.printf(", ");
-                    out.printf("(");
-                    dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident);
-                    if (structureList->list[i].isChain) {
-                        if (structureList->list[i].u.chain.get()) {
-                            out.printf(", ");
-                            dumpChain(out, exec, structureList->list[i].u.chain.get(), ident);
-                        }
-                    } else {
-                        if (structureList->list[i].u.proto.get()) {
-                            out.printf(", ");
-                            dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident);
-                        }
-                    }
-                    out.printf(")");
-                }
-                out.printf("]");
+        if (baseStructure) {
+            out.printf(", ");
+            dumpStructure(out, "struct", baseStructure, ident);
+        }
+
+        if (stub)
+            out.print(", ", *stub);
+
+        out.printf(")");
+    }
+#else
+    UNUSED_PARAM(map);
+#endif
+}
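
Put together (hypothetical addresses), a monomorphic get_by_id that the LLInt cached and the JIT promoted to a self access would be annotated roughly as:

    llint(struct = 0x10b0c4a00 (offset = 16)) jit(self, struct = 0x10b0c4a00 (offset = 16))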
+
+void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map)
+{
+    Instruction* instruction = instructions().begin() + location;
+
+    const Identifier& ident = identifier(instruction[2].u.operand);
+    
+    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
+
+    out.print(", ", instruction[8].u.putByIdFlags);
+    
+    if (StructureID structureID = instruction[4].u.structureID) {
+        Structure* structure = m_vm->heap.structureIDTable().get(structureID);
+        out.print(" llint(");
+        if (StructureID newStructureID = instruction[6].u.structureID) {
+            Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID);
+            dumpStructure(out, "prev", structure, ident);
+            out.print(", ");
+            dumpStructure(out, "next", newStructure, ident);
+            if (StructureChain* chain = instruction[7].u.structureChain.get()) {
+                out.print(", ");
+                dumpChain(out, chain, ident);
             }
-            out.printf(")");
+        } else
+            dumpStructure(out, "struct", structure, ident);
+        out.print(")");
+    }
+
+#if ENABLE(JIT)
+    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
+        StructureStubInfo& stubInfo = *stubPtr;
+        if (stubInfo.resetByGC)
+            out.print(" (Reset By GC)");
+        
+        out.printf(" jit(");
+        
+        switch (stubInfo.cacheType) {
+        case CacheType::PutByIdReplace:
+            out.print("replace, ");
+            dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident);
+            break;
+        case CacheType::Stub: {
+            out.print("stub, ", *stubInfo.u.stub);
+            break;
+        }
+        case CacheType::Unset:
+            out.printf("unset");
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
         }
+        out.printf(")");
     }
 #else
     UNUSED_PARAM(map);
 #endif
 }
 
-void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling)
+void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
 {
     int dst = (++it)->u.operand;
     int func = (++it)->u.operand;
     int argCount = (++it)->u.operand;
     int registerOffset = (++it)->u.operand;
     printLocationAndOp(out, exec, location, it, op);
-    out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
+    out.print(registerName(dst), ", ", registerName(func), ", ", argCount, ", ", registerOffset);
+    out.print(" (this at ", virtualRegisterForArgument(0, -registerOffset), ")");
     if (cacheDumpMode == DumpCaches) {
-#if ENABLE(LLINT)
         LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
         if (callLinkInfo->lastSeenCallee) {
             out.printf(
@@ -468,17 +492,21 @@ void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, con
                 callLinkInfo->lastSeenCallee.get(),
                 callLinkInfo->lastSeenCallee->executable());
         }
-#endif
 #if ENABLE(JIT)
-        if (numberOfCallLinkInfos()) {
-            JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
+        if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
+            JSFunction* target = info->lastSeenCallee();
             if (target)
                 out.printf(" jit(%p, exec %p)", target, target->executable());
         }
+        
+        if (jitType() != JITCode::FTLJIT)
+            out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
+#else
+        UNUSED_PARAM(map);
 #endif
-        out.print(" status(", CallLinkStatus::computeFor(this, location), ")");
     }
     ++it;
+    ++it;
     dumpArrayProfiling(out, it, hasPrintedProfiling);
     dumpValueProfiling(out, it, hasPrintedProfiling);
 }
@@ -493,6 +521,31 @@ void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location,
     it += 5;
 }
 
+void CodeBlock::dumpSource()
+{
+    dumpSource(WTF::dataFile());
+}
+
+void CodeBlock::dumpSource(PrintStream& out)
+{
+    ScriptExecutable* executable = ownerScriptExecutable();
+    if (executable->isFunctionExecutable()) {
+        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
+        StringView source = functionExecutable->source().provider()->getRange(
+            functionExecutable->parametersStartOffset(),
+            functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
+        
+        out.print("function ", inferredName(), source);
+        return;
+    }
+    out.print(executable->source().view());
+}
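
Illustratively (hypothetical function), for a FunctionExecutable compiled from "function add(a, b) { return a + b; }", dumpSource() prints "function ", the inferred name, and the source range from the start of the parameter list through the profiled body, roughly:

    function add(a, b) { return a + b; }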
+
+void CodeBlock::dumpBytecode()
+{
+    dumpBytecode(WTF::dataFile());
+}
+
 void CodeBlock::dumpBytecode(PrintStream& out)
 {
     // We only use the ExecState* for things that don't actually lead to JS execution,
@@ -509,34 +562,19 @@ void CodeBlock::dumpBytecode(PrintStream& out)
         ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
         static_cast<unsigned long>(instructions().size()),
         static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
-        m_numParameters, m_numCalleeRegisters, m_numVars);
-    if (symbolTable() && symbolTable()->captureCount()) {
-        out.printf(
-            "; %d captured var(s) (from r%d to r%d, inclusive)",
-            symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
-    }
-    if (usesArguments()) {
-        out.printf(
-            "; uses arguments, in r%d, r%d",
-            argumentsRegister().offset(),
-            unmodifiedArgumentsRegister(argumentsRegister()).offset());
-    }
-    if (needsFullScopeChain() && codeType() == FunctionCode)
-        out.printf("; activation in r%d", activationRegister().offset());
+        m_numParameters, m_numCalleeLocals, m_numVars);
+    out.print("; scope at ", scopeRegister());
     out.printf("\n");
     
     StubInfoMap stubInfos;
-#if ENABLE(JIT)
-    {
-        ConcurrentJITLocker locker(m_lock);
-        getStubInfoMap(locker, stubInfos);
-    }
-#endif
+    CallLinkInfoMap callLinkInfos;
+    getStubInfoMap(stubInfos);
+    getCallLinkInfoMap(callLinkInfos);
     
     const Instruction* begin = instructions().begin();
     const Instruction* end = instructions().end();
     for (const Instruction* it = begin; it != end; ++it)
-        dumpBytecode(out, exec, begin, it, stubInfos);
+        dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
     
     if (numberOfIdentifiers()) {
         out.printf("\nIdentifiers:\n");
@@ -551,7 +589,19 @@ void CodeBlock::dumpBytecode(PrintStream& out)
         out.printf("\nConstants:\n");
         size_t i = 0;
         do {
-            out.printf("   k%u = %s\n", static_cast(i), toCString(m_constantRegisters[i].get()).data());
+            const char* sourceCodeRepresentationDescription = nullptr;
+            switch (m_constantsSourceCodeRepresentation[i]) {
+            case SourceCodeRepresentation::Double:
+                sourceCodeRepresentationDescription = ": in source as double";
+                break;
+            case SourceCodeRepresentation::Integer:
+                sourceCodeRepresentationDescription = ": in source as integer";
+                break;
+            case SourceCodeRepresentation::Other:
+                sourceCodeRepresentationDescription = "";
+                break;
+            }
+            out.printf("   k%u = %s%s\n", static_cast(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
             ++i;
         } while (i < m_constantRegisters.size());
     }
@@ -565,14 +615,7 @@ void CodeBlock::dumpBytecode(PrintStream& out)
         } while (i < count);
     }
 
-    if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
-        out.printf("\nException Handlers:\n");
-        unsigned i = 0;
-        do {
-            out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
-            ++i;
-        } while (i < m_rareData->m_exceptionHandlers.size());
-    }
+    dumpExceptionHandlers(out);
     
     if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
         out.printf("Switch Jump Tables:\n");
@@ -598,7 +641,7 @@ void CodeBlock::dumpBytecode(PrintStream& out)
             out.printf("  %1d = {\n", i);
             StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
             for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
-                out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
+                out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
             out.printf("      }\n");
             ++i;
         } while (i < m_rareData->m_stringSwitchJumpTables.size());
@@ -607,6 +650,20 @@ void CodeBlock::dumpBytecode(PrintStream& out)
     out.printf("\n");
 }
 
+void CodeBlock::dumpExceptionHandlers(PrintStream& out)
+{
+    if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
+        out.printf("\nException Handlers:\n");
+        unsigned i = 0;
+        do {
+            HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+            out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
+                i + 1, handler.start, handler.end, handler.target, handler.typeName());
+            ++i;
+        } while (i < m_rareData->m_exceptionHandlers.size());
+    }
+}
+
 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
 {
     if (hasPrintedProfiling) {
@@ -620,7 +677,7 @@ void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
 
 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
 {
-    ConcurrentJITLocker locker(m_lock);
+    ConcurrentJSLocker locker(m_lock);
     
     ++it;
     CString description = it->u.profile->briefDescription(locker);
@@ -632,7 +689,7 @@ void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, boo
 
 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
 {
-    ConcurrentJITLocker locker(m_lock);
+    ConcurrentJSLocker locker(m_lock);
     
     ++it;
     if (!it->u.arrayProfile)
@@ -653,52 +710,113 @@ void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCase
     out.print(name, profile->m_counter);
 }
 
-void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, const StubInfoMap& map)
+void CodeBlock::dumpArithProfile(PrintStream& out, ArithProfile* profile, bool& hasPrintedProfiling)
+{
+    if (!profile)
+        return;
+    
+    beginDumpProfiling(out, hasPrintedProfiling);
+    out.print("results: ", *profile);
+}
+
+void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
+{
+    out.printf("[%4d] %-17s ", location, op);
+}
+
+void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
+{
+    printLocationAndOp(out, exec, location, it, op);
+    out.printf("%s", registerName(operand).data());
+}
+
+void CodeBlock::dumpBytecode(
+    PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
+    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
 {
     int location = it - begin;
     bool hasPrintedProfiling = false;
-    switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
+    OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
+    switch (opcode) {
         case op_enter: {
             printLocationAndOp(out, exec, location, it, "enter");
             break;
         }
-        case op_touch_entry: {
-            printLocationAndOp(out, exec, location, it, "touch_entry");
+        case op_get_scope: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
+            break;
+        }
+        case op_create_direct_arguments: {
+            int r0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "create_direct_arguments");
+            out.printf("%s", registerName(r0).data());
             break;
         }
-        case op_create_activation: {
+        case op_create_scoped_arguments: {
             int r0 = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "create_activation", r0);
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
+            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
             break;
         }
-        case op_create_arguments: {
+        case op_create_cloned_arguments: {
             int r0 = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "create_arguments", r0);
+            printLocationAndOp(out, exec, location, it, "create_cloned_arguments");
+            out.printf("%s", registerName(r0).data());
             break;
         }
-        case op_init_lazy_reg: {
+        case op_argument_count: {
             int r0 = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "init_lazy_reg", r0);
+            printLocationOpAndRegisterOperand(out, exec, location, it, "argument_count", r0);
             break;
         }
-        case op_get_callee: {
+        case op_get_argument: {
             int r0 = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "get_callee", r0);
-            ++it;
+            int index = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "argument", r0);
+            out.printf(", %d", index);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_create_rest: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            unsigned argumentOffset = (++it)->u.unsignedValue;
+            printLocationAndOp(out, exec, location, it, "create_rest");
+            out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data());
+            out.printf("ArgumentsOffset: %u", argumentOffset);
+            break;
+        }
+        case op_get_rest_length: {
+            int r0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_rest_length");
+            out.printf("%s, ", registerName(r0).data());
+            unsigned argumentOffset = (++it)->u.unsignedValue;
+            out.printf("ArgumentsOffset: %u", argumentOffset);
             break;
         }
         case op_create_this: {
             int r0 = (++it)->u.operand;
             int r1 = (++it)->u.operand;
             unsigned inferredInlineCapacity = (++it)->u.operand;
+            unsigned cachedFunction = (++it)->u.operand;
             printLocationAndOp(out, exec, location, it, "create_this");
-            out.printf("%s, %s, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
+            out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
             break;
         }
         case op_to_this: {
             int r0 = (++it)->u.operand;
             printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
-            ++it; // Skip value profile.
+            Structure* structure = (++it)->u.structure.get();
+            if (structure)
+                out.print(", cache(struct = ", RawPointer(structure), ")");
+            out.print(", ", (++it)->u.toThisStatus);
+            break;
+        }
+        case op_check_tdz: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
             break;
         }
         case op_new_object: {
@@ -718,6 +836,30 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             ++it; // Skip array allocation profile.
             break;
         }
+        case op_new_array_with_spread: {
+            int dst = (++it)->u.operand;
+            int argv = (++it)->u.operand;
+            int argc = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_array_with_spread");
+            out.printf("%s, %s, %d, ", registerName(dst).data(), registerName(argv).data(), argc);
+            unsigned bitVectorIndex = (++it)->u.unsignedValue;
+            const BitVector& bitVector = m_unlinkedCode->bitVector(bitVectorIndex);
+            out.print("BitVector:", bitVectorIndex, ":");
+            for (unsigned i = 0; i < static_cast<unsigned>(argc); i++) {
+                if (bitVector.get(i))
+                    out.print("1");
+                else
+                    out.print("0");
+            }
+            break;
+        }
+        case op_spread: {
+            int dst = (++it)->u.operand;
+            int arg = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "spread");
+            out.printf("%s, %s", registerName(dst).data(), registerName(arg).data());
+            break;
+        }
         case op_new_array_with_size: {
             int dst = (++it)->u.operand;
             int length = (++it)->u.operand;
@@ -753,12 +895,20 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
             break;
         }
-        case op_captured_mov: {
+        case op_profile_type: {
             int r0 = (++it)->u.operand;
-            int r1 = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "captured_mov");
-            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
             ++it;
+            ++it;
+            ++it;
+            ++it;
+            printLocationAndOp(out, exec, location, it, "op_profile_type");
+            out.printf("%s", registerName(r0).data());
+            break;
+        }
+        case op_profile_control_flow: {
+            BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
+            printLocationAndOp(out, exec, location, it, "profile_control_flow");
+            out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
             break;
         }
         case op_not: {
@@ -817,10 +967,16 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
         }
         case op_to_number: {
             printUnaryOp(out, exec, location, it, "to_number");
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_to_string: {
+            printUnaryOp(out, exec, location, it, "to_string");
             break;
         }
         case op_negate: {
             printUnaryOp(out, exec, location, it, "negate");
+            ++it; // op_negate has an extra operand for the ArithProfile.
             break;
         }
         case op_add: {
@@ -842,6 +998,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             printBinaryOp(out, exec, location, it, "mod");
             break;
         }
+        case op_pow: {
+            printBinaryOp(out, exec, location, it, "pow");
+            break;
+        }
         case op_sub: {
             printBinaryOp(out, exec, location, it, "sub");
             ++it;
@@ -874,13 +1034,12 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             ++it;
             break;
         }
-        case op_check_has_instance: {
+        case op_overrides_has_instance: {
             int r0 = (++it)->u.operand;
             int r1 = (++it)->u.operand;
             int r2 = (++it)->u.operand;
-            int offset = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "check_has_instance");
-            out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
+            printLocationAndOp(out, exec, location, it, "overrides_has_instance");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
             break;
         }
         case op_instanceof: {
@@ -891,6 +1050,15 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
             break;
         }
+        case op_instanceof_custom: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "instanceof_custom");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+            break;
+        }
         case op_unsigned: {
             printUnaryOp(out, exec, location, it, "unsigned");
             break;
@@ -899,6 +1067,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             printUnaryOp(out, exec, location, it, "typeof");
             break;
         }
+        case op_is_empty: {
+            printUnaryOp(out, exec, location, it, "is_empty");
+            break;
+        }
         case op_is_undefined: {
             printUnaryOp(out, exec, location, it, "is_undefined");
             break;
@@ -911,106 +1083,155 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             printUnaryOp(out, exec, location, it, "is_number");
             break;
         }
-        case op_is_string: {
-            printUnaryOp(out, exec, location, it, "is_string");
+        case op_is_cell_with_type: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int type = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "is_cell_with_type");
+            out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), type);
             break;
         }
         case op_is_object: {
             printUnaryOp(out, exec, location, it, "is_object");
             break;
         }
+        case op_is_object_or_null: {
+            printUnaryOp(out, exec, location, it, "is_object_or_null");
+            break;
+        }
         case op_is_function: {
             printUnaryOp(out, exec, location, it, "is_function");
             break;
         }
         case op_in: {
             printBinaryOp(out, exec, location, it, "in");
+            dumpArrayProfiling(out, it, hasPrintedProfiling);
             break;
         }
-        case op_init_global_const_nop: {
-            printLocationAndOp(out, exec, location, it, "init_global_const_nop");
-            it++;
-            it++;
-            it++;
-            it++;
-            break;
-        }
-        case op_init_global_const: {
-            WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
+        case op_try_get_by_id: {
             int r0 = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "init_global_const");
-            out.printf("g%d(%p), %s", m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
-            it++;
-            it++;
+            int r1 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "try_get_by_id");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
+            dumpValueProfiling(out, it, hasPrintedProfiling);
             break;
         }
         case op_get_by_id:
-        case op_get_by_id_out_of_line:
-        case op_get_by_id_self:
-        case op_get_by_id_proto:
-        case op_get_by_id_chain:
-        case op_get_by_id_getter_self:
-        case op_get_by_id_getter_proto:
-        case op_get_by_id_getter_chain:
-        case op_get_by_id_custom_self:
-        case op_get_by_id_custom_proto:
-        case op_get_by_id_custom_chain:
-        case op_get_by_id_generic:
-        case op_get_array_length:
-        case op_get_string_length: {
+        case op_get_by_id_proto_load:
+        case op_get_by_id_unset:
+        case op_get_array_length: {
             printGetByIdOp(out, exec, location, it);
-            printGetByIdCacheStatus(out, exec, location, map);
+            printGetByIdCacheStatus(out, exec, location, stubInfos);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_get_by_id_with_this: {
+            printLocationAndOp(out, exec, location, it, "get_by_id_with_this");
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), idName(id0, identifier(id0)).data());
             dumpValueProfiling(out, it, hasPrintedProfiling);
             break;
         }
-        case op_get_arguments_length: {
-            printUnaryOp(out, exec, location, it, "get_arguments_length");
-            it++;
+        case op_get_by_val_with_this: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_by_val_with_this");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+            dumpValueProfiling(out, it, hasPrintedProfiling);
             break;
         }
         case op_put_by_id: {
             printPutByIdOp(out, exec, location, it, "put_by_id");
+            printPutByIdCacheStatus(out, location, stubInfos);
             break;
         }
-        case op_put_by_id_out_of_line: {
-            printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
+        case op_put_by_id_with_this: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_by_id_with_this");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), registerName(r2).data());
             break;
         }
-        case op_put_by_id_replace: {
-            printPutByIdOp(out, exec, location, it, "put_by_id_replace");
+        case op_put_by_val_with_this: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_by_val_with_this");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
             break;
         }
-        case op_put_by_id_transition: {
-            printPutByIdOp(out, exec, location, it, "put_by_id_transition");
+        case op_put_getter_by_id: {
+            int r0 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_getter_by_id");
+            out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
             break;
         }
-        case op_put_by_id_transition_direct: {
-            printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
+        case op_put_setter_by_id: {
+            int r0 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_setter_by_id");
+            out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
             break;
         }
-        case op_put_by_id_transition_direct_out_of_line: {
-            printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
+        case op_put_getter_setter_by_id: {
+            int r0 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_getter_setter_by_id");
+            out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data());
             break;
         }
-        case op_put_by_id_transition_normal: {
-            printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
+        case op_put_getter_by_val: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_getter_by_val");
+            out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
             break;
         }
-        case op_put_by_id_transition_normal_out_of_line: {
-            printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
+        case op_put_setter_by_val: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_setter_by_val");
+            out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
             break;
         }
-        case op_put_by_id_generic: {
-            printPutByIdOp(out, exec, location, it, "put_by_id_generic");
+        case op_define_data_property: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "define_data_property");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
             break;
         }
-        case op_put_getter_setter: {
+        case op_define_accessor_property: {
             int r0 = (++it)->u.operand;
-            int id0 = (++it)->u.operand;
             int r1 = (++it)->u.operand;
             int r2 = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "put_getter_setter");
-            out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
+            int r3 = (++it)->u.operand;
+            int r4 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "define_accessor_property");
+            out.printf("%s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data());
             break;
         }
         case op_del_by_id: {
@@ -1031,27 +1252,6 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             dumpValueProfiling(out, it, hasPrintedProfiling);
             break;
         }
-        case op_get_argument_by_val: {
-            int r0 = (++it)->u.operand;
-            int r1 = (++it)->u.operand;
-            int r2 = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "get_argument_by_val");
-            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
-            ++it;
-            dumpValueProfiling(out, it, hasPrintedProfiling);
-            break;
-        }
-        case op_get_by_pname: {
-            int r0 = (++it)->u.operand;
-            int r1 = (++it)->u.operand;
-            int r2 = (++it)->u.operand;
-            int r3 = (++it)->u.operand;
-            int r4 = (++it)->u.operand;
-            int r5 = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "get_by_pname");
-            out.printf("%s, %s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
-            break;
-        }
         case op_put_by_val: {
             int r0 = (++it)->u.operand;
             int r1 = (++it)->u.operand;
@@ -1114,6 +1314,7 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             int offset = (++it)->u.operand;
             printLocationAndOp(out, exec, location, it, "jneq_ptr");
             out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
+            ++it;
             break;
         }
         case op_jless: {
@@ -1184,6 +1385,23 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             printLocationAndOp(out, exec, location, it, "loop_hint");
             break;
         }
+        case op_watchdog: {
+            printLocationAndOp(out, exec, location, it, "watchdog");
+            break;
+        }
+        case op_log_shadow_chicken_prologue: {
+            int r0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "log_shadow_chicken_prologue");
+            out.printf("%s", registerName(r0).data());
+            break;
+        }
+        case op_log_shadow_chicken_tail: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "log_shadow_chicken_tail");
+            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+            break;
+        }
         case op_switch_imm: {
             int tableIndex = (++it)->u.operand;
             int defaultTarget = (++it)->u.operand;
@@ -1210,73 +1428,108 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
         }
         case op_new_func: {
             int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
             int f0 = (++it)->u.operand;
-            int shouldCheck = (++it)->u.operand;
             printLocationAndOp(out, exec, location, it, "new_func");
-            out.printf("%s, f%d, %s", registerName(r0).data(), f0, shouldCheck ? "" : "");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
             break;
         }
-        case op_new_captured_func: {
+        case op_new_generator_func: {
             int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
             int f0 = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "new_captured_func");
-            out.printf("%s, f%d", registerName(r0).data(), f0);
-            ++it;
+            printLocationAndOp(out, exec, location, it, "new_generator_func");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_new_async_func: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_async_func");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
             break;
         }
         case op_new_func_exp: {
             int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
             int f0 = (++it)->u.operand;
             printLocationAndOp(out, exec, location, it, "new_func_exp");
-            out.printf("%s, f%d", registerName(r0).data(), f0);
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_new_generator_func_exp: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_generator_func_exp");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_new_async_func_exp: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_async_func_exp");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_set_function_name: {
+            int funcReg = (++it)->u.operand;
+            int nameReg = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "set_function_name");
+            out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data());
             break;
         }
         case op_call: {
-            printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling);
+            printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
+            break;
+        }
+        case op_tail_call: {
+            printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
             break;
         }
         case op_call_eval: {
-            printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling);
+            printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
             break;
         }
-        case op_call_varargs: {
+            
+        case op_construct_varargs:
+        case op_call_varargs:
+        case op_tail_call_varargs:
+        case op_tail_call_forward_arguments: {
             int result = (++it)->u.operand;
             int callee = (++it)->u.operand;
             int thisValue = (++it)->u.operand;
             int arguments = (++it)->u.operand;
             int firstFreeRegister = (++it)->u.operand;
+            int varArgOffset = (++it)->u.operand;
             ++it;
-            printLocationAndOp(out, exec, location, it, "call_varargs");
-            out.printf("%s, %s, %s, %s, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister);
+            const char* opName;
+            if (opcode == op_call_varargs)
+                opName = "call_varargs";
+            else if (opcode == op_construct_varargs)
+                opName = "construct_varargs";
+            else if (opcode == op_tail_call_varargs)
+                opName = "tail_call_varargs";
+            else if (opcode == op_tail_call_forward_arguments)
+                opName = "tail_call_forward_arguments";
+            else
+                RELEASE_ASSERT_NOT_REACHED();
+
+            printLocationAndOp(out, exec, location, it, opName);
+            out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
             dumpValueProfiling(out, it, hasPrintedProfiling);
             break;
         }
-        case op_tear_off_activation: {
-            int r0 = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "tear_off_activation", r0);
-            break;
-        }
-        case op_tear_off_arguments: {
-            int r0 = (++it)->u.operand;
-            int r1 = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "tear_off_arguments");
-            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
-            break;
-        }
+
         case op_ret: {
             int r0 = (++it)->u.operand;
             printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
             break;
         }
-        case op_ret_object_or_this: {
-            int r0 = (++it)->u.operand;
-            int r1 = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "constructor_ret");
-            out.printf("%s %s", registerName(r0).data(), registerName(r1).data());
-            break;
-        }
         case op_construct: {
-            printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling);
+            printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
             break;
         }
         case op_strcat: {
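Aside (illustration only, not part of the patch): the shared varargs case above funnels four opcodes through one handler and recovers the printable name with an if/else chain ending in RELEASE_ASSERT_NOT_REACHED(). A minimal, self-contained sketch of that dispatch pattern, using a hypothetical three-member enum in place of JSC's much larger OpcodeID:

    #include <cstdio>

    enum Opcode { op_call_varargs, op_construct_varargs, op_tail_call_varargs };

    // Mirrors the dumper's if/else name dispatch for opcodes that share a handler.
    static const char* opcodeName(Opcode opcode)
    {
        if (opcode == op_call_varargs)
            return "call_varargs";
        if (opcode == op_construct_varargs)
            return "construct_varargs";
        return "tail_call_varargs";
    }

    int main()
    {
        std::printf("%s\n", opcodeName(op_construct_varargs)); // prints "construct_varargs"
        return 0;
    }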
@@ -1294,49 +1547,120 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
             break;
         }
-        case op_get_pnames: {
-            int r0 = it[1].u.operand;
-            int r1 = it[2].u.operand;
-            int r2 = it[3].u.operand;
-            int r3 = it[4].u.operand;
-            int offset = it[5].u.operand;
-            printLocationAndOp(out, exec, location, it, "get_pnames");
-            out.printf("%s, %s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
-            it += OPCODE_LENGTH(op_get_pnames) - 1;
+        case op_get_enumerable_length: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
+            out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+            it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
+            break;
+        }
+        case op_has_indexed_property: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            int propertyName = it[3].u.operand;
+            ArrayProfile* arrayProfile = it[4].u.arrayProfile;
+            printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
+            out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
+            it += OPCODE_LENGTH(op_has_indexed_property) - 1;
+            break;
+        }
+        case op_has_structure_property: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            int propertyName = it[3].u.operand;
+            int enumerator = it[4].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_has_structure_property");
+            out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
+            it += OPCODE_LENGTH(op_has_structure_property) - 1;
+            break;
+        }
+        case op_has_generic_property: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            int propertyName = it[3].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_has_generic_property");
+            out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
+            it += OPCODE_LENGTH(op_has_generic_property) - 1;
+            break;
+        }
+        case op_get_direct_pname: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            int propertyName = it[3].u.operand;
+            int index = it[4].u.operand;
+            int enumerator = it[5].u.operand;
+            ValueProfile* profile = it[6].u.profile;
+            printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
+            out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
+            it += OPCODE_LENGTH(op_get_direct_pname) - 1;
             break;
+
         }
-        case op_next_pname: {
-            int dest = it[1].u.operand;
+        case op_get_property_enumerator: {
+            int dst = it[1].u.operand;
             int base = it[2].u.operand;
-            int i = it[3].u.operand;
-            int size = it[4].u.operand;
-            int iter = it[5].u.operand;
-            int offset = it[6].u.operand;
-            printLocationAndOp(out, exec, location, it, "next_pname");
-            out.printf("%s, %s, %s, %s, %s, %d(->%d)", registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
-            it += OPCODE_LENGTH(op_next_pname) - 1;
+            printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
+            out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+            it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
+            break;
+        }
+        case op_enumerator_structure_pname: {
+            int dst = it[1].u.operand;
+            int enumerator = it[2].u.operand;
+            int index = it[3].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
+            out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+            it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
+            break;
+        }
+        case op_enumerator_generic_pname: {
+            int dst = it[1].u.operand;
+            int enumerator = it[2].u.operand;
+            int index = it[3].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
+            out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+            it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
+            break;
+        }
+        case op_to_index_string: {
+            int dst = it[1].u.operand;
+            int index = it[2].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_to_index_string");
+            out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
+            it += OPCODE_LENGTH(op_to_index_string) - 1;
             break;
         }
         case op_push_with_scope: {
-            int r0 = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "push_with_scope", r0);
+            int dst = (++it)->u.operand;
+            int newScope = (++it)->u.operand;
+            int currentScope = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "push_with_scope");
+            out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
             break;
         }
-        case op_pop_scope: {
-            printLocationAndOp(out, exec, location, it, "pop_scope");
+        case op_get_parent_scope: {
+            int dst = (++it)->u.operand;
+            int parentScope = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_parent_scope");
+            out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
             break;
         }
-        case op_push_name_scope: {
-            int id0 = (++it)->u.operand;
-            int r1 = (++it)->u.operand;
-            unsigned attributes = (++it)->u.operand;
-            printLocationAndOp(out, exec, location, it, "push_name_scope");
-            out.printf("%s, %s, %u", idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
+        case op_create_lexical_environment: {
+            int dst = (++it)->u.operand;
+            int scope = (++it)->u.operand;
+            int symbolTable = (++it)->u.operand;
+            int initialValue = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "create_lexical_environment");
+            out.printf("%s, %s, %s, %s", 
+                registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
             break;
         }
         case op_catch: {
             int r0 = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "catch");
+            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
             break;
         }
         case op_throw: {
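Aside (illustration only, not part of the patch): the enumerator cases above read operands with it[n] indexing and then bump the cursor by OPCODE_LENGTH(op) - 1, unlike the (++it)->u.operand style used elsewhere in this dumper. A self-contained sketch of that pattern; Instruction and the length constant are toy stand-ins for JSC's real union type and generated table:

    #include <cstdio>

    struct Instruction { union { int operand; } u; }; // toy stand-in
    static const int kOpcodeLength = 3;               // stand-in for OPCODE_LENGTH(op)

    // Reads operands by index, then leaves the cursor on the opcode's last word.
    static void dumpTwoOperandOp(const Instruction*& it)
    {
        int dst = it[1].u.operand;
        int base = it[2].u.operand;
        std::printf("loc%d, loc%d\n", dst, base);
        it += kOpcodeLength - 1;
    }

    int main()
    {
        Instruction stream[3] = {};
        stream[1].u.operand = 4;
        stream[2].u.operand = 7;
        const Instruction* it = stream;
        dumpTwoOperandOp(it); // prints "loc4, loc7"
        return 0;
    }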
@@ -1346,26 +1670,24 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
         }
         case op_throw_static_error: {
             int k0 = (++it)->u.operand;
-            int k1 = (++it)->u.operand;
+            ErrorType k1 = static_cast<ErrorType>((++it)->u.unsignedValue);
             printLocationAndOp(out, exec, location, it, "throw_static_error");
-            out.printf("%s, %s", constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
+            out.printf("%s, ", constantName(k0).data());
+            out.print(k1);
             break;
         }
         case op_debug: {
-            int debugHookID = (++it)->u.operand;
+            int debugHookType = (++it)->u.operand;
             int hasBreakpointFlag = (++it)->u.operand;
             printLocationAndOp(out, exec, location, it, "debug");
-            out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
-            break;
-        }
-        case op_profile_will_call: {
-            int function = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
+            out.printf("%s, %d", debugHookName(debugHookType), hasBreakpointFlag);
             break;
         }
-        case op_profile_did_call: {
-            int function = (++it)->u.operand;
-            printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
+        case op_assert: {
+            int condition = (++it)->u.operand;
+            int line = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "assert");
+            out.printf("%s, %d", registerName(condition).data(), line);
             break;
         }
         case op_end: {
@@ -1375,53 +1697,81 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
         }
         case op_resolve_scope: {
             int r0 = (++it)->u.operand;
+            int scope = (++it)->u.operand;
             int id0 = (++it)->u.operand;
-            int resolveModeAndType = (++it)->u.operand;
-            ++it; // depth
+            ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
+            int depth = (++it)->u.operand;
+            void* pointer = (++it)->u.pointer;
             printLocationAndOp(out, exec, location, it, "resolve_scope");
-            out.printf("%s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
-            ++it;
+            out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
             break;
         }
         case op_get_from_scope: {
             int r0 = (++it)->u.operand;
             int r1 = (++it)->u.operand;
             int id0 = (++it)->u.operand;
-            int resolveModeAndType = (++it)->u.operand;
+            GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
             ++it; // Structure
-            ++it; // Operand
-            ++it; // Skip value profile.
+            int operand = (++it)->u.operand; // Operand
             printLocationAndOp(out, exec, location, it, "get_from_scope");
-            out.printf("%s, %s, %s, %d", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
+            out.print(registerName(r0), ", ", registerName(r1));
+            if (static_cast<unsigned>(id0) == UINT_MAX)
+                out.print(", anonymous");
+            else
+                out.print(", ", idName(id0, identifier(id0)));
+            out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
             break;
         }
         case op_put_to_scope: {
             int r0 = (++it)->u.operand;
             int id0 = (++it)->u.operand;
             int r1 = (++it)->u.operand;
-            int resolveModeAndType = (++it)->u.operand;
+            GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
             ++it; // Structure
-            ++it; // Operand
+            int operand = (++it)->u.operand; // Operand
             printLocationAndOp(out, exec, location, it, "put_to_scope");
-            out.printf("%s, %s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), resolveModeAndType);
+            out.print(registerName(r0));
+            if (static_cast<unsigned>(id0) == UINT_MAX)
+                out.print(", anonymous");
+            else
+                out.print(", ", idName(id0, identifier(id0)));
+            out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, , ", operand);
+            break;
+        }
+        case op_get_from_arguments: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_from_arguments");
+            out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_put_to_arguments: {
+            int r0 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_to_arguments");
+            out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
             break;
         }
-#if ENABLE(LLINT_C_LOOP)
         default:
             RELEASE_ASSERT_NOT_REACHED();
-#endif
     }
 
     dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
-    dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
+    {
+        dumpArithProfile(out, arithProfileForBytecodeOffset(location), hasPrintedProfiling);
+    }
     
 #if ENABLE(DFG_JIT)
     Vector<DFG::OSRExitSite> exitSites = exitProfile().exitSitesFor(location);
     if (!exitSites.isEmpty()) {
         out.print(" !! frequent exits: ");
         CommaPrinter comma;
-        for (unsigned i = 0; i < exitSites.size(); ++i)
-            out.print(comma, exitSites[i].kind());
+        for (auto& exitSite : exitSites)
+            out.print(comma, exitSite.kind(), " ", exitSite.jitType());
     }
 #else // ENABLE(DFG_JIT)
     UNUSED_PARAM(location);
@@ -1429,11 +1779,13 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
     out.print("\n");
 }
 
-void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
+void CodeBlock::dumpBytecode(
+    PrintStream& out, unsigned bytecodeOffset,
+    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
 {
     ExecState* exec = m_globalObject->globalExec();
     const Instruction* it = instructions().begin() + bytecodeOffset;
-    dumpBytecode(out, exec, instructions().begin(), it);
+    dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
 }
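Aside (illustration only, not part of the patch): the get_from_scope and put_to_scope dumps a few hunks above decode a single GetPutInfo word into its resolve mode, resolve type, and initialization mode. A self-contained sketch of that kind of bit-packing; the field widths below are invented for illustration, and JSC's real layout lives in GetPutInfo.h:

    #include <cstdio>

    struct ToyGetPutInfo {
        unsigned word;
        unsigned resolveMode() const { return word >> 12; }          // invented width
        unsigned resolveType() const { return (word >> 4) & 0xffu; } // invented width
        unsigned initializationMode() const { return word & 0xfu; }  // invented width
        unsigned operand() const { return word; }
    };

    int main()
    {
        ToyGetPutInfo info { (1u << 12) | (3u << 4) | 2u };
        std::printf("%u <%u|%u|%u>\n", info.operand(),
            info.resolveMode(), info.resolveType(), info.initializationMode());
        return 0;
    }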
 
 #define FOR_EACH_MEMBER_VECTOR(macro) \
@@ -1444,63 +1796,84 @@ void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
     macro(functionExpressions) \
     macro(constantRegisters)
 
-#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
-    macro(regexps) \
-    macro(functions) \
-    macro(exceptionHandlers) \
-    macro(switchJumpTables) \
-    macro(stringSwitchJumpTables) \
-    macro(evalCodeCache) \
-    macro(expressionInfo) \
-    macro(lineInfo) \
-    macro(callReturnIndexVector)
-
 template<typename T>
 static size_t sizeInBytes(const Vector<T>& vector)
 {
     return vector.capacity() * sizeof(T);
 }
 
-CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
-    : m_globalObject(other.m_globalObject)
-    , m_heap(other.m_heap)
-    , m_numCalleeRegisters(other.m_numCalleeRegisters)
+namespace {
+
+class PutToScopeFireDetail : public FireDetail {
+public:
+    PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
+        : m_codeBlock(codeBlock)
+        , m_ident(ident)
+    {
+    }
+    
+    void dump(PrintStream& out) const override
+    {
+        out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast(m_codeBlock->ownerExecutable())), " for ", m_ident);
+    }
+    
+private:
+    CodeBlock* m_codeBlock;
+    const Identifier& m_ident;
+};
+
+} // anonymous namespace
+
+CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
+    : JSCell(*vm, structure)
+    , m_globalObject(other.m_globalObject)
+    , m_numCalleeLocals(other.m_numCalleeLocals)
     , m_numVars(other.m_numVars)
-    , m_isConstructor(other.m_isConstructor)
     , m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
+    , m_didFailJITCompilation(false)
     , m_didFailFTLCompilation(false)
-    , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
+    , m_hasBeenCompiledWithFTL(false)
+    , m_isConstructor(other.m_isConstructor)
+    , m_isStrictMode(other.m_isStrictMode)
+    , m_codeType(other.m_codeType)
+    , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
+    , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
+    , m_hasDebuggerStatement(false)
     , m_steppingMode(SteppingModeDisabled)
     , m_numBreakpoints(0)
-    , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
+    , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
     , m_vm(other.m_vm)
     , m_instructions(other.m_instructions)
     , m_thisRegister(other.m_thisRegister)
-    , m_argumentsRegister(other.m_argumentsRegister)
-    , m_activationRegister(other.m_activationRegister)
-    , m_isStrictMode(other.m_isStrictMode)
-    , m_needsActivation(other.m_needsActivation)
+    , m_scopeRegister(other.m_scopeRegister)
+    , m_hash(other.m_hash)
     , m_source(other.m_source)
     , m_sourceOffset(other.m_sourceOffset)
     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
-    , m_codeType(other.m_codeType)
     , m_constantRegisters(other.m_constantRegisters)
+    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
     , m_functionDecls(other.m_functionDecls)
     , m_functionExprs(other.m_functionExprs)
     , m_osrExitCounter(0)
     , m_optimizationDelayCounter(0)
     , m_reoptimizationRetryCounter(0)
-    , m_hash(other.m_hash)
-#if ENABLE(JIT)
-    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
-#endif
+    , m_creationTime(std::chrono::steady_clock::now())
 {
-    ASSERT(m_heap->isDeferred());
-    
-    if (SymbolTable* symbolTable = other.symbolTable())
-        m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
-    
+    m_visitWeaklyHasBeenCalled = false;
+
+    ASSERT(heap()->isDeferred());
+    ASSERT(m_scopeRegister.isLocal());
+
     setNumParameters(other.numParameters());
+}
+
+void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
+{
+    Base::finishCreation(vm);
+
     optimizeAfterWarmUp();
     jitAfterWarmUp();
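Aside (illustration only, not part of the patch): the CopyParsedBlockTag constructor above now stops at setNumParameters(), and the remaining setup moves into finishCreation(), which first calls Base::finishCreation(vm) — the usual two-phase construction idiom for GC-managed cells. A minimal sketch of the idiom with toy class names (not JSC's API):

    #include <cstdio>

    class CellBase {
    public:
        void finishCreation() { std::printf("base finished\n"); }
    };

    class Block : public CellBase {
    public:
        explicit Block(int numVars) : m_numVars(numVars) {} // cheap field setup only

        void finishCreation() // heavier linking work, run after construction
        {
            CellBase::finishCreation();
            std::printf("linked block with %d vars\n", m_numVars);
        }

    private:
        int m_numVars;
    };

    int main()
    {
        Block block(4);
        block.finishCreation(); // the object is usable only after this call
        return 0;
    }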
 
@@ -1513,87 +1886,94 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
     }
     
-    m_heap->m_codeBlocks.add(this);
-    m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
+    heap()->m_codeBlocks->add(this);
 }
 
-CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
-    : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
-    , m_heap(&m_globalObject->vm().heap)
-    , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
+CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+    JSScope* scope, RefPtr&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+    : JSCell(*vm, structure)
+    , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
+    , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
     , m_numVars(unlinkedCodeBlock->m_numVars)
-    , m_isConstructor(unlinkedCodeBlock->isConstructor())
     , m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
+    , m_didFailJITCompilation(false)
     , m_didFailFTLCompilation(false)
-    , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
+    , m_hasBeenCompiledWithFTL(false)
+    , m_isConstructor(unlinkedCodeBlock->isConstructor())
+    , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
+    , m_codeType(unlinkedCodeBlock->codeType())
+    , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
+    , m_hasDebuggerStatement(false)
     , m_steppingMode(SteppingModeDisabled)
     , m_numBreakpoints(0)
-    , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
+    , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
     , m_vm(unlinkedCodeBlock->vm())
     , m_thisRegister(unlinkedCodeBlock->thisRegister())
-    , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
-    , m_activationRegister(unlinkedCodeBlock->activationRegister())
-    , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
-    , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain() && unlinkedCodeBlock->codeType() == FunctionCode)
-    , m_source(sourceProvider)
+    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
+    , m_source(WTFMove(sourceProvider))
     , m_sourceOffset(sourceOffset)
     , m_firstLineColumnOffset(firstLineColumnOffset)
-    , m_codeType(unlinkedCodeBlock->codeType())
     , m_osrExitCounter(0)
     , m_optimizationDelayCounter(0)
     , m_reoptimizationRetryCounter(0)
-#if ENABLE(JIT)
-    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
-#endif
+    , m_creationTime(std::chrono::steady_clock::now())
 {
-    ASSERT(m_heap->isDeferred());
+    m_visitWeaklyHasBeenCalled = false;
+
+    ASSERT(heap()->isDeferred());
+    ASSERT(m_scopeRegister.isLocal());
 
-    bool didCloneSymbolTable = false;
-    
-    if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
-        if (codeType() == FunctionCode && symbolTable->captureCount()) {
-            m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->clone(*m_vm));
-            didCloneSymbolTable = true;
-        } else
-            m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
-    }
-    
     ASSERT(m_source);
     setNumParameters(unlinkedCodeBlock->numParameters());
+}
 
-    setConstantRegisters(unlinkedCodeBlock->constantRegisters());
+void CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+    JSScope* scope)
+{
+    Base::finishCreation(vm);
+
+    if (vm.typeProfiler() || vm.controlFlowProfiler())
+        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
+
+    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
     if (unlinkedCodeBlock->usesGlobalObject())
-        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().offset()].set(*m_vm, ownerExecutable, m_globalObject.get());
-    m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
+        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());
+
+    for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
+        LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
+        if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
+            m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
+    }
+
+    // We already have the cloned symbol table for the module environment since we need to instantiate
+    // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
+    if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
+        SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
+        if (m_vm->typeProfiler()) {
+            ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
+            clonedSymbolTable->prepareForTypeProfiling(locker);
+        }
+        replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
+    }
+
+    bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
+    m_functionDecls = RefCountedArray>(unlinkedCodeBlock->numberOfFunctionDecls());
     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
-        unsigned lineCount = unlinkedExecutable->lineCount();
-        unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
-        bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
-        unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
-        bool endColumnIsOnStartLine = !lineCount;
-        unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
-        unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
-        unsigned sourceLength = unlinkedExecutable->sourceLength();
-        SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
-        FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
-        m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
-    }
-
-    m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
+        if (shouldUpdateFunctionHasExecutedCache)
+            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+        m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
+    }
+
+    m_functionExprs = RefCountedArray>(unlinkedCodeBlock->numberOfFunctionExprs());
     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
-        unsigned lineCount = unlinkedExecutable->lineCount();
-        unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
-        bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
-        unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
-        bool endColumnIsOnStartLine = !lineCount;
-        unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
-        unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
-        unsigned sourceLength = unlinkedExecutable->sourceLength();
-        SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
-        FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
-        m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
+        if (shouldUpdateFunctionHasExecutedCache)
+            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+        m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
     }
 
     if (unlinkedCodeBlock->hasRareData()) {
@@ -1607,15 +1987,13 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
         }
         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
             m_rareData->m_exceptionHandlers.resizeToFit(count);
-            size_t nonLocalScopeDepth = scope->depth();
             for (size_t i = 0; i < count; i++) {
-                const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
-                m_rareData->m_exceptionHandlers[i].start = handler.start;
-                m_rareData->m_exceptionHandlers[i].end = handler.end;
-                m_rareData->m_exceptionHandlers[i].target = handler.target;
-                m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
-#if ENABLE(JIT) && ENABLE(LLINT)
-                m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch)));
+                const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
+                HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+#if ENABLE(JIT)
+                handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
+#else
+                handler.initialize(unlinkedHandler);
 #endif
             }
         }
@@ -1627,7 +2005,7 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
                 for (; ptr != end; ++ptr) {
                     OffsetLocation offset;
-                    offset.branchOffset = ptr->value;
+                    offset.branchOffset = ptr->value.branchOffset;
                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
                 }
             }
@@ -1645,58 +2023,83 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
     }
 
     // Allocate metadata buffers for the bytecode
-#if ENABLE(LLINT)
     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
-        m_llintCallLinkInfos.resizeToFit(size);
-#endif
+        m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
         m_arrayProfiles.grow(size);
     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
-        m_arrayAllocationProfiles.resizeToFit(size);
+        m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
-        m_valueProfiles.resizeToFit(size);
+        m_valueProfiles = RefCountedArray<ValueProfile>(size);
     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
-        m_objectAllocationProfiles.resizeToFit(size);
+        m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
+
+#if ENABLE(JIT)
+    setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
+#endif
 
     // Copy and translate the UnlinkedInstructions
     unsigned instructionCount = unlinkedCodeBlock->instructions().count();
     UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
 
-    Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
+    // Bookkeep the strongly referenced module environments.
+    HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
+
+    RefCountedArray<Instruction> instructions(instructionCount);
+
+    unsigned valueProfileCount = 0;
+    auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
+        unsigned valueProfileIndex = valueProfileCount++;
+        ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
+        ASSERT(profile->m_bytecodeOffset == -1);
+        profile->m_bytecodeOffset = bytecodeOffset;
+        instructions[bytecodeOffset + opLength - 1] = profile;
+    };
+
     for (unsigned i = 0; !instructionReader.atEnd(); ) {
         const UnlinkedInstruction* pc = instructionReader.next();
 
         unsigned opLength = opcodeLength(pc[0].u.opcode);
 
-        instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
+        instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
         for (size_t j = 1; j < opLength; ++j) {
             if (sizeof(int32_t) != sizeof(intptr_t))
                 instructions[i + j].u.pointer = 0;
             instructions[i + j].u.operand = pc[j].u.operand;
         }
         switch (pc[0].u.opcode) {
+        case op_has_indexed_property: {
+            int arrayProfileIndex = pc[opLength - 1].u.operand;
+            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+
+            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+            break;
+        }
         case op_call_varargs:
-        case op_get_by_val:
-        case op_get_argument_by_val: {
+        case op_tail_call_varargs:
+        case op_tail_call_forward_arguments:
+        case op_construct_varargs:
+        case op_get_by_val: {
             int arrayProfileIndex = pc[opLength - 2].u.operand;
             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
 
             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
             FALLTHROUGH;
         }
-        case op_get_by_id: {
-            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
-            ASSERT(profile->m_bytecodeOffset == -1);
-            profile->m_bytecodeOffset = i;
-            instructions[i + opLength - 1] = profile;
-            break;
-        }
-        case op_put_by_val: {
-            int arrayProfileIndex = pc[opLength - 1].u.operand;
-            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
-            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+        case op_get_direct_pname:
+        case op_get_by_id:
+        case op_get_by_id_with_this:
+        case op_try_get_by_id:
+        case op_get_by_val_with_this:
+        case op_get_from_arguments:
+        case op_to_number:
+        case op_get_argument: {
+            linkValueProfile(i, opLength);
             break;
         }
+
+        case op_in:
+        case op_put_by_val:
         case op_put_by_val_direct: {
             int arrayProfileIndex = pc[opLength - 1].u.operand;
             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
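Aside (illustration only, not part of the patch): linkValueProfile above replaces the repeated profile-linking boilerplate with a lambda that captures valueProfileCount by reference and hands out pre-sized slots in encounter order. A self-contained sketch of that counter-capturing pattern:

    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<int> profiles(3, -1); // pre-sized slots; -1 marks "unlinked"
        unsigned valueProfileCount = 0;

        // Captures the counter by reference, like the lambda in the patch.
        auto link = [&](unsigned bytecodeOffset) {
            profiles[valueProfileCount++] = static_cast<int>(bytecodeOffset);
        };

        link(0);
        link(7);
        std::printf("%d %d %d\n", profiles[0], profiles[1], profiles[2]); // 0 7 -1
        return 0;
    }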
@@ -1717,125 +2120,216 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
             int inferredInlineCapacity = pc[opLength - 2].u.operand;
 
             instructions[i + opLength - 1] = objectAllocationProfile;
-            objectAllocationProfile->initialize(*vm(),
-                m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
+            objectAllocationProfile->initialize(vm,
+                m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
             break;
         }
 
         case op_call:
+        case op_tail_call:
         case op_call_eval: {
-            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
-            ASSERT(profile->m_bytecodeOffset == -1);
-            profile->m_bytecodeOffset = i;
-            instructions[i + opLength - 1] = profile;
+            linkValueProfile(i, opLength);
             int arrayProfileIndex = pc[opLength - 2].u.operand;
             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
-#if ENABLE(LLINT)
             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-#endif
             break;
         }
         case op_construct: {
-#if ENABLE(LLINT)
             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-#endif
-            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
-            ASSERT(profile->m_bytecodeOffset == -1);
-            profile->m_bytecodeOffset = i;
-            instructions[i + opLength - 1] = profile;
-            break;
-        }
-        case op_get_by_id_out_of_line:
-        case op_get_by_id_self:
-        case op_get_by_id_proto:
-        case op_get_by_id_chain:
-        case op_get_by_id_getter_self:
-        case op_get_by_id_getter_proto:
-        case op_get_by_id_getter_chain:
-        case op_get_by_id_custom_self:
-        case op_get_by_id_custom_proto:
-        case op_get_by_id_custom_chain:
-        case op_get_by_id_generic:
-        case op_get_array_length:
-        case op_get_string_length:
-            CRASH();
-
-        case op_init_global_const_nop: {
-            ASSERT(codeType() == GlobalCode);
-            Identifier ident = identifier(pc[4].u.operand);
-            SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
-            if (entry.isNull())
-                break;
-
-            instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
-            instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
+            linkValueProfile(i, opLength);
             break;
         }
+        case op_get_array_length:
+            CRASH();
 
         case op_resolve_scope: {
-            const Identifier& ident = identifier(pc[2].u.operand);
-            ResolveType type = static_cast(pc[3].u.operand);
-
-            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, type);
-            instructions[i + 3].u.operand = op.type;
-            instructions[i + 4].u.operand = op.depth;
-            if (op.activation)
-                instructions[i + 5].u.activation.set(*vm(), ownerExecutable, op.activation);
+            const Identifier& ident = identifier(pc[3].u.operand);
+            ResolveType type = static_cast(pc[4].u.operand);
+            RELEASE_ASSERT(type != LocalClosureVar);
+            int localScopeDepth = pc[5].u.operand;
+
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+            instructions[i + 4].u.operand = op.type;
+            instructions[i + 5].u.operand = op.depth;
+            if (op.lexicalEnvironment) {
+                if (op.type == ModuleVar) {
+                    // Keep the linked module environment strongly referenced.
+                    if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
+                        addConstant(op.lexicalEnvironment);
+                    instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
+                } else
+                    instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
+            } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
+                instructions[i + 6].u.jsCell.set(vm, this, constantScope);
+            else
+                instructions[i + 6].u.pointer = nullptr;
             break;
         }
 
         case op_get_from_scope: {
-            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
-            ASSERT(profile->m_bytecodeOffset == -1);
-            profile->m_bytecodeOffset = i;
-            instructions[i + opLength - 1] = profile;
+            linkValueProfile(i, opLength);
+
+            // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
+
+            int localScopeDepth = pc[5].u.operand;
+            instructions[i + 5].u.pointer = nullptr;
+
+            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+            ASSERT(!isInitialization(getPutInfo.initializationMode()));
+            if (getPutInfo.resolveType() == LocalClosureVar) {
+                instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+                break;
+            }
 
-            // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
             const Identifier& ident = identifier(pc[3].u.operand);
-            ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
-            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, modeAndType.type());
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
 
-            instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
-            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+            if (op.type == ModuleVar)
+                instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
             else if (op.structure)
-                instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+                instructions[i + 5].u.structure.set(vm, this, op.structure);
             instructions[i + 6].u.pointer = reinterpret_cast(op.operand);
             break;
         }
 
         case op_put_to_scope: {
-            // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+            // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
+            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+            if (getPutInfo.resolveType() == LocalClosureVar) {
+                // Only do watching if the property we're putting to is not anonymous.
+                if (static_cast(pc[2].u.operand) != UINT_MAX) {
+                    int symbolTableIndex = pc[5].u.operand;
+                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+                    const Identifier& ident = identifier(pc[2].u.operand);
+                    ConcurrentJSLocker locker(symbolTable->m_lock);
+                    auto iter = symbolTable->find(locker, ident.impl());
+                    ASSERT(iter != symbolTable->end(locker));
+                    iter->value.prepareToWatch();
+                    instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
+                } else
+                    instructions[i + 5].u.watchpointSet = nullptr;
+                break;
+            }
+
             const Identifier& ident = identifier(pc[2].u.operand);
-            ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
-            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Put, modeAndType.type());
+            int localScopeDepth = pc[5].u.operand;
+            instructions[i + 5].u.pointer = nullptr;
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
 
-            instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
-            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
                 if (op.watchpointSet)
-                    op.watchpointSet->invalidate();
+                    op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
             } else if (op.structure)
-                instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+                instructions[i + 5].u.structure.set(vm, this, op.structure);
             instructions[i + 6].u.pointer = reinterpret_cast(op.operand);
+
             break;
         }
-            
-        case op_captured_mov:
-        case op_new_captured_func: {
-            if (pc[3].u.index == UINT_MAX) {
-                instructions[i + 3].u.watchpointSet = 0;
+
+        case op_profile_type: {
+            RELEASE_ASSERT(vm.typeProfiler());
+            // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
+            size_t instructionOffset = i + opLength - 1;
+            unsigned divotStart, divotEnd;
+            GlobalVariableID globalVariableID = 0;
+            RefPtr<TypeSet> globalTypeSet;
+            bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
+            VirtualRegister profileRegister(pc[1].u.operand);
+            ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
+            SymbolTable* symbolTable = nullptr;
+
+            switch (flag) {
+            case ProfileTypeBytecodeClosureVar: {
+                const Identifier& ident = identifier(pc[4].u.operand);
+                int localScopeDepth = pc[2].u.operand;
+                ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
+                // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
+                // we're abstractly "read"ing from a JSScope.
+                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+
+                if (op.type == ClosureVar || op.type == ModuleVar)
+                    symbolTable = op.lexicalEnvironment->symbolTable();
+                else if (op.type == GlobalVar)
+                    symbolTable = m_globalObject.get()->symbolTable();
+
+                UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
+                if (symbolTable) {
+                    ConcurrentJSLocker locker(symbolTable->m_lock);
+                    // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+                    symbolTable->prepareForTypeProfiling(locker);
+                    globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
+                    globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
+                } else
+                    globalVariableID = TypeProfilerNoGlobalIDExists;
+
+                break;
+            }
+            case ProfileTypeBytecodeLocallyResolved: {
+                int symbolTableIndex = pc[2].u.operand;
+                SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+                const Identifier& ident = identifier(pc[4].u.operand);
+                ConcurrentJSLocker locker(symbolTable->m_lock);
+                // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+                globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
+                globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
+
+                break;
+            }
+            case ProfileTypeBytecodeDoesNotHaveGlobalID: 
+            case ProfileTypeBytecodeFunctionArgument: {
+                globalVariableID = TypeProfilerNoGlobalIDExists;
+                break;
+            }
+            case ProfileTypeBytecodeFunctionReturnStatement: {
+                RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
+                globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
+                globalVariableID = TypeProfilerReturnStatement;
+                if (!shouldAnalyze) {
+                    // Because a return statement can be added implicitly to return undefined at the end of a function,
+                    // and these nodes don't emit expression ranges because they aren't in the actual source text of
+                    // the user's program, give the type profiler some range to identify these return statements.
+                    // Currently, the text offset that is used as identification is "f" in the function keyword
+                    // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
+                    divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
+                    shouldAnalyze = true;
+                }
                 break;
             }
-            StringImpl* uid = identifier(pc[3].u.index).impl();
-            RELEASE_ASSERT(didCloneSymbolTable);
-            ConcurrentJITLocker locker(m_symbolTable->m_lock);
-            SymbolTable::Map::iterator iter = m_symbolTable->find(locker, uid);
-            ASSERT(iter != m_symbolTable->end(locker));
-            iter->value.prepareToWatch();
-            instructions[i + 3].u.watchpointSet = iter->value.watchpointSet();
+            }
+
+            std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
+                ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
+            TypeLocation* location = locationPair.first;
+            bool isNewLocation = locationPair.second;
+
+            if (flag == ProfileTypeBytecodeFunctionReturnStatement)
+                location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
+
+            if (shouldAnalyze && isNewLocation)
+                vm.typeProfiler()->insertNewLocation(location);
+
+            instructions[i + 2].u.location = location;
+            break;
+        }
+
+        case op_debug: {
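+            // DidReachBreakpoint here corresponds to a debugger statement in the source; record
+            // that this block contains one.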
+            if (pc[1].u.index == DidReachBreakpoint)
+                m_hasDebuggerStatement = true;
+            break;
+        }
+
+        case op_create_rest: {
+            int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
+            ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
+            // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
+            m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
             break;
         }
 
@@ -1844,7 +2338,11 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
         }
         i += opLength;
     }
-    m_instructions = WTF::RefCountedArray<Instruction>(instructions);
+
+    if (vm.controlFlowProfiler())
+        insertBasicBlockBoundariesForControlFlowProfiler(instructions);
+
+    m_instructions = WTFMove(instructions);
 
     // Set optimization thresholds only after m_instructions is initialized, since these
     // rely on the instruction count (and are in theory permitted to also inspect the
@@ -1854,71 +2352,89 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
 
     // If the concurrent thread will want the code block's hash, then compute it here
     // synchronously.
-    if (Options::showDisassembly()
-        || Options::showDFGDisassembly()
-        || Options::dumpBytecodeAtDFGTime()
-        || Options::dumpGraphAtEachPhase()
-        || Options::verboseCompilation()
-        || Options::logCompilationChanges()
-        || Options::validateGraph()
-        || Options::validateGraphAtEachPhase()
-        || Options::verboseOSR()
-        || Options::verboseCompilationQueue()
-        || Options::reportCompileTimes()
-        || Options::verboseCFA())
+    if (Options::alwaysComputeHash())
         hash();
 
     if (Options::dumpGeneratedBytecodes())
         dumpBytecode();
-
-    m_heap->m_codeBlocks.add(this);
-    m_heap->reportExtraMemoryCost(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
+    
+    heap()->m_codeBlocks->add(this);
+    heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
 }
 
 CodeBlock::~CodeBlock()
 {
     if (m_vm->m_perBytecodeProfiler)
         m_vm->m_perBytecodeProfiler->notifyDestruction(this);
-    
+
+    if (unlinkedCodeBlock()->didOptimize() == MixedTriState)
+        unlinkedCodeBlock()->setDidOptimize(FalseTriState);
+
 #if ENABLE(VERBOSE_VALUE_PROFILE)
     dumpValueProfiles();
 #endif
 
-#if ENABLE(LLINT)    
-    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
-        m_incomingLLIntCalls.begin()->remove();
-#endif // ENABLE(LLINT)
-#if ENABLE(JIT)
     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
     // Consider that two CodeBlocks become unreachable at the same time. There
     // is no guarantee about the order in which the CodeBlocks are destroyed.
     // So, if we don't remove incoming calls, and get destroyed before the
     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
     // destructor will try to remove nodes from our (no longer valid) linked list.
-    while (m_incomingCalls.begin() != m_incomingCalls.end())
-        m_incomingCalls.begin()->remove();
+    unlinkIncomingCalls();
     
     // Note that our outgoing calls will be removed from other CodeBlocks'
     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
     // destructors.
 
-    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
-        (*iter)->deref();
+#if ENABLE(JIT)
+    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+        StructureStubInfo* stub = *iter;
+        stub->aboutToDie();
+        stub->deref();
+    }
 #endif // ENABLE(JIT)
 }
 
-void CodeBlock::setNumParameters(int newValue)
+void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
 {
-    m_numParameters = newValue;
+    ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
+    size_t count = constants.size();
+    m_constantRegisters.resizeToFit(count);
+    bool hasTypeProfiler = !!m_vm->typeProfiler();
+    for (size_t i = 0; i < count; i++) {
+        JSValue constant = constants[i].get();
+
+        if (!constant.isEmpty()) {
+            if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constant)) {
+                if (hasTypeProfiler) {
+                    ConcurrentJSLocker locker(symbolTable->m_lock);
+                    symbolTable->prepareForTypeProfiling(locker);
+                }
 
-    m_argumentValueProfiles.resizeToFit(newValue);
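+                // The unlinked constant is shared, so this CodeBlock clones the scope part of the
+                // symbol table to get its own copy (e.g. for watchpoints and type-profiling state)
+                // instead of mutating the shared table.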
+                SymbolTable* clone = symbolTable->cloneScopePart(*m_vm);
+                if (wasCompiledWithDebuggingOpcodes())
+                    clone->setRareDataCodeBlock(this);
+
+                constant = clone;
+            }
+        }
+
+        m_constantRegisters[i].set(*m_vm, this, constant);
+    }
+
+    m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
 }
 
-void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
+void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
 {
-    EvalCacheMap::iterator end = m_cacheMap.end();
-    for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
-        visitor.append(&ptr->value);
+    m_alternative.set(vm, this, alternative);
+}
+
+void CodeBlock::setNumParameters(int newValue)
+{
+    m_numParameters = newValue;
+
+    m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
 }
 
 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
@@ -1927,77 +2443,46 @@ CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
     if (jitType() != JITCode::DFGJIT)
         return 0;
     DFG::JITCode* jitCode = m_jitCode->dfg();
-    return jitCode->osrEntryBlock.get();
+    return jitCode->osrEntryBlock();
 #else // ENABLE(FTL_JIT)
     return 0;
 #endif // ENABLE(FTL_JIT)
 }
 
-void CodeBlock::visitAggregate(SlotVisitor& visitor)
-{
-#if ENABLE(PARALLEL_GC)
-    // I may be asked to scan myself more than once, and it may even happen concurrently.
-    // To this end, use a CAS loop to check if I've been called already. Only one thread
-    // may proceed past this point - whichever one wins the CAS race.
-    unsigned oldValue;
-    do {
-        oldValue = m_visitAggregateHasBeenCalled;
-        if (oldValue) {
-            // Looks like someone else won! Return immediately to ensure that we don't
-            // trace the same CodeBlock concurrently. Doing so is hazardous since we will
-            // be mutating the state of ValueProfiles, which contain JSValues, which can
-            // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
-            // that are nearly impossible to track down.
-            
-            // Also note that it must be safe to return early as soon as we see the
-            // value true (well, (unsigned)1), since once a GC thread is in this method
-            // and has won the CAS race (i.e. was responsible for setting the value true)
-            // it will definitely complete the rest of this method before declaring
-            // termination.
-            return;
-        }
-    } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
-#endif // ENABLE(PARALLEL_GC)
-    
-    if (!!m_alternative)
-        m_alternative->visitAggregate(visitor);
+void CodeBlock::visitWeakly(SlotVisitor& visitor)
+{
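+    // Multiple GC threads may ask us to scan ourselves, possibly concurrently; taking m_lock and
+    // checking this flag ensures only the first caller proceeds (this replaces the old CAS loop).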
+    ConcurrentJSLocker locker(m_lock);
+    if (m_visitWeaklyHasBeenCalled)
+        return;
     
-    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
-        otherBlock->visitAggregate(visitor);
+    m_visitWeaklyHasBeenCalled = true;
 
-    visitor.reportExtraMemoryUsage(ownerExecutable(), sizeof(CodeBlock));
-    if (m_jitCode)
-        visitor.reportExtraMemoryUsage(ownerExecutable(), m_jitCode->size());
-    if (m_instructions.size()) {
-        // Divide by refCount() because m_instructions points to something that is shared
-        // by multiple CodeBlocks, and we only want to count it towards the heap size once.
-        // Having each CodeBlock report only its proportional share of the size is one way
-        // of accomplishing this.
-        visitor.reportExtraMemoryUsage(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
+    if (Heap::isMarkedConcurrently(this))
+        return;
+
+    if (shouldVisitStrongly(locker)) {
+        visitor.appendUnbarriered(this);
+        return;
     }
+    
+    // There are two things that may use unconditional finalizers: inline cache clearing
+    // and jettisoning. The probability of us wanting to do at least one of those things
+    // is probably quite close to 1. So we add one no matter what and when it runs, it
+    // figures out whether it has any work to do.
+    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
 
-    visitor.append(&m_unlinkedCode);
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return;
 
-    // There are three things that may use unconditional finalizers: lazy bytecode freeing,
-    // inline cache clearing, and jettisoning. The probability of us wanting to do at
-    // least one of those things is probably quite close to 1. So we add one no matter what
-    // and when it runs, it figures out whether it has any work to do.
-    visitor.addUnconditionalFinalizer(this);
+    // If we jettison ourselves we'll install our alternative, so make sure that it
+    // survives GC even if we don't.
+    visitor.append(m_alternative);
     
     // There are two things that we use weak reference harvesters for: DFG fixpoint for
     // jettisoning, and trying to find structures that would be live based on some
     // inline cache. So it makes sense to register them regardless.
-    visitor.addWeakReferenceHarvester(this);
-    m_allTransitionsHaveBeenMarked = false;
-    
-    if (shouldImmediatelyAssumeLivenessDuringScan()) {
-        // This code block is live, so scan all references strongly and return.
-        stronglyVisitStrongReferences(visitor);
-        stronglyVisitWeakReferences(visitor);
-        propagateTransitions(visitor);
-        return;
-    }
-    
+    visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);
+
 #if ENABLE(DFG_JIT)
     // We get here if we're live in the sense that our owner executable is live,
     // but we're not yet live for sure in another sense: we may yet decide that this
@@ -2007,17 +2492,149 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor)
     // either us marking additional objects, or by other objects being marked for
     // other reasons, that this iteration should run again; it will notify us of this
     // decision by calling harvestWeakReferences().
-    
+
+    m_allTransitionsHaveBeenMarked = false;
+    propagateTransitions(locker, visitor);
+
     m_jitCode->dfgCommon()->livenessHasBeenProved = false;
-    
-    propagateTransitions(visitor);
-    determineLiveness(visitor);
-#else // ENABLE(DFG_JIT)
-    RELEASE_ASSERT_NOT_REACHED();
+    determineLiveness(locker, visitor);
 #endif // ENABLE(DFG_JIT)
 }
 
-void CodeBlock::propagateTransitions(SlotVisitor& visitor)
+size_t CodeBlock::estimatedSize(JSCell* cell)
+{
+    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+    size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
+    if (thisObject->m_jitCode)
+        extraMemoryAllocated += thisObject->m_jitCode->size();
+    return Base::estimatedSize(cell) + extraMemoryAllocated;
+}
+
+void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    JSCell::visitChildren(thisObject, visitor);
+    thisObject->visitChildren(visitor);
+}
+
+void CodeBlock::visitChildren(SlotVisitor& visitor)
+{
+    ConcurrentJSLocker locker(m_lock);
+    // There are two things that may use unconditional finalizers: inline cache clearing
+    // and jettisoning. The probability of us wanting to do at least one of those things
+    // is probably quite close to 1. So we add one no matter what and when it runs, it
+    // figures out whether it has any work to do.
+    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
+
+    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+        visitor.appendUnbarriered(otherBlock);
+
+    if (m_jitCode)
+        visitor.reportExtraMemoryVisited(m_jitCode->size());
+    if (m_instructions.size()) {
+        unsigned refCount = m_instructions.refCount();
+        if (!refCount) {
+            dataLog("CodeBlock: ", RawPointer(this), "\n");
+            dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
+            dataLog("refCount: ", refCount, "\n");
+            RELEASE_ASSERT_NOT_REACHED();
+        }
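+        // Divide by refCount() because the instruction stream is shared by multiple CodeBlocks,
+        // and each should report only its proportional share of the size.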
+        visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
+    }
+
+    stronglyVisitStrongReferences(locker, visitor);
+    stronglyVisitWeakReferences(locker, visitor);
+
+    m_allTransitionsHaveBeenMarked = false;
+    propagateTransitions(locker, visitor);
+}
+
+bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
+{
+    if (Options::forceCodeBlockLiveness())
+        return true;
+
+    if (shouldJettisonDueToOldAge(locker))
+        return false;
+
+    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
+    // their weak references go stale. So if a baseline JIT CodeBlock gets
+    // scanned, we can assume that this means that it's live.
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return true;
+
+    return false;
+}
+
+bool CodeBlock::shouldJettisonDueToWeakReference()
+{
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return false;
+    return !Heap::isMarked(this);
+}
+
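+// How long a CodeBlock of each tier may go unmarked before shouldJettisonDueToOldAge() reclaims
+// it; higher tiers get longer lifetimes because their code was more expensive to produce.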
+static std::chrono::milliseconds timeToLive(JITCode::JITType jitType)
+{
+    if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
+        switch (jitType) {
+        case JITCode::InterpreterThunk:
+            return std::chrono::milliseconds(10);
+        case JITCode::BaselineJIT:
+            return std::chrono::milliseconds(10 + 20);
+        case JITCode::DFGJIT:
+            return std::chrono::milliseconds(40);
+        case JITCode::FTLJIT:
+            return std::chrono::milliseconds(120);
+        default:
+            return std::chrono::milliseconds::max();
+        }
+    }
+
+    switch (jitType) {
+    case JITCode::InterpreterThunk:
+        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5));
+    case JITCode::BaselineJIT:
+        // Effectively 10 additional seconds, since BaselineJIT and
+        // InterpreterThunk share a CodeBlock.
+        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5 + 10));
+    case JITCode::DFGJIT:
+        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(20));
+    case JITCode::FTLJIT:
+        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(60));
+    default:
+        return std::chrono::milliseconds::max();
+    }
+}
+
+bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
+{
+    if (Heap::isMarkedConcurrently(this))
+        return false;
+
+    if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
+        return true;
+    
+    if (timeSinceCreation() < timeToLive(jitType()))
+        return false;
+    
+    return true;
+}
+
+#if ENABLE(DFG_JIT)
+static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+{
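+    // See the longer comment in propagateTransitions(): a transition's target is live only if the
+    // transition's (optional) code origin and its source structure are both live.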
+    if (transition.m_codeOrigin && !Heap::isMarkedConcurrently(transition.m_codeOrigin.get()))
+        return false;
+    
+    if (!Heap::isMarkedConcurrently(transition.m_from.get()))
+        return false;
+    
+    return true;
+}
+#endif // ENABLE(DFG_JIT)
+
+void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
 {
     UNUSED_PARAM(visitor);
 
@@ -2026,19 +2643,23 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
 
     bool allAreMarkedSoFar = true;
         
-#if ENABLE(LLINT)
     Interpreter* interpreter = m_vm->interpreter;
     if (jitType() == JITCode::InterpreterThunk) {
         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
             switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
-            case op_put_by_id_transition_direct:
-            case op_put_by_id_transition_normal:
-            case op_put_by_id_transition_direct_out_of_line:
-            case op_put_by_id_transition_normal_out_of_line: {
-                if (Heap::isMarked(instruction[4].u.structure.get()))
-                    visitor.append(&instruction[6].u.structure);
+            case op_put_by_id: {
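+                // A transitioning put_by_id records an (old structure -> new structure) pair; the
+                // new structure needs to stay alive only while the old one is still live.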
+                StructureID oldStructureID = instruction[4].u.structureID;
+                StructureID newStructureID = instruction[6].u.structureID;
+                if (!oldStructureID || !newStructureID)
+                    break;
+                Structure* oldStructure =
+                    m_vm->heap.structureIDTable().get(oldStructureID);
+                Structure* newStructure =
+                    m_vm->heap.structureIDTable().get(newStructureID);
+                if (Heap::isMarkedConcurrently(oldStructure))
+                    visitor.appendUnbarriered(newStructure);
                 else
                     allAreMarkedSoFar = false;
                 break;
@@ -2048,69 +2669,42 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
             }
         }
     }
-#endif // ENABLE(LLINT)
 
 #if ENABLE(JIT)
     if (JITCode::isJIT(jitType())) {
-        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
-            StructureStubInfo& stubInfo = **iter;
-            switch (stubInfo.accessType) {
-            case access_put_by_id_transition_normal:
-            case access_put_by_id_transition_direct: {
-                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
-                if ((!origin || Heap::isMarked(origin))
-                    && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
-                    visitor.append(&stubInfo.u.putByIdTransition.structure);
-                else
-                    allAreMarkedSoFar = false;
-                break;
-            }
-
-            case access_put_by_id_list: {
-                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
-                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
-                if (origin && !Heap::isMarked(origin)) {
-                    allAreMarkedSoFar = false;
-                    break;
-                }
-                for (unsigned j = list->size(); j--;) {
-                    PutByIdAccess& access = list->m_list[j];
-                    if (!access.isTransition())
-                        continue;
-                    if (Heap::isMarked(access.oldStructure()))
-                        visitor.append(&access.m_newStructure);
-                    else
-                        allAreMarkedSoFar = false;
-                }
-                break;
-            }
-            
-            default:
-                break;
-            }
-        }
+        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
+            allAreMarkedSoFar &= (*iter)->propagateTransitions(visitor);
     }
 #endif // ENABLE(JIT)
     
 #if ENABLE(DFG_JIT)
     if (JITCode::isOptimizingJIT(jitType())) {
         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
-        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
-            if ((!dfgCommon->transitions[i].m_codeOrigin
-                 || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
-                && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
+        for (auto& weakReference : dfgCommon->weakStructureReferences)
+            allAreMarkedSoFar &= weakReference->markIfCheap(visitor);
+
+        for (auto& transition : dfgCommon->transitions) {
+            if (shouldMarkTransition(transition)) {
                 // If the following three things are live, then the target of the
                 // transition is also live:
+                //
                 // - This code block. We know it's live already because otherwise
                 //   we wouldn't be scanning ourselves.
+                //
                 // - The code origin of the transition. Transitions may arise from
                 //   code that was inlined. They are not relevant if the user's
                 //   object that is required for the inlinee to run is no longer
                 //   live.
+                //
                 // - The source of the transition. The transition checks if some
                 //   heap location holds the source, and if so, stores the target.
                 //   Hence the source must be live for the transition to be live.
-                visitor.append(&dfgCommon->transitions[i].m_to);
+                //
+                // We also short-circuit the liveness if the structure is harmless
+                // to mark (i.e. its global object and prototype are both already
+                // live).
+
+                visitor.append(transition.m_to);
             } else
                 allAreMarkedSoFar = false;
         }
@@ -2121,13 +2715,10 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
         m_allTransitionsHaveBeenMarked = true;
 }
 
-void CodeBlock::determineLiveness(SlotVisitor& visitor)
+void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
 {
     UNUSED_PARAM(visitor);
     
-    if (shouldImmediatelyAssumeLivenessDuringScan())
-        return;
-    
 #if ENABLE(DFG_JIT)
     // Check if we have any remaining work to do.
     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
@@ -2139,11 +2730,21 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
     // GC we still have not proved liveness, then this code block is toast.
     bool allAreLiveSoFar = true;
     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
-        if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
+        JSCell* reference = dfgCommon->weakReferences[i].get();
+        ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
+        if (!Heap::isMarkedConcurrently(reference)) {
             allAreLiveSoFar = false;
             break;
         }
     }
+    if (allAreLiveSoFar) {
+        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
+            if (!Heap::isMarkedConcurrently(dfgCommon->weakStructureReferences[i].get())) {
+                allAreLiveSoFar = false;
+                break;
+            }
+        }
+    }
     
     // If some weak references are dead, then this fixpoint iteration was
     // unsuccessful.
@@ -2153,261 +2754,346 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
     // All weak references are live. Record this information so we don't
     // come back here again, and scan the strong references.
     dfgCommon->livenessHasBeenProved = true;
-    stronglyVisitStrongReferences(visitor);
+    visitor.appendUnbarriered(this);
 #endif // ENABLE(DFG_JIT)
 }
 
-void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
+void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
+{
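+    // Recover the owning CodeBlock from the address of its embedded m_weakReferenceHarvester
+    // member (a container_of-style computation).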
+    CodeBlock* codeBlock =
+        bitwise_cast<CodeBlock*>(
+            bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
+    
+    codeBlock->propagateTransitions(NoLockingNecessary, visitor);
+    codeBlock->determineLiveness(NoLockingNecessary, visitor);
+}
+
+void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
 {
-    propagateTransitions(visitor);
-    determineLiveness(visitor);
+    instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
+    instruction[4].u.pointer = nullptr;
+    instruction[5].u.pointer = nullptr;
+    instruction[6].u.pointer = nullptr;
 }
 
-void CodeBlock::finalizeUnconditionally()
+void CodeBlock::finalizeLLIntInlineCaches()
 {
     Interpreter* interpreter = m_vm->interpreter;
-    if (JITCode::couldBeInterpreted(jitType())) {
-        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
-        for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
-            Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
-            switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
-            case op_get_by_id:
-            case op_get_by_id_out_of_line:
-            case op_put_by_id:
-            case op_put_by_id_out_of_line:
-                if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
-                    break;
-                if (Options::verboseOSR())
-                    dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
-                curInstruction[4].u.structure.clear();
-                curInstruction[5].u.operand = 0;
+    const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+    for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
+        Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
+        switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
+        case op_get_by_id:
+        case op_get_by_id_proto_load:
+        case op_get_by_id_unset: {
+            StructureID oldStructureID = curInstruction[4].u.structureID;
+            if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
                 break;
-            case op_put_by_id_transition_direct:
-            case op_put_by_id_transition_normal:
-            case op_put_by_id_transition_direct_out_of_line:
-            case op_put_by_id_transition_normal_out_of_line:
-                if (Heap::isMarked(curInstruction[4].u.structure.get())
-                    && Heap::isMarked(curInstruction[6].u.structure.get())
-                    && Heap::isMarked(curInstruction[7].u.structureChain.get()))
-                    break;
-                if (Options::verboseOSR()) {
-                    dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
-                            curInstruction[4].u.structure.get(),
-                            curInstruction[6].u.structure.get(),
-                            curInstruction[7].u.structureChain.get());
-                }
-                curInstruction[4].u.structure.clear();
-                curInstruction[6].u.structure.clear();
-                curInstruction[7].u.structureChain.clear();
-                curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
+            if (Options::verboseOSR())
+                dataLogF("Clearing LLInt property access.\n");
+            clearLLIntGetByIdCache(curInstruction);
+            break;
+        }
+        case op_put_by_id: {
+            StructureID oldStructureID = curInstruction[4].u.structureID;
+            StructureID newStructureID = curInstruction[6].u.structureID;
+            StructureChain* chain = curInstruction[7].u.structureChain.get();
+            if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
+                (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
+                (!chain || Heap::isMarked(chain)))
                 break;
-            case op_get_array_length:
+            if (Options::verboseOSR())
+                dataLogF("Clearing LLInt put transition.\n");
+            curInstruction[4].u.structureID = 0;
+            curInstruction[5].u.operand = 0;
+            curInstruction[6].u.structureID = 0;
+            curInstruction[7].u.structureChain.clear();
+            break;
+        }
+        case op_get_array_length:
+            break;
+        case op_to_this:
+            if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
                 break;
-            case op_to_this:
-                if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
-                    break;
-                if (Options::verboseOSR())
-                    dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
-                curInstruction[2].u.structure.clear();
+            if (Options::verboseOSR())
+                dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
+            curInstruction[2].u.structure.clear();
+            curInstruction[3].u.toThisStatus = merge(
+                curInstruction[3].u.toThisStatus, ToThisClearedByGC);
+            break;
+        case op_create_this: {
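+            // The cache holds either a single cached callee, or the seenMultipleCalleeObjects()
+            // sentinel once the call site has gone polymorphic.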
+            auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
+            if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
                 break;
-            case op_get_callee:
-                if (!curInstruction[2].u.jsCell || Heap::isMarked(curInstruction[2].u.jsCell.get()))
-                    break;
-                if (Options::verboseOSR())
-                    dataLogF("Clearing LLInt get callee with function %p.\n", curInstruction[2].u.jsCell.get());
-                curInstruction[2].u.jsCell.clear();
+            JSCell* cachedFunction = cacheWriteBarrier.get();
+            if (Heap::isMarked(cachedFunction))
                 break;
-            case op_resolve_scope: {
-                WriteBarrierBase<JSActivation>& activation = curInstruction[5].u.activation;
-                if (!activation || Heap::isMarked(activation.get()))
-                    break;
-                if (Options::verboseOSR())
-                    dataLogF("Clearing dead activation %p.\n", activation.get());
-                activation.clear();
+            if (Options::verboseOSR())
+                dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
+            cacheWriteBarrier.clear();
+            break;
+        }
+        case op_resolve_scope: {
+            // Right now this isn't strictly necessary. Any symbol tables that this will refer to
+            // are for outer functions, and we refer to those functions strongly, and they refer
+            // to the symbol table strongly. But it's nice to be on the safe side.
+            WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
+            if (!symbolTable || Heap::isMarked(symbolTable.get()))
                 break;
-            }
-            case op_get_from_scope:
-            case op_put_to_scope: {
-                ResolveModeAndType modeAndType =
-                    ResolveModeAndType(curInstruction[4].u.operand);
-                if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks)
-                    continue;
-                WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
-                if (!structure || Heap::isMarked(structure.get()))
-                    break;
-                if (Options::verboseOSR())
-                    dataLogF("Clearing scope access with structure %p.\n", structure.get());
-                structure.clear();
+            if (Options::verboseOSR())
+                dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
+            symbolTable.clear();
+            break;
+        }
+        case op_get_from_scope:
+        case op_put_to_scope: {
+            GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
+            if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
+                || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
+                continue;
+            WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
+            if (!structure || Heap::isMarked(structure.get()))
                 break;
-            }
-            default:
-                RELEASE_ASSERT_NOT_REACHED();
-            }
+            if (Options::verboseOSR())
+                dataLogF("Clearing scope access with structure %p.\n", structure.get());
+            structure.clear();
+            break;
         }
-
-#if ENABLE(LLINT)
-        for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
-            if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
-                if (Options::verboseOSR())
-                    dataLog("Clearing LLInt call from ", *this, "\n");
-                m_llintCallLinkInfos[i].unlink();
-            }
-            if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
-                m_llintCallLinkInfos[i].lastSeenCallee.clear();
+        default:
+            OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
+            ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
         }
-#endif // ENABLE(LLINT)
     }
 
-#if ENABLE(DFG_JIT)
-    // Check if we're not live. If we are, then jettison.
-    if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_jitCode->dfgCommon()->livenessHasBeenProved)) {
-        if (Options::verboseOSR())
-            dataLog(*this, " has dead weak references, jettisoning during GC.\n");
+    // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
+    // then cleared the cache without GCing in between.
+    m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
+        return !Heap::isMarked(pair.key);
+    });
 
-        if (DFG::shouldShowDisassembly()) {
-            dataLog(*this, " will be jettisoned because of the following dead references:\n");
-            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
-            for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
-                DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
-                JSCell* origin = transition.m_codeOrigin.get();
-                JSCell* from = transition.m_from.get();
-                JSCell* to = transition.m_to.get();
-                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
-                    continue;
-                dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
-            }
-            for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
-                JSCell* weak = dfgCommon->weakReferences[i].get();
-                if (Heap::isMarked(weak))
-                    continue;
-                dataLog("    Weak reference ", RawPointer(weak), ".\n");
-            }
+    for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+        if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+            if (Options::verboseOSR())
+                dataLog("Clearing LLInt call from ", *this, "\n");
+            m_llintCallLinkInfos[i].unlink();
         }
-        
-        jettison();
+        if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
+            m_llintCallLinkInfos[i].lastSeenCallee.clear();
+    }
+}
+
+void CodeBlock::finalizeBaselineJITInlineCaches()
+{
+#if ENABLE(JIT)
+    for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
+        (*iter)->visitWeak(*vm());
+
+    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+        StructureStubInfo& stubInfo = **iter;
+        stubInfo.visitWeakReferences(this);
+    }
+#endif
+}
+
+void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
+{
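+    // As in WeakReferenceHarvester::visitWeakReferences(), map the embedded member back to the
+    // CodeBlock that contains it.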
+    CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
+        bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
+    
+    codeBlock->updateAllPredictions();
+    
+    if (!Heap::isMarked(codeBlock)) {
+        if (codeBlock->shouldJettisonDueToWeakReference())
+            codeBlock->jettison(Profiler::JettisonDueToWeakReference);
+        else
+            codeBlock->jettison(Profiler::JettisonDueToOldAge);
         return;
     }
-#endif // ENABLE(DFG_JIT)
+
+    if (JITCode::couldBeInterpreted(codeBlock->jitType()))
+        codeBlock->finalizeLLIntInlineCaches();
 
 #if ENABLE(JIT)
-    // Handle inline caches.
-    if (!!jitCode()) {
-        RepatchBuffer repatchBuffer(this);
-        for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
-            if (callLinkInfo(i).isLinked()) {
-                if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) {
-                    if (!Heap::isMarked(stub->structure())
-                        || !Heap::isMarked(stub->executable())) {
-                        if (Options::verboseOSR()) {
-                            dataLog(
-                                "Clearing closure call from ", *this, " to ",
-                                stub->executable()->hashFor(callLinkInfo(i).specializationKind()),
-                                ", stub routine ", RawPointer(stub), ".\n");
-                        }
-                        callLinkInfo(i).unlink(*m_vm, repatchBuffer);
-                    }
-                } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
-                    if (Options::verboseOSR()) {
-                        dataLog(
-                            "Clearing call from ", *this, " to ",
-                            RawPointer(callLinkInfo(i).callee.get()), " (",
-                            callLinkInfo(i).callee.get()->executable()->hashFor(
-                                callLinkInfo(i).specializationKind()),
-                            ").\n");
-                    }
-                    callLinkInfo(i).unlink(*m_vm, repatchBuffer);
-                }
-            }
-            if (!!callLinkInfo(i).lastSeenCallee
-                && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
-                callLinkInfo(i).lastSeenCallee.clear();
-        }
-        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
-            StructureStubInfo& stubInfo = **iter;
-            
-            if (stubInfo.visitWeakReferences())
-                continue;
-            
-            resetStubDuringGCInternal(repatchBuffer, stubInfo);
-        }
+    if (!!codeBlock->jitCode())
+        codeBlock->finalizeBaselineJITInlineCaches();
+#endif
+}
+
+void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
+{
+#if ENABLE(JIT)
+    if (JITCode::isJIT(jitType()))
+        toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+#else
+    UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getStubInfoMap(StubInfoMap& result)
+{
+    ConcurrentJSLocker locker(m_lock);
+    getStubInfoMap(locker, result);
+}
+
+void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
+{
+#if ENABLE(JIT)
+    if (JITCode::isJIT(jitType()))
+        toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
+#else
+    UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
+{
+    ConcurrentJSLocker locker(m_lock);
+    getCallLinkInfoMap(locker, result);
+}
+
+void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
+{
+#if ENABLE(JIT)
+    if (JITCode::isJIT(jitType())) {
+        for (auto* byValInfo : m_byValInfos)
+            result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
     }
+#else
+    UNUSED_PARAM(result);
 #endif
 }
 
+void CodeBlock::getByValInfoMap(ByValInfoMap& result)
+{
+    ConcurrentJSLocker locker(m_lock);
+    getByValInfoMap(locker, result);
+}
+
 #if ENABLE(JIT)
-StructureStubInfo* CodeBlock::addStubInfo()
+StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
 {
-    ConcurrentJITLocker locker(m_lock);
-    return m_stubInfos.add();
+    ConcurrentJSLocker locker(m_lock);
+    return m_stubInfos.add(accessType);
 }
 
-void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
 {
-    toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+    return m_addICs.add(arithProfile);
 }
 
-void CodeBlock::resetStub(StructureStubInfo& stubInfo)
+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
 {
-    if (stubInfo.accessType == access_unset)
-        return;
-    
-    ConcurrentJITLocker locker(m_lock);
-    
-    RepatchBuffer repatchBuffer(this);
-    resetStubInternal(repatchBuffer, stubInfo);
+    return m_mulICs.add(arithProfile);
 }
 
-void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
 {
-    AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
-    
-    if (Options::verboseOSR()) {
-        // This can be called from GC destructor calls, so we don't try to do a full dump
-        // of the CodeBlock.
-        dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
+    return m_subICs.add(arithProfile);
+}
+
+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
+{
+    return m_negICs.add(arithProfile);
+}
+
+StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
+{
+    for (StructureStubInfo* stubInfo : m_stubInfos) {
+        if (stubInfo->codeOrigin == codeOrigin)
+            return stubInfo;
     }
-    
-    RELEASE_ASSERT(JITCode::isJIT(jitType()));
-    
-    if (isGetByIdAccess(accessType))
-        resetGetByID(repatchBuffer, stubInfo);
-    else if (isPutByIdAccess(accessType))
-        resetPutByID(repatchBuffer, stubInfo);
-    else {
-        RELEASE_ASSERT(isInAccess(accessType));
-        resetIn(repatchBuffer, stubInfo);
+    return nullptr;
+}
+
+ByValInfo* CodeBlock::addByValInfo()
+{
+    ConcurrentJSLocker locker(m_lock);
+    return m_byValInfos.add();
+}
+
+CallLinkInfo* CodeBlock::addCallLinkInfo()
+{
+    ConcurrentJSLocker locker(m_lock);
+    return m_callLinkInfos.add();
+}
+
+CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
+{
+    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+        if ((*iter)->codeOrigin() == CodeOrigin(index))
+            return *iter;
     }
-    
-    stubInfo.reset();
+    return nullptr;
 }
 
-void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void CodeBlock::resetJITData()
 {
-    resetStubInternal(repatchBuffer, stubInfo);
-    stubInfo.resetByGC = true;
+    RELEASE_ASSERT(!JITCode::isJIT(jitType()));
+    ConcurrentJSLocker locker(m_lock);
+    
+    // We can clear these because no other thread will have references to any stub infos, call
+    // link infos, or by val infos if we don't have JIT code. Attempts to query these data
+    // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
+    // don't have JIT code.
+    m_stubInfos.clear();
+    m_callLinkInfos.clear();
+    m_byValInfos.clear();
+    
+    // We can clear this because the DFG's queries to these data structures are guarded by whether
+    // there is JIT code.
+    m_rareCaseProfiles.clear();
 }
 #endif
 
-void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
+void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
 {
-    visitor.append(&m_globalObject);
-    visitor.append(&m_ownerExecutable);
-    visitor.append(&m_symbolTable);
-    visitor.append(&m_unlinkedCode);
+    // We strongly visit OSR exits targets because we don't want to deal with
+    // the complexity of generating an exit target CodeBlock on demand and
+    // guaranteeing that it matches the details of the CodeBlock we compiled
+    // the OSR exit against.
+
+    visitor.append(m_alternative);
+
+#if ENABLE(DFG_JIT)
+    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+    if (dfgCommon->inlineCallFrames) {
+        for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
+            ASSERT(inlineCallFrame->baselineCodeBlock);
+            visitor.append(inlineCallFrame->baselineCodeBlock);
+        }
+    }
+#endif
+}
+
+void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
+{
+    UNUSED_PARAM(locker);
+    
+    visitor.append(m_globalObject);
+    visitor.append(m_ownerExecutable);
+    visitor.append(m_unlinkedCode);
     if (m_rareData)
-        m_rareData->m_evalCodeCache.visitAggregate(visitor);
+        m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
-    for (size_t i = 0; i < m_functionExprs.size(); ++i)
-        visitor.append(&m_functionExprs[i]);
-    for (size_t i = 0; i < m_functionDecls.size(); ++i)
-        visitor.append(&m_functionDecls[i]);
-    for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
-        m_objectAllocationProfiles[i].visitAggregate(visitor);
+    for (auto& functionExpr : m_functionExprs)
+        visitor.append(functionExpr);
+    for (auto& functionDecl : m_functionDecls)
+        visitor.append(functionDecl);
+    for (auto& objectAllocationProfile : m_objectAllocationProfiles)
+        objectAllocationProfile.visitAggregate(visitor);
 
-    updateAllPredictions();
+#if ENABLE(JIT)
+    for (ByValInfo* byValInfo : m_byValInfos)
+        visitor.append(byValInfo->cachedSymbol);
+#endif
+
+#if ENABLE(DFG_JIT)
+    if (JITCode::isOptimizingJIT(jitType()))
+        visitOSRExitTargets(locker, visitor);
+#endif
 }
 
-void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
+void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
 {
     UNUSED_PARAM(visitor);
 
@@ -2417,15 +3103,20 @@ void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
     
     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
 
-    for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
-        if (!!dfgCommon->transitions[i].m_codeOrigin)
-            visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
-        visitor.append(&dfgCommon->transitions[i].m_from);
-        visitor.append(&dfgCommon->transitions[i].m_to);
+    for (auto& transition : dfgCommon->transitions) {
+        if (!!transition.m_codeOrigin)
+            visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
+        visitor.append(transition.m_from);
+        visitor.append(transition.m_to);
     }
-    
-    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
-        visitor.append(&dfgCommon->weakReferences[i]);
+
+    for (auto& weakReference : dfgCommon->weakReferences)
+        visitor.append(weakReference);
+
+    for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
+        visitor.append(weakStructureReference);
+
+    dfgCommon->livenessHasBeenProved = true;
 #endif    
 }
 
@@ -2474,87 +3165,56 @@ bool CodeBlock::hasOptimizedReplacement()
 }
 #endif
 
-bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
+HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
 {
-    if (operand.isArgument())
-        return operand.toArgument() && usesArguments();
-
-    if (inlineCallFrame)
-        return inlineCallFrame->capturedVars.get(operand.toLocal());
-
-    // The activation object isn't in the captured region, but it's "captured"
-    // in the sense that stores to its location can be observed indirectly.
-    if (needsActivation() && operand == activationRegister())
-        return true;
-
-    // Ditto for the arguments object.
-    if (usesArguments() && operand == argumentsRegister())
-        return true;
-
-    // Ditto for the arguments object.
-    if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
-        return true;
-
-    // We're in global code so there are no locals to capture
-    if (!symbolTable())
-        return false;
-
-    return symbolTable()->isCaptured(operand.offset());
+    RELEASE_ASSERT(bytecodeOffset < instructions().size());
+    return handlerForIndex(bytecodeOffset, requiredHandler);
 }
 
-int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
+HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
 {
-    // We'll be adding this to the stack pointer to get a registers pointer that looks
-    // like it would have looked in the baseline engine. For example, if bytecode would
-    // have put the first captured variable at offset -5 but we put it at offset -1, then
-    // we'll have an offset of 4.
-    int32_t offset = 0;
-    
-    // Compute where we put the captured variables. This offset will point the registers
-    // pointer directly at the first captured var.
-    offset += machineCaptureStart;
-    
-    // Now compute the offset needed to make the runtime see the captured variables at the
-    // same offset that the bytecode would have used.
-    offset -= symbolTable()->captureStart();
-    
-    return offset;
+    if (!m_rareData)
+        return 0;
+    return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
 }
 
-int CodeBlock::framePointerOffsetToGetActivationRegisters()
+CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
 {
-    if (!JITCode::isOptimizingJIT(jitType()))
-        return 0;
 #if ENABLE(DFG_JIT)
-    return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
+    RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+    RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
+    ASSERT(!!handlerForIndex(originalCallSite.bits()));
+    CodeOrigin originalOrigin = codeOrigin(originalCallSite);
+    return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
 #else
+    // We never create new on-the-fly exception handling
+    // call sites outside the DFG/FTL inline caches.
+    UNUSED_PARAM(originalCallSite);
     RELEASE_ASSERT_NOT_REACHED();
-    return 0;
+    return CallSiteIndex(0u);
 #endif
 }
 
-HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
+void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
 {
-    RELEASE_ASSERT(bytecodeOffset < instructions().size());
-
-    if (!m_rareData)
-        return 0;
-    
+    RELEASE_ASSERT(m_rareData);
     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+    unsigned index = callSiteIndex.bits();
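+    // Handlers are ordered innermost first, so the first handler whose range contains the call
+    // site index is the one to remove.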
     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
-        // Handlers are ordered innermost first, so the first handler we encounter
-        // that contains the source address is the correct handler to use.
-        if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
-            return &exceptionHandlers[i];
+        HandlerInfo& handler = exceptionHandlers[i];
+        if (handler.start <= index && handler.end > index) {
+            exceptionHandlers.remove(i);
+            return;
+        }
     }
 
-    return 0;
+    RELEASE_ASSERT_NOT_REACHED();
 }
 
 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
 {
     RELEASE_ASSERT(bytecodeOffset < instructions().size());
-    return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+    return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
 }
 
 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
@@ -2568,12 +3228,12 @@ unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
     return column;
 }
 
-void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
+void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
 {
     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
     divot += m_sourceOffset;
     column += line ? 1 : firstLineColumnOffset();
-    line += m_ownerExecutable->lineNo();
+    line += ownerScriptExecutable()->firstLine();
 }
 
 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
@@ -2599,11 +3259,13 @@ bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
 
 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
 {
+    ConcurrentJSLocker locker(m_lock);
+
     m_rareCaseProfiles.shrinkToFit();
-    m_specialFastCaseProfiles.shrinkToFit();
     
     if (shrinkMode == EarlyShrink) {
         m_constantRegisters.shrinkToFit();
+        m_constantsSourceCodeRepresentation.shrinkToFit();
         
         if (m_rareData) {
             m_rareData->m_switchJumpTables.shrinkToFit();
@@ -2612,175 +3274,154 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
     } // else don't shrink these, because we would have already pointed pointers into these tables.
 }
 
-unsigned CodeBlock::addOrFindConstant(JSValue v)
-{
-    unsigned result;
-    if (findConstant(v, result))
-        return result;
-    return addConstant(v);
-}
-
-bool CodeBlock::findConstant(JSValue v, unsigned& index)
-{
-    unsigned numberOfConstants = numberOfConstantRegisters();
-    for (unsigned i = 0; i < numberOfConstants; ++i) {
-        if (getConstant(FirstConstantRegisterIndex + i) == v) {
-            index = i;
-            return true;
-        }
-    }
-    index = numberOfConstants;
-    return false;
-}
-
 #if ENABLE(JIT)
-void CodeBlock::unlinkCalls()
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
 {
-    if (!!m_alternative)
-        m_alternative->unlinkCalls();
-#if ENABLE(LLINT)
-    for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
-        if (m_llintCallLinkInfos[i].isLinked())
-            m_llintCallLinkInfos[i].unlink();
-    }
-#endif
-    if (!m_callLinkInfos.size())
-        return;
-    if (!m_vm->canUseJIT())
-        return;
-    RepatchBuffer repatchBuffer(this);
-    for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
-        if (!m_callLinkInfos[i].isLinked())
-            continue;
-        m_callLinkInfos[i].unlink(*m_vm, repatchBuffer);
-    }
+    noticeIncomingCall(callerFrame);
+    m_incomingCalls.push(incoming);
 }
 
-void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
+void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
 {
     noticeIncomingCall(callerFrame);
-    m_incomingCalls.push(incoming);
+    m_incomingPolymorphicCalls.push(incoming);
 }
 #endif // ENABLE(JIT)
 
 void CodeBlock::unlinkIncomingCalls()
 {
-#if ENABLE(LLINT)
     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
         m_incomingLLIntCalls.begin()->unlink();
-#endif // ENABLE(LLINT)
 #if ENABLE(JIT)
-    if (m_incomingCalls.isEmpty())
-        return;
-    RepatchBuffer repatchBuffer(this);
     while (m_incomingCalls.begin() != m_incomingCalls.end())
-        m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
+        m_incomingCalls.begin()->unlink(*vm());
+    while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
+        m_incomingPolymorphicCalls.begin()->unlink(*vm());
 #endif // ENABLE(JIT)
 }
 
-#if ENABLE(LLINT)
 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
 {
     noticeIncomingCall(callerFrame);
     m_incomingLLIntCalls.push(incoming);
 }
-#endif // ENABLE(LLINT)
 
-void CodeBlock::clearEvalCache()
+CodeBlock* CodeBlock::newReplacement()
 {
-    if (!!m_alternative)
-        m_alternative->clearEvalCache();
-    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
-        otherBlock->clearEvalCache();
-    if (!m_rareData)
-        return;
-    m_rareData->m_evalCodeCache.clear();
+    return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
 }
 
-void CodeBlock::install()
+#if ENABLE(JIT)
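+// Returns the executable's currently installed CodeBlock of this block's own kind
+// (call vs. construct), dispatching on this block's ClassInfo.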
+CodeBlock* CodeBlock::replacement()
 {
-    ownerExecutable()->installCode(this);
-}
+    const ClassInfo* classInfo = this->classInfo(*vm());
 
-PassRefPtr<CodeBlock> CodeBlock::newReplacement()
-{
-    return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
-}
+    if (classInfo == FunctionCodeBlock::info())
+        return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
 
-const SlowArgument* CodeBlock::machineSlowArguments()
-{
-    if (!JITCode::isOptimizingJIT(jitType()))
-        return symbolTable()->slowArguments();
-    
-#if ENABLE(DFG_JIT)
-    return jitCode()->dfgCommon()->slowArguments.get();
-#else // ENABLE(DFG_JIT)
-    return 0;
-#endif // ENABLE(DFG_JIT)
-}
+    if (classInfo == EvalCodeBlock::info())
+        return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
 
-#if ENABLE(JIT)
-CodeBlock* ProgramCodeBlock::replacement()
-{
-    return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
-}
+    if (classInfo == ProgramCodeBlock::info())
+        return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
 
-CodeBlock* EvalCodeBlock::replacement()
-{
-    return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
-}
+    if (classInfo == ModuleProgramCodeBlock::info())
+        return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
 
-CodeBlock* FunctionCodeBlock::replacement()
-{
-    return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+    RELEASE_ASSERT_NOT_REACHED();
+    return nullptr;
 }
 
-DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
+DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
 {
-    return DFG::programCapabilityLevel(this);
-}
+    const ClassInfo* classInfo = this->classInfo(*vm());
 
-DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
-{
-    return DFG::evalCapabilityLevel(this);
+    if (classInfo == FunctionCodeBlock::info()) {
+        if (m_isConstructor)
+            return DFG::functionForConstructCapabilityLevel(this);
+        return DFG::functionForCallCapabilityLevel(this);
+    }
+
+    if (classInfo == EvalCodeBlock::info())
+        return DFG::evalCapabilityLevel(this);
+
+    if (classInfo == ProgramCodeBlock::info())
+        return DFG::programCapabilityLevel(this);
+
+    if (classInfo == ModuleProgramCodeBlock::info())
+        return DFG::programCapabilityLevel(this);
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return DFG::CannotCompile;
 }
 
-DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
+#endif // ENABLE(JIT)
+
+void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
 {
-    if (m_isConstructor)
-        return DFG::functionForConstructCapabilityLevel(this);
-    return DFG::functionForCallCapabilityLevel(this);
-}
+#if !ENABLE(DFG_JIT)
+    UNUSED_PARAM(mode);
+    UNUSED_PARAM(detail);
 #endif
+    
+    CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
 
-void CodeBlock::jettison(ReoptimizationMode mode)
-{
+    RELEASE_ASSERT(reason != Profiler::NotJettisoned);
+    
 #if ENABLE(DFG_JIT)
-    if (DFG::shouldShowDisassembly()) {
+    if (DFG::shouldDumpDisassembly()) {
         dataLog("Jettisoning ", *this);
         if (mode == CountReoptimization)
             dataLog(" and counting reoptimization");
+        dataLog(" due to ", reason);
+        if (detail)
+            dataLog(", ", *detail);
         dataLog(".\n");
     }
     
-    DeferGCForAWhile deferGC(*m_heap);
-    RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+    if (reason == Profiler::JettisonDueToWeakReference) {
+        if (DFG::shouldDumpDisassembly()) {
+            dataLog(*this, " will be jettisoned because of the following dead references:\n");
+            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+            for (auto& transition : dfgCommon->transitions) {
+                JSCell* origin = transition.m_codeOrigin.get();
+                JSCell* from = transition.m_from.get();
+                JSCell* to = transition.m_to.get();
+                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
+                    continue;
+                dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
+            }
+            for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
+                JSCell* weak = dfgCommon->weakReferences[i].get();
+                if (Heap::isMarked(weak))
+                    continue;
+                dataLog("    Weak reference ", RawPointer(weak), ".\n");
+            }
+        }
+    }
+#endif // ENABLE(DFG_JIT)
+
+    DeferGCForAWhile deferGC(*heap());
     
     // We want to accomplish two things here:
     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
     //    we should OSR exit at the top of the next bytecode instruction after the return.
     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
-    
-    // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
-    // whether the invalidation has already happened.
-    if (!jitCode()->dfgCommon()->invalidate()) {
-        // Nothing to do since we've already been invalidated. That means that we cannot be
-        // the optimized replacement.
-        RELEASE_ASSERT(this != replacement());
-        return;
+
+#if ENABLE(DFG_JIT)
+    if (reason != Profiler::JettisonDueToOldAge) {
+        if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
+            compilation->setJettisonReason(reason, detail);
+        
+        // This accomplishes (1), and does its own book-keeping about whether it has already happened.
+        if (!jitCode()->dfgCommon()->invalidate()) {
+            // We've already been invalidated.
+            RELEASE_ASSERT(this != replacement() || (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
+            return;
+        }
     }
     
-    if (DFG::shouldShowDisassembly())
+    if (DFG::shouldDumpDisassembly())
         dataLog("    Did invalidate ", *this, "\n");
     
     // Count the reoptimization if that's what the user wanted.
@@ -2788,24 +3429,35 @@ void CodeBlock::jettison(ReoptimizationMode mode)
         // FIXME: Maybe this should call alternative().
         // https://bugs.webkit.org/show_bug.cgi?id=123677
         baselineAlternative()->countReoptimization();
-        if (DFG::shouldShowDisassembly())
+        if (DFG::shouldDumpDisassembly())
             dataLog("    Did count reoptimization for ", *this, "\n");
     }
     
-    // Now take care of the entrypoint.
     if (this != replacement()) {
         // This means that we were never the entrypoint. This can happen for OSR entry code
         // blocks.
         return;
     }
-    alternative()->optimizeAfterWarmUp();
-    tallyFrequentExitSites();
-    alternative()->install();
-    if (DFG::shouldShowDisassembly())
+
+    if (alternative())
+        alternative()->optimizeAfterWarmUp();
+
+    if (reason != Profiler::JettisonDueToOldAge)
+        tallyFrequentExitSites();
+#endif // ENABLE(DFG_JIT)
+
+    // Jettison can happen during GC. We don't want to install code to a dead executable
+    // because that would add a dead object to the remembered set.
+    if (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
+        return;
+
+    // This accomplishes (2).
+    ownerScriptExecutable()->installCode(
+        m_globalObject->vm(), alternative(), codeType(), specializationKind());
+
+#if ENABLE(DFG_JIT)
+    if (DFG::shouldDumpDisassembly())
         dataLog("    Did install baseline version of ", *this, "\n");
-#else // ENABLE(DFG_JIT)
-    UNUSED_PARAM(mode);
-    UNREACHABLE_FOR_PLATFORM();
 #endif // ENABLE(DFG_JIT)
 }
 
@@ -2813,28 +3465,82 @@ JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
 {
     if (!codeOrigin.inlineCallFrame)
         return globalObject();
-    return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
+    return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
 }
 
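+// Functor for StackVisitor: once m_startCallFrame has been found, it checks at most
+// m_depthToCheck further frames for one whose CodeBlock is m_codeBlock. The fields are
+// mutable because operator() is invoked as const. noticeIncomingCall() uses this below
+// to refuse inlining for recursive call sites.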
+class RecursionCheckFunctor {
+public:
+    RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
+        : m_startCallFrame(startCallFrame)
+        , m_codeBlock(codeBlock)
+        , m_depthToCheck(depthToCheck)
+        , m_foundStartCallFrame(false)
+        , m_didRecurse(false)
+    { }
+
+    StackVisitor::Status operator()(StackVisitor& visitor) const
+    {
+        CallFrame* currentCallFrame = visitor->callFrame();
+
+        if (currentCallFrame == m_startCallFrame)
+            m_foundStartCallFrame = true;
+
+        if (m_foundStartCallFrame) {
+            if (visitor->callFrame()->codeBlock() == m_codeBlock) {
+                m_didRecurse = true;
+                return StackVisitor::Done;
+            }
+
+            if (!m_depthToCheck--)
+                return StackVisitor::Done;
+        }
+
+        return StackVisitor::Continue;
+    }
+
+    bool didRecurse() const { return m_didRecurse; }
+
+private:
+    CallFrame* m_startCallFrame;
+    CodeBlock* m_codeBlock;
+    mutable unsigned m_depthToCheck;
+    mutable bool m_foundStartCallFrame;
+    mutable bool m_didRecurse;
+};
+
 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
 {
     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
     
     if (Options::verboseCallLink())
-        dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");
+        dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
     
+#if ENABLE(DFG_JIT)
     if (!m_shouldAlwaysBeInlined)
         return;
+    
+    if (!callerCodeBlock) {
+        m_shouldAlwaysBeInlined = false;
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because caller is native.\n");
+        return;
+    }
 
-#if ENABLE(DFG_JIT)
     if (!hasBaselineJITProfiling())
         return;
 
     if (!DFG::mightInlineFunction(this))
         return;
 
-    if (!canInline(m_capabilityLevelState))
+    if (!canInline(capabilityLevelState()))
+        return;
+    
+    if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
+        m_shouldAlwaysBeInlined = false;
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because caller is too large.\n");
         return;
+    }
 
     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
         // If the caller is still in the interpreter, then we can't expect inlining to
@@ -2843,7 +3549,14 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
         // any of its callers.
         m_shouldAlwaysBeInlined = false;
         if (Options::verboseCallLink())
-            dataLog("    Marking SABI because caller is in LLInt.\n");
+            dataLog("    Clearing SABI because caller is in LLInt.\n");
+        return;
+    }
+    
+    if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
+        m_shouldAlwaysBeInlined = false;
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because caller was already optimized.\n");
         return;
     }
     
@@ -2853,40 +3566,72 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
         // delay eval optimization by a *lot*.
         m_shouldAlwaysBeInlined = false;
         if (Options::verboseCallLink())
-            dataLog("    Marking SABI because caller is not a function.\n");
+            dataLog("    Clearing SABI because caller is not a function.\n");
         return;
     }
-    
-    ExecState* frame = callerFrame;
-    for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
-        if (frame->isVMEntrySentinel())
-            break;
-        if (frame->codeBlock() == this) {
-            // Recursive calls won't be inlined.
-            if (Options::verboseCallLink())
-                dataLog("    Marking SABI because recursion was detected.\n");
-            m_shouldAlwaysBeInlined = false;
-            return;
-        }
+
+    // Recursive calls won't be inlined.
+    RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
+    vm()->topCallFrame->iterate(functor);
+
+    if (functor.didRecurse()) {
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because recursion was detected.\n");
+        m_shouldAlwaysBeInlined = false;
+        return;
     }
     
-    RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);
+    if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
+        dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
+        CRASH();
+    }
     
-    if (canCompile(callerCodeBlock->m_capabilityLevelState))
+    if (canCompile(callerCodeBlock->capabilityLevelState()))
         return;
     
     if (Options::verboseCallLink())
-        dataLog("    Marking SABI because the caller is not a DFG candidate.\n");
+        dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
     
     m_shouldAlwaysBeInlined = false;
 #endif
 }
 
-#if ENABLE(JIT)
 unsigned CodeBlock::reoptimizationRetryCounter() const
 {
+#if ENABLE(JIT)
     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
     return m_reoptimizationRetryCounter;
+#else
+    return 0;
+#endif // ENABLE(JIT)
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
+{
+    m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
+}
+
+void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
+{
+    m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
+}
+    
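+// Converts a count of saved machine registers into the number of VirtualRegister slots
+// needed to hold them: the byte size is rounded up to a whole number of Register slots.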
+static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
+{
+    static const unsigned cpuRegisterSize = sizeof(void*);
+    return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
+}
+
+size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
+{
+    return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
+}
+
+size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
+{
+    return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
 }
 
 void CodeBlock::countReoptimization()
@@ -2899,6 +3644,11 @@ void CodeBlock::countReoptimization()
 unsigned CodeBlock::numberOfDFGCompiles()
 {
     ASSERT(JITCode::isBaselineCode(jitType()));
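+    // When testing the FTL, a failed FTL compile is reported as an enormous compile
+    // count so that the tiering machinery stops waiting for this block to tier up.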
+    if (Options::testTheFTL()) {
+        if (m_didFailFTLCompilation)
+            return 1000000;
+        return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
+    }
     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
 }
 
@@ -2979,13 +3729,16 @@ double CodeBlock::optimizationThresholdScalingFactor()
     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
     
     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+    
+    result *= codeTypeThresholdMultiplier();
+    
     if (Options::verboseOSR()) {
         dataLog(
             *this, ": instruction count is ", instructionCount,
             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
             "\n");
     }
-    return result * codeTypeThresholdMultiplier();
+    return result;
 }
 
 static int32_t clipThreshold(double threshold)
@@ -3010,7 +3763,7 @@ int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
 bool CodeBlock::checkIfOptimizationThresholdReached()
 {
 #if ENABLE(DFG_JIT)
-    if (DFG::Worklist* worklist = m_vm->worklist.get()) {
+    if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
             == DFG::Worklist::Compiled) {
             optimizeNextInvocation();
@@ -3076,8 +3829,22 @@ void CodeBlock::forceOptimizationSlowPathConcurrently()
 #if ENABLE(DFG_JIT)
 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
 {
-    RELEASE_ASSERT(jitType() == JITCode::BaselineJIT);
-    RELEASE_ASSERT((result == CompilationSuccessful) == (replacement() != this));
+    JITCode::JITType type = jitType();
+    if (type != JITCode::BaselineJIT) {
+        dataLog(*this, ": expected to have baseline code but have ", type, "\n");
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    CodeBlock* theReplacement = replacement();
+    if ((result == CompilationSuccessful) != (theReplacement != this)) {
+        dataLog(*this, ": we have result = ", result, " but ");
+        if (theReplacement == this)
+            dataLog("we are our own replacement.\n");
+        else
+            dataLog("our replacement is ", pointerDump(theReplacement), "\n");
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
     switch (result) {
     case CompilationSuccessful:
         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
@@ -3100,6 +3867,8 @@ void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResu
         optimizeAfterWarmUp();
         return;
     }
+    
+    dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
     RELEASE_ASSERT_NOT_REACHED();
 }
 
@@ -3141,26 +3910,74 @@ bool CodeBlock::shouldReoptimizeFromLoopNow()
 }
 #endif
 
-ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
 {
-    for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
-        if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
-            return &m_arrayProfiles[i];
+    for (auto& arrayProfile : m_arrayProfiles) {
+        if (arrayProfile.bytecodeOffset() == bytecodeOffset)
+            return &arrayProfile;
     }
     return 0;
 }
 
-ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return getArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
+{
+    m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
+    return &m_arrayProfiles.last();
+}
+
+ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return addArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
 {
-    ArrayProfile* result = getArrayProfile(bytecodeOffset);
+    ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
     if (result)
         return result;
-    return addArrayProfile(bytecodeOffset);
+    return addArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return getOrAddArrayProfile(locker, bytecodeOffset);
+}
+
+#if ENABLE(DFG_JIT)
+Vector<CodeOrigin>& CodeBlock::codeOrigins()
+{
+    return m_jitCode->dfgCommon()->codeOrigins;
 }
 
+size_t CodeBlock::numberOfDFGIdentifiers() const
+{
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return 0;
+    
+    return m_jitCode->dfgCommon()->dfgIdentifiers.size();
+}
+
+const Identifier& CodeBlock::identifier(int index) const
+{
+    size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
+    if (static_cast<size_t>(index) < unlinkedIdentifiers)
+        return m_unlinkedCode->identifier(index);
+    ASSERT(JITCode::isOptimizingJIT(jitType()));
+    return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
+}
+#endif // ENABLE(DFG_JIT)
+
 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
 {
-    ConcurrentJITLocker locker(m_lock);
+    ConcurrentJSLocker locker(m_lock);
     
     numberOfLiveNonArgumentValueProfiles = 0;
     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
@@ -3192,7 +4009,7 @@ void CodeBlock::updateAllValueProfilePredictions()
 
 void CodeBlock::updateAllArrayPredictions()
 {
-    ConcurrentJITLocker locker(m_lock);
+    ConcurrentJSLocker locker(m_lock);
     
     for (unsigned i = m_arrayProfiles.size(); i--;)
         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
@@ -3253,12 +4070,8 @@ void CodeBlock::tallyFrequentExitSites()
     switch (jitType()) {
     case JITCode::DFGJIT: {
         DFG::JITCode* jitCode = m_jitCode->dfg();
-        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
-            DFG::OSRExit& exit = jitCode->osrExit[i];
-            
-            if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
-                continue;
-        }
+        for (auto& exit : jitCode->osrExit)
+            exit.considerAddingAsFrequentExitSite(profiledBlock);
         break;
     }
 
@@ -3270,9 +4083,7 @@ void CodeBlock::tallyFrequentExitSites()
         FTL::JITCode* jitCode = m_jitCode->ftl();
         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
             FTL::OSRExit& exit = jitCode->osrExit[i];
-            
-            if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
-                continue;
+            exit.considerAddingAsFrequentExitSite(profiledBlock);
         }
         break;
     }
@@ -3308,21 +4119,14 @@ void CodeBlock::dumpValueProfiles()
         RareCaseProfile* profile = rareCaseProfile(i);
         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
     }
-    dataLog("SpecialFastCaseProfile for ", *this, ":\n");
-    for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
-        RareCaseProfile* profile = specialFastCaseProfile(i);
-        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
-    }
 }
 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
 
 unsigned CodeBlock::frameRegisterCount()
 {
     switch (jitType()) {
-#if ENABLE(LLINT)
     case JITCode::InterpreterThunk:
         return LLInt::frameRegisterCountFor(this);
-#endif // ENABLE(LLINT)
 
 #if ENABLE(JIT)
     case JITCode::BaselineJIT:
@@ -3341,6 +4145,11 @@ unsigned CodeBlock::frameRegisterCount()
     }
 }
 
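+// The stack pointer sits just past the last local, so its offset is that of the
+// highest-numbered callee local in this frame.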
+int CodeBlock::stackPointerOffset()
+{
+    return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
+}
+
 size_t CodeBlock::predictedMachineCodeSize()
 {
     // This will be called from CodeBlock::CodeBlock before either m_vm or the
@@ -3349,12 +4158,12 @@ size_t CodeBlock::predictedMachineCodeSize()
     if (!m_vm)
         return 0;
     
-    if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
+    if (!*m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
         return 0; // It's as good of a prediction as we'll get.
     
     // Be conservative: return a size that will be an overestimation 84% of the time.
-    double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
-        m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
+    double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
+        m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
     
     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
     // here is OK, since this whole method is just a heuristic.
@@ -3400,72 +4209,35 @@ bool CodeBlock::usesOpcode(OpcodeID opcodeID)
 
 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
 {
-    ConcurrentJITLocker locker(symbolTable()->m_lock);
-    SymbolTable::Map::iterator end = symbolTable()->end(locker);
-    for (SymbolTable::Map::iterator ptr = symbolTable()->begin(locker); ptr != end; ++ptr) {
-        if (ptr->value.getIndex() == virtualRegister.offset()) {
-            // FIXME: This won't work from the compilation thread.
-            // https://bugs.webkit.org/show_bug.cgi?id=115300
-            return String(ptr->key);
+    for (auto& constantRegister : m_constantRegisters) {
+        if (constantRegister.get().isEmpty())
+            continue;
+        if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
+            ConcurrentJSLocker locker(symbolTable->m_lock);
+            auto end = symbolTable->end(locker);
+            for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
+                if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
+                    // FIXME: This won't work from the compilation thread.
+                    // https://bugs.webkit.org/show_bug.cgi?id=115300
+                    return ptr->key.get();
+                }
+            }
         }
     }
-    if (needsActivation() && virtualRegister == activationRegister())
-        return ASCIILiteral("activation");
     if (virtualRegister == thisRegister())
         return ASCIILiteral("this");
-    if (usesArguments()) {
-        if (virtualRegister == argumentsRegister())
-            return ASCIILiteral("arguments");
-        if (unmodifiedArgumentsRegister(argumentsRegister()) == virtualRegister)
-            return ASCIILiteral("real arguments");
-    }
     if (virtualRegister.isArgument())
-        return String::format("arguments[%3d]", virtualRegister.toArgument()).impl();
+        return String::format("arguments[%3d]", virtualRegister.toArgument());
 
     return "";
 }
 
-namespace {
-
-struct VerifyCapturedDef {
-    void operator()(CodeBlock* codeBlock, Instruction* instruction, OpcodeID opcodeID, int operand)
-    {
-        unsigned bytecodeOffset = instruction - codeBlock->instructions().begin();
-        
-        if (codeBlock->isConstantRegisterIndex(operand)) {
-            codeBlock->beginValidationDidFail();
-            dataLog("    At bc#", bytecodeOffset, " encountered a definition of a constant.\n");
-            codeBlock->endValidationDidFail();
-            return;
-        }
-
-        switch (opcodeID) {
-        case op_enter:
-        case op_captured_mov:
-        case op_init_lazy_reg:
-        case op_create_arguments:
-        case op_new_captured_func:
-            return;
-        default:
-            break;
-        }
-        
-        VirtualRegister virtualReg(operand);
-        if (!virtualReg.isLocal())
-            return;
-        
-        if (codeBlock->captureCount() && codeBlock->symbolTable()->isCaptured(operand)) {
-            codeBlock->beginValidationDidFail();
-            dataLog("    At bc#", bytecodeOffset, " encountered invalid assignment to captured variable loc", virtualReg.toLocal(), ".\n");
-            codeBlock->endValidationDidFail();
-            return;
-        }
-        
-        return;
-    }
-};
-
-} // anonymous namespace
+ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
+{
+    OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instructions()[bytecodeOffset].u.opcode);
+    unsigned length = opcodeLength(opcodeID);
+    return instructions()[bytecodeOffset + length - 1].u.profile;
+}
 
 void CodeBlock::validate()
 {
@@ -3473,7 +4245,7 @@ void CodeBlock::validate()
     
     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
     
-    if (liveAtHead.numBits() != static_cast<unsigned>(m_numCalleeRegisters)) {
+    if (liveAtHead.numBits() != static_cast<unsigned>(m_numCalleeLocals)) {
         beginValidationDidFail();
         dataLog("    Wrong number of bits in result!\n");
         dataLog("    Result: ", liveAtHead, "\n");
@@ -3481,39 +4253,16 @@ void CodeBlock::validate()
         endValidationDidFail();
     }
     
-    for (unsigned i = m_numCalleeRegisters; i--;) {
-        bool isCaptured = false;
+    for (unsigned i = m_numCalleeLocals; i--;) {
         VirtualRegister reg = virtualRegisterForLocal(i);
         
-        if (captureCount())
-            isCaptured = reg.offset() <= captureStart() && reg.offset() > captureEnd();
-        
-        if (isCaptured) {
-            if (!liveAtHead.get(i)) {
-                beginValidationDidFail();
-                dataLog("    Variable loc", i, " is expected to be live because it is captured, but it isn't live.\n");
-                dataLog("    Result: ", liveAtHead, "\n");
-                endValidationDidFail();
-            }
-        } else {
-            if (liveAtHead.get(i)) {
-                beginValidationDidFail();
-                dataLog("    Variable loc", i, " is expected to be dead.\n");
-                dataLog("    Result: ", liveAtHead, "\n");
-                endValidationDidFail();
-            }
+        if (liveAtHead[i]) {
+            beginValidationDidFail();
+            dataLog("    Variable ", reg, " is expected to be dead.\n");
+            dataLog("    Result: ", liveAtHead, "\n");
+            endValidationDidFail();
         }
     }
-    
-    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructions().size();) {
-        Instruction* currentInstruction = instructions().begin() + bytecodeOffset;
-        OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
-        
-        VerifyCapturedDef verifyCapturedDef;
-        computeDefsForBytecodeOffset(this, bytecodeOffset, verifyCapturedDef);
-        
-        bytecodeOffset += opcodeLength(opcodeID);
-    }
 }
 
 void CodeBlock::beginValidationDidFail()
@@ -3535,15 +4284,293 @@ void CodeBlock::addBreakpoint(unsigned numBreakpoints)
 {
     m_numBreakpoints += numBreakpoints;
     ASSERT(m_numBreakpoints);
-    if (jitType() == JITCode::DFGJIT)
-        jettison();
+    if (JITCode::isOptimizingJIT(jitType()))
+        jettison(Profiler::JettisonDueToDebuggerBreakpoint);
 }
 
 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
 {
     m_steppingMode = mode;
-    if (mode == SteppingModeEnabled && jitType() == JITCode::DFGJIT)
-        jettison();
+    if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
+        jettison(Profiler::JettisonDueToDebuggerStepping);
+}
+
+RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
+{
+    m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+    return &m_rareCaseProfiles.last();
+}
+
+RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+{
+    return tryBinarySearch<RareCaseProfile, int>(
+        m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
+        getRareCaseProfileBytecodeOffset);
+}
+
+unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
+{
+    RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
+    if (profile)
+        return profile->m_counter;
+    return 0;
+}
+
+ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
+{
+    return arithProfileForPC(instructions().begin() + bytecodeOffset);
+}
+
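+// The ArithProfile lives in the last operand slot of the instruction: slot 3 for the
+// unary op_negate, slot 4 for the binary arithmetic and bitwise ops.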
+ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
+{
+    auto opcodeID = vm()->interpreter->getOpcodeID(pc[0].u.opcode);
+    switch (opcodeID) {
+    case op_negate:
+        return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
+    case op_bitor:
+    case op_bitand:
+    case op_bitxor:
+    case op_add:
+    case op_mul:
+    case op_sub:
+    case op_div:
+        return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
+    default:
+        break;
+    }
+
+    return nullptr;
+}
+
+bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
+{
+    if (!hasBaselineJITProfiling())
+        return false;
+    ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
+    if (!profile)
+        return false;
+    return profile->tookSpecialFastPath();
+}
+
+#if ENABLE(JIT)
+DFG::CapabilityLevel CodeBlock::capabilityLevel()
+{
+    DFG::CapabilityLevel result = computeCapabilityLevel();
+    m_capabilityLevelState = result;
+    return result;
+}
+#endif
+
+void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
+{
+    if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
+        return;
+    const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
+    for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
+        // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
+        // the next op_profile_control_flow will give us the text range of a single basic block.
+        size_t startIdx = bytecodeOffsets[i];
+        RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow);
+        int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
+        int basicBlockEndOffset;
+        if (i + 1 < offsetsLength) {
+            size_t endIdx = bytecodeOffsets[i + 1];
+            RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow);
+            basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
+        } else {
+            basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
+            basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
+        }
+
+        // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
+        // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
+        // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
+        // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
+        // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
+        // program. The condition: 
+        // (basicBlockEndOffset < basicBlockStartOffset) 
+        // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
+        // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
+        // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
+        // internal data structure, so if any of them execute, it will record the same textual basic block in the 
+        // JavaScript program as executing.
+        // At the bytecode level, this situation looks like:
+        // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
+        // ...
+        // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
+        // ...
+        // m: op_profile_control_flow
+        if (basicBlockEndOffset < basicBlockStartOffset) {
+            RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
+            instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
+            continue;
+        }
+
+        BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
+
+        // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
+        // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
+        // This is necessary because in the original source text of a JavaScript program, 
+        // function literals form new basic blocks boundaries, but they aren't represented 
+        // inside the CodeBlock's instruction stream.
+        auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
+            const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
+            int functionStart = executable->typeProfilingStartOffset();
+            int functionEnd = executable->typeProfilingEndOffset();
+            if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
+                basicBlockLocation->insertGap(functionStart, functionEnd);
+        };
+
+        for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
+            insertFunctionGaps(executable);
+        for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
+            insertFunctionGaps(executable);
+
+        instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
+    }
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
+{
+    m_pcToCodeOriginMap = WTFMove(map);
+}
+
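+// Maps a machine PC back to the CodeOrigin that produced it, consulting the
+// PC-to-CodeOrigin map first, then the inline cache stubs, then the JIT code itself.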
+std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
+{
+    if (m_pcToCodeOriginMap) {
+        if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
+            return codeOrigin;
+    }
+
+    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+        StructureStubInfo* stub = *iter;
+        if (stub->containsPC(pc))
+            return std::optional<CodeOrigin>(stub->codeOrigin);
+    }
+
+    if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
+        return codeOrigin;
+
+    return std::nullopt;
+}
+#endif // ENABLE(JIT)
+
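+// In the LLInt and baseline JIT a CallSiteIndex encodes the bytecode offset directly
+// (on 32-bit it is a pointer into the instruction stream); in the DFG and FTL it names
+// a CodeOrigin that must be looked up.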
+std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
+{
+    std::optional<unsigned> bytecodeOffset;
+    JITCode::JITType jitType = this->jitType();
+    if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
+#if USE(JSVALUE64)
+        bytecodeOffset = callSiteIndex.bits();
+#else
+        Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
+        bytecodeOffset = instruction - instructions().begin();
+#endif
+    } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
+#if ENABLE(DFG_JIT)
+        RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
+        CodeOrigin origin = codeOrigin(callSiteIndex);
+        bytecodeOffset = origin.bytecodeIndex;
+#else
+        RELEASE_ASSERT_NOT_REACHED();
+#endif
+    }
+
+    return bytecodeOffset;
+}
+
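+// Scales a tier-up threshold using the didOptimize() tri-state recorded on the
+// UnlinkedCodeBlock: code that never optimized before waits four times longer, while
+// code that did optimize tiers up at half the threshold.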
+int32_t CodeBlock::thresholdForJIT(int32_t threshold)
+{
+    switch (unlinkedCodeBlock()->didOptimize()) {
+    case MixedTriState:
+        return threshold;
+    case FalseTriState:
+        return threshold * 4;
+    case TrueTriState:
+        return threshold / 2;
+    }
+    ASSERT_NOT_REACHED();
+    return threshold;
+}
+
+void CodeBlock::jitAfterWarmUp()
+{
+    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
+}
+
+void CodeBlock::jitSoon()
+{
+    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
+}
+
+void CodeBlock::dumpMathICStats()
+{
+#if ENABLE(MATH_IC_STATS)
+    double numAdds = 0.0;
+    double totalAddSize = 0.0;
+    double numMuls = 0.0;
+    double totalMulSize = 0.0;
+    double numNegs = 0.0;
+    double totalNegSize = 0.0;
+    double numSubs = 0.0;
+    double totalSubSize = 0.0;
+
+    auto countICs = [&] (CodeBlock* codeBlock) {
+        for (JITAddIC* addIC : codeBlock->m_addICs) {
+            numAdds++;
+            totalAddSize += addIC->codeSize();
+        }
+
+        for (JITMulIC* mulIC : codeBlock->m_mulICs) {
+            numMuls++;
+            totalMulSize += mulIC->codeSize();
+        }
+
+        for (JITNegIC* negIC : codeBlock->m_negICs) {
+            numNegs++;
+            totalNegSize += negIC->codeSize();
+        }
+
+        for (JITSubIC* subIC : codeBlock->m_subICs) {
+            numSubs++;
+            totalSubSize += subIC->codeSize();
+        }
+
+        return false;
+    };
+    heap()->forEachCodeBlock(countICs);
+
+    dataLog("Num Adds: ", numAdds, "\n");
+    dataLog("Total Add size in bytes: ", totalAddSize, "\n");
+    dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
+    dataLog("\n");
+    dataLog("Num Muls: ", numMuls, "\n");
+    dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
+    dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
+    dataLog("\n");
+    dataLog("Num Negs: ", numNegs, "\n");
+    dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
+    dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
+    dataLog("\n");
+    dataLog("Num Subs: ", numSubs, "\n");
+    dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
+    dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
+
+    dataLog("-----------------------\n");
+#endif
+}
+
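+// Slow path for livenessAnalysis(): computes the analysis without holding the lock,
+// then installs it under m_lock only if another thread has not already done so.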
+BytecodeLivenessAnalysis& CodeBlock::livenessAnalysisSlow()
+{
+    std::unique_ptr<BytecodeLivenessAnalysis> analysis = std::make_unique<BytecodeLivenessAnalysis>(this);
+    {
+        ConcurrentJSLocker locker(m_lock);
+        if (!m_livenessAnalysis)
+            m_livenessAnalysis = WTFMove(analysis);
+        return *m_livenessAnalysis;
+    }
 }
 
+
 } // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 0d9868079..2a2966460 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -27,54 +27,49 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef CodeBlock_h
-#define CodeBlock_h
+#pragma once
 
 #include "ArrayProfile.h"
 #include "ByValInfo.h"
 #include "BytecodeConventions.h"
-#include "BytecodeLivenessAnalysis.h"
 #include "CallLinkInfo.h"
 #include "CallReturnOffsetToBytecodeOffset.h"
 #include "CodeBlockHash.h"
-#include "CodeBlockSet.h"
-#include "ConcurrentJITLock.h"
 #include "CodeOrigin.h"
 #include "CodeType.h"
 #include "CompactJITCodeMap.h"
+#include "ConcurrentJSLock.h"
 #include "DFGCommon.h"
-#include "DFGCommonData.h"
 #include "DFGExitProfile.h"
-#include "DFGMinifiedGraph.h"
-#include "DFGOSREntry.h"
-#include "DFGOSRExit.h"
-#include "DFGVariableEventStream.h"
 #include "DeferredCompilationCallback.h"
-#include "EvalCodeCache.h"
+#include "DirectEvalCodeCache.h"
+#include "EvalExecutable.h"
 #include "ExecutionCounter.h"
 #include "ExpressionRangeInfo.h"
+#include "FunctionExecutable.h"
 #include "HandlerInfo.h"
-#include "ObjectAllocationProfile.h"
-#include "Options.h"
-#include "Operations.h"
-#include "PutPropertySlot.h"
 #include "Instruction.h"
 #include "JITCode.h"
-#include "JITWriteBarrier.h"
+#include "JITMathICForwards.h"
+#include "JSCell.h"
 #include "JSGlobalObject.h"
 #include "JumpTable.h"
 #include "LLIntCallLinkInfo.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
 #include "LazyOperandValueProfile.h"
-#include "ProfilerCompilation.h"
-#include "RegExpObject.h"
-#include "StructureStubInfo.h"
+#include "ModuleProgramExecutable.h"
+#include "ObjectAllocationProfile.h"
+#include "Options.h"
+#include "ProfilerJettisonReason.h"
+#include "ProgramExecutable.h"
+#include "PutPropertySlot.h"
 #include "UnconditionalFinalizer.h"
 #include "ValueProfile.h"
 #include "VirtualRegister.h"
 #include "Watchpoint.h"
 #include 
 #include <wtf/Bag.h>
+#include <wtf/FastBitVector.h>
 #include <wtf/FastMalloc.h>
-#include <wtf/PassOwnPtr.h>
 #include <wtf/RefCountedArray.h>
 #include <wtf/RefPtr.h>
@@ -83,33 +78,55 @@
 
 namespace JSC {
 
+class BytecodeLivenessAnalysis;
+class CodeBlockSet;
 class ExecState;
+class JSModuleEnvironment;
 class LLIntOffsetsExtractor;
-class RepatchBuffer;
+class PCToCodeOriginMap;
+class RegisterAtOffsetList;
+class StructureStubInfo;
+
+enum class AccessType : int8_t;
 
-inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }
+struct ArithProfile;
 
-static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
 
 enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
 
-class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
-    WTF_MAKE_FAST_ALLOCATED;
+class CodeBlock : public JSCell {
+    typedef JSCell Base;
     friend class BytecodeLivenessAnalysis;
     friend class JIT;
     friend class LLIntOffsetsExtractor;
+
+    class UnconditionalFinalizer : public JSC::UnconditionalFinalizer { 
+        void finalizeUnconditionally() override;
+    };
+
+    class WeakReferenceHarvester : public JSC::WeakReferenceHarvester {
+        void visitWeakReferences(SlotVisitor&) override;
+    };
+
 public:
     enum CopyParsedBlockTag { CopyParsedBlock };
+
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    DECLARE_INFO;
+
 protected:
-    CodeBlock(CopyParsedBlockTag, CodeBlock& other);
-        
-    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
+    CodeBlock(VM*, Structure*, CopyParsedBlockTag, CodeBlock& other);
+    CodeBlock(VM*, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, RefPtr<SourceProvider>&&, unsigned sourceOffset, unsigned firstLineColumnOffset);
+
+    void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other);
+    void finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);
 
     WriteBarrier<JSGlobalObject> m_globalObject;
-    Heap* m_heap;
 
 public:
-    JS_EXPORT_PRIVATE virtual ~CodeBlock();
+    JS_EXPORT_PRIVATE ~CodeBlock();
 
     UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
 
@@ -117,39 +134,74 @@ public:
     CodeBlockHash hash() const;
     bool hasHash() const;
     bool isSafeToComputeHash() const;
+    CString hashAsStringIfPossible() const;
     CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
     CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
     void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
-    void dump(PrintStream&) const;
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
 
     int numParameters() const { return m_numParameters; }
     void setNumParameters(int newValue);
 
+    int numberOfArgumentsToSkip() const { return m_numberOfArgumentsToSkip; }
+
+    int numCalleeLocals() const { return m_numCalleeLocals; }
+
     int* addressOfNumParameters() { return &m_numParameters; }
     static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
 
-    CodeBlock* alternative() { return m_alternative.get(); }
-    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
-    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
+    CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
+    void setAlternative(VM&, CodeBlock*);
+
+    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
+    {
+        Functor f(std::forward<Functor>(functor));
+        Vector<CodeBlock*, 4> codeBlocks;
+        codeBlocks.append(this);
+
+        while (!codeBlocks.isEmpty()) {
+            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
+            f(currentCodeBlock);
+
+            if (CodeBlock* alternative = currentCodeBlock->alternative())
+                codeBlocks.append(alternative);
+            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
+                codeBlocks.append(osrEntryBlock);
+        }
+    }
     
     CodeSpecializationKind specializationKind() const
     {
         return specializationFromIsConstruct(m_isConstructor);
     }
-    
-    CodeBlock* baselineAlternative();
+
+    CodeBlock* alternativeForJettison();    
+    JS_EXPORT_PRIVATE CodeBlock* baselineAlternative();
     
     // FIXME: Get rid of this.
     // https://bugs.webkit.org/show_bug.cgi?id=123677
     CodeBlock* baselineVersion();
 
-    void visitAggregate(SlotVisitor&);
-
-    void dumpBytecode(PrintStream& = WTF::dataFile());
-    void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
+    static size_t estimatedSize(JSCell*);
+    static void visitChildren(JSCell*, SlotVisitor&);
+    void visitChildren(SlotVisitor&);
+    void visitWeakly(SlotVisitor&);
+    void clearVisitWeaklyHasBeenCalled();
+
+    void dumpSource();
+    void dumpSource(PrintStream&);
+
+    void dumpBytecode();
+    void dumpBytecode(PrintStream&);
+    void dumpBytecode(
+        PrintStream&, unsigned bytecodeOffset,
+        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+    void dumpExceptionHandlers(PrintStream&);
     void printStructures(PrintStream&, const Instruction*);
     void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
 
+    void dumpMathICStats();
+
     bool isStrictMode() const { return m_isStrictMode; }
     ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
 
@@ -169,71 +221,85 @@ public:
         return index >= m_numVars;
     }
 
-    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
+    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+    HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
+    void removeExceptionHandlerForCallSite(CallSiteIndex);
     unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
     unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
     void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
-                                          int& startOffset, int& endOffset, unsigned& line, unsigned& column);
+        int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
 
-#if ENABLE(JIT)
-    StructureStubInfo* addStubInfo();
-    Bag<StructureStubInfo>::iterator begin() { return m_stubInfos.begin(); }
-    Bag<StructureStubInfo>::iterator end() { return m_stubInfos.end(); }
+    std::optional bytecodeOffsetFromCallSiteIndex(CallSiteIndex);
 
-    void resetStub(StructureStubInfo&);
+    void getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result);
+    void getStubInfoMap(StubInfoMap& result);
     
-    void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
+    void getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result);
+    void getCallLinkInfoMap(CallLinkInfoMap& result);
 
-    ByValInfo& getByValInfo(unsigned bytecodeIndex)
-    {
-        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
-    }
+    void getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result);
+    void getByValInfoMap(ByValInfoMap& result);
+    
+#if ENABLE(JIT)
+    StructureStubInfo* addStubInfo(AccessType);
+    JITAddIC* addJITAddIC(ArithProfile*);
+    JITMulIC* addJITMulIC(ArithProfile*);
+    JITNegIC* addJITNegIC(ArithProfile*);
+    JITSubIC* addJITSubIC(ArithProfile*);
+    Bag::iterator stubInfoBegin() { return m_stubInfos.begin(); }
+    Bag::iterator stubInfoEnd() { return m_stubInfos.end(); }
+    
+    // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
+    // stub info.
+    StructureStubInfo* findStubInfo(CodeOrigin);
 
-    CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
-    {
-        return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
-    }
+    ByValInfo* addByValInfo();
 
-    CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
-    {
-        ASSERT(!JITCode::isOptimizingJIT(jitType()));
-        return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
-    }
+    CallLinkInfo* addCallLinkInfo();
+    Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
+    Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
+
+    // This is a slow function call used primarily for compiling OSR exits in the case
+    // that there had been inlining. Chances are if you want to use this, you're really
+    // looking for a CallLinkInfoMap to amortize the cost of calling this.
+    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
+    
+    // We call this when we want to reattempt compiling something with the baseline JIT. Ideally
+    // the baseline JIT would not add data to CodeBlock, but instead it would put its data into
+    // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
+    // would be able to get rid of this silly function.
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061
+    void resetJITData();
 #endif // ENABLE(JIT)
 
     void unlinkIncomingCalls();
 
 #if ENABLE(JIT)
-    void unlinkCalls();
-        
     void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
-        
-    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
-    {
-        return m_incomingCalls.isOnList(incoming);
-    }
+    void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
 #endif // ENABLE(JIT)
 
-#if ENABLE(LLINT)
     void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
-#endif // ENABLE(LLINT)
 
-    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
+    void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap)
     {
-        m_jitCodeMap = jitCodeMap;
+        m_jitCodeMap = WTFMove(jitCodeMap);
     }
     CompactJITCodeMap* jitCodeMap()
     {
         return m_jitCodeMap.get();
     }
     
+    static void clearLLIntGetByIdCache(Instruction*);
+
     unsigned bytecodeOffset(Instruction* returnAddress)
     {
         RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
         return static_cast<Instruction*>(returnAddress) - instructions().begin();
     }
 
-    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
+    typedef JSC::Instruction Instruction;
+    typedef RefCountedArray<Instruction>& UnpackedInstructions;
 
     unsigned numberOfInstructions() const { return m_instructions.size(); }
     RefCountedArray<Instruction>& instructions() { return m_instructions; }
@@ -245,28 +311,19 @@ public:
 
     unsigned instructionCount() const { return m_instructions.size(); }
 
-    int argumentIndexAfterCapture(size_t argument);
-    
-    bool hasSlowArguments();
-    const SlowArgument* machineSlowArguments();
-
-    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
-    void install();
-    
     // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
-    PassRefPtr<CodeBlock> newReplacement();
+    CodeBlock* newReplacement();
     
-    void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
+    void setJITCode(Ref<JITCode>&& code)
     {
-        ASSERT(m_heap->isDeferred());
-        m_heap->reportExtraMemoryCost(code->size());
-        ConcurrentJITLocker locker(m_lock);
+        ASSERT(heap()->isDeferred());
+        heap()->reportExtraMemoryAllocated(code->size());
+        ConcurrentJSLocker locker(m_lock);
         WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
-        m_jitCode = code;
-        m_jitCodeWithArityCheck = codeWithArityCheck;
+        m_jitCode = WTFMove(code);
     }
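
The fence matters because jitType() below reads m_jitCode without taking m_lock: a store-store barrier before the pointer store guarantees that a concurrent reader who observes the new pointer also observes the fully constructed JITCode. The same publication pattern in portable C++ (a sketch, not JSC code):

    #include <atomic>

    struct Payload { int data = 0; };
    std::atomic<Payload*> published { nullptr };

    void writer(Payload* p)
    {
        p->data = 42;                                  // initialize first
        published.store(p, std::memory_order_release); // then publish
    }

    int reader()
    {
        Payload* p = published.load(std::memory_order_acquire);
        return p ? p->data : -1;  // seeing p implies seeing data == 42
    }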
-    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
-    MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
+    RefPtr<JITCode> jitCode() { return m_jitCode; }
+    static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
     JITCode::JITType jitType() const
     {
         JITCode* jitCode = m_jitCode.get();
@@ -282,103 +339,44 @@ public:
     }
     
 #if ENABLE(JIT)
-    virtual CodeBlock* replacement() = 0;
+    CodeBlock* replacement();
 
-    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
-    DFG::CapabilityLevel capabilityLevel()
-    {
-        DFG::CapabilityLevel result = capabilityLevelInternal();
-        m_capabilityLevelState = result;
-        return result;
-    }
-    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
+    DFG::CapabilityLevel computeCapabilityLevel();
+    DFG::CapabilityLevel capabilityLevel();
+    DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }
 
     bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
     bool hasOptimizedReplacement(); // the typeToReplace is my JITType
 #endif
 
-    void jettison(ReoptimizationMode = DontCountReoptimization);
+    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);
     
-    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
+    ExecutableBase* ownerExecutable() const { return m_ownerExecutable.get(); }
+    ScriptExecutable* ownerScriptExecutable() const { return jsCast<ScriptExecutable*>(m_ownerExecutable.get()); }
 
-    void setVM(VM* vm) { m_vm = vm; }
-    VM* vm() { return m_vm; }
+    VM* vm() const { return m_vm; }
 
     void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
     VirtualRegister thisRegister() const { return m_thisRegister; }
 
-    bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
     bool usesEval() const { return m_unlinkedCode->usesEval(); }
 
-    void setArgumentsRegister(VirtualRegister argumentsRegister)
-    {
-        ASSERT(argumentsRegister.isValid());
-        m_argumentsRegister = argumentsRegister;
-        ASSERT(usesArguments());
-    }
-    VirtualRegister argumentsRegister() const
-    {
-        ASSERT(usesArguments());
-        return m_argumentsRegister;
-    }
-    VirtualRegister uncheckedArgumentsRegister()
-    {
-        if (!usesArguments())
-            return VirtualRegister();
-        return argumentsRegister();
-    }
-    void setActivationRegister(VirtualRegister activationRegister)
-    {
-        m_activationRegister = activationRegister;
-    }
-
-    VirtualRegister activationRegister() const
-    {
-        ASSERT(needsFullScopeChain());
-        return m_activationRegister;
-    }
-
-    VirtualRegister uncheckedActivationRegister()
+    void setScopeRegister(VirtualRegister scopeRegister)
     {
-        if (!needsFullScopeChain())
-            return VirtualRegister();
-        return activationRegister();
+        ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
+        m_scopeRegister = scopeRegister;
     }
 
-    bool usesArguments() const { return m_argumentsRegister.isValid(); }
-
-    bool needsActivation() const
+    VirtualRegister scopeRegister() const
     {
-        return m_needsActivation;
+        return m_scopeRegister;
     }
     
-    unsigned captureCount() const
+    CodeType codeType() const
     {
-        if (!symbolTable())
-            return 0;
-        return symbolTable()->captureCount();
-    }
-    
-    int captureStart() const
-    {
-        if (!symbolTable())
-            return 0;
-        return symbolTable()->captureStart();
-    }
-    
-    int captureEnd() const
-    {
-        if (!symbolTable())
-            return 0;
-        return symbolTable()->captureEnd();
+        return static_cast<CodeType>(m_codeType);
     }
 
-    bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
-    
-    int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
-    int framePointerOffsetToGetActivationRegisters();
-
-    CodeType codeType() const { return m_unlinkedCode->codeType(); }
     PutPropertySlot::Context putByIdContext() const
     {
         if (codeType() == EvalCode)
@@ -393,20 +391,8 @@ public:
     size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
     unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
 
-    void clearEvalCache();
-
     String nameForRegister(VirtualRegister);
 
-#if ENABLE(JIT)
-    void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
-    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
-    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
-
-    void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.resizeToFit(size); }
-    size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
-    CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
-#endif
-
     unsigned numberOfArgumentValueProfiles()
     {
         ASSERT(m_numParameters >= 0);
@@ -422,20 +408,12 @@ public:
 
     unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
     ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
-    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
-    {
-        ValueProfile* result = binarySearch<ValueProfile, int>(
-            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
-            getValueProfileBytecodeOffset);
-        ASSERT(result->m_bytecodeOffset != -1);
-        ASSERT(instructions()[bytecodeOffset + opcodeLength(
-            m_vm->interpreter->getOpcodeID(
-                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
-        return result;
-    }
-    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
+    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset);
+    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
     {
-        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
+        if (ValueProfile* valueProfile = valueProfileForBytecodeOffset(bytecodeOffset))
+            return valueProfile->computeUpdatedPrediction(locker);
+        return SpecNone;
     }
 
     unsigned totalNumberOfValueProfiles()
@@ -449,25 +427,16 @@ public:
         return valueProfile(index - numberOfArgumentValueProfiles());
     }
 
-    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
-    {
-        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
-        return &m_rareCaseProfiles.last();
-    }
+    RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
     unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
-    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
-    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
-    {
-        return tryBinarySearch<RareCaseProfile, int>(
-            m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
-            getRareCaseProfileBytecodeOffset);
-    }
+    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
+    unsigned rareCaseProfileCountForBytecodeOffset(int bytecodeOffset);
 
     bool likelyToTakeSlowCase(int bytecodeOffset)
     {
         if (!hasBaselineJITProfiling())
             return false;
-        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
         return value >= Options::likelyToTakeSlowCaseMinimumCount();
     }
 
@@ -475,68 +444,22 @@ public:
     {
         if (!hasBaselineJITProfiling())
             return false;
-        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+        unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
         return value >= Options::couldTakeSlowCaseMinimumCount();
     }
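
likelyToTakeSlowCase() and couldTakeSlowCase() run the same test against different Options thresholds; the counter is bumped each time a bytecode's slow path actually runs under baseline profiling. A minimal self-contained model of the gate (threshold values invented; JSC reads them from Options):

    #include <cstdint>

    struct RareCaseCounter { uint32_t count { 0 }; };

    bool exceedsThreshold(const RareCaseCounter& c, uint32_t minimumCount)
    {
        return c.count >= minimumCount;
    }
    // e.g. a larger minimumCount for "likely", a smaller one for "could".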
 
-    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
-    {
-        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
-        return &m_specialFastCaseProfiles.last();
-    }
-    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
-    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
-    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
-    {
-        return tryBinarySearch<RareCaseProfile, int>(
-                                                     m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
-                                                     getRareCaseProfileBytecodeOffset);
-    }
+    ArithProfile* arithProfileForBytecodeOffset(int bytecodeOffset);
+    ArithProfile* arithProfileForPC(Instruction*);
 
-    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
-    {
-        if (!hasBaselineJITProfiling())
-            return false;
-        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
-    }
-
-    bool couldTakeSpecialFastCase(int bytecodeOffset)
-    {
-        if (!hasBaselineJITProfiling())
-            return false;
-        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
-    }
-
-    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
-    {
-        if (!hasBaselineJITProfiling())
-            return false;
-        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-        unsigned value = slowCaseCount - specialFastCaseCount;
-        return value >= Options::likelyToTakeSlowCaseMinimumCount();
-    }
-
-    bool likelyToTakeAnySlowCase(int bytecodeOffset)
-    {
-        if (!hasBaselineJITProfiling())
-            return false;
-        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
-        unsigned value = slowCaseCount + specialFastCaseCount;
-        return value >= Options::likelyToTakeSlowCaseMinimumCount();
-    }
+    bool couldTakeSpecialFastCase(int bytecodeOffset);
 
     unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
     const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
-    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
-    {
-        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
-        return &m_arrayProfiles.last();
-    }
+    ArrayProfile* addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
+    ArrayProfile* addArrayProfile(unsigned bytecodeOffset);
+    ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
     ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
+    ArrayProfile* getOrAddArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
     ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
 
     // Exception handling support
@@ -547,10 +470,7 @@ public:
     bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
 
 #if ENABLE(DFG_JIT)
-    Vector<CodeOrigin>& codeOrigins()
-    {
-        return m_jitCode->dfgCommon()->codeOrigins;
-    }
+    Vector<CodeOrigin>& codeOrigins();
     
     // Having code origins implies that there has been some inlining.
     bool hasCodeOrigins()
@@ -558,30 +478,34 @@ public:
         return JITCode::isOptimizingJIT(jitType());
     }
         
-    bool canGetCodeOrigin(unsigned index)
+    bool canGetCodeOrigin(CallSiteIndex index)
     {
         if (!hasCodeOrigins())
             return false;
-        return index < codeOrigins().size();
+        return index.bits() < codeOrigins().size();
     }
 
-    CodeOrigin codeOrigin(unsigned index)
+    CodeOrigin codeOrigin(CallSiteIndex index)
     {
-        return codeOrigins()[index];
+        return codeOrigins()[index.bits()];
     }
 
     bool addFrequentExitSite(const DFG::FrequentExitSite& site)
     {
         ASSERT(JITCode::isBaselineCode(jitType()));
-        ConcurrentJITLocker locker(m_lock);
-        return m_exitProfile.add(locker, site);
+        ConcurrentJSLocker locker(m_lock);
+        return m_exitProfile.add(locker, this, site);
     }
-        
-    bool hasExitSite(const DFG::FrequentExitSite& site) const
+
+    bool hasExitSite(const ConcurrentJSLocker& locker, const DFG::FrequentExitSite& site) const
     {
-        ConcurrentJITLocker locker(m_lock);
         return m_exitProfile.hasExitSite(locker, site);
     }
+    bool hasExitSite(const DFG::FrequentExitSite& site) const
+    {
+        ConcurrentJSLocker locker(m_lock);
+        return hasExitSite(locker, site);
+    }
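
This overload pair is the locker-passing idiom used throughout CodeBlock: the convenience form takes the lock and forwards to the form that merely witnesses it, so callers already holding the lock skip re-acquisition. The shape in isolation, with std::mutex standing in for ConcurrentJSLock:

    #include <mutex>

    class ExitSites {
    public:
        bool has() const
        {
            std::lock_guard<std::mutex> locker(m_lock); // take the lock...
            return has(locker);                         // ...then delegate
        }
        bool has(const std::lock_guard<std::mutex>&) const { return m_count > 0; }
    private:
        mutable std::mutex m_lock;
        unsigned m_count { 0 };
    };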
 
     DFG::ExitProfile& exitProfile() { return m_exitProfile; }
 
@@ -589,44 +513,26 @@ public:
     {
         return m_lazyOperandValueProfiles;
     }
-#else // ENABLE(DFG_JIT)
-    bool addFrequentExitSite(const DFG::FrequentExitSite&)
-    {
-        return false;
-    }
 #endif // ENABLE(DFG_JIT)
 
     // Constant Pool
 #if ENABLE(DFG_JIT)
     size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
-    size_t numberOfDFGIdentifiers() const
-    {
-        if (!JITCode::isOptimizingJIT(jitType()))
-            return 0;
-
-        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
-    }
-
-    const Identifier& identifier(int index) const
-    {
-        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
-        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
-            return m_unlinkedCode->identifier(index);
-        ASSERT(JITCode::isOptimizingJIT(jitType()));
-        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
-    }
+    size_t numberOfDFGIdentifiers() const;
+    const Identifier& identifier(int index) const;
 #else
     size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
     const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
 #endif
 
     Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
-    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
+    Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
     unsigned addConstant(JSValue v)
     {
         unsigned result = m_constantRegisters.size();
         m_constantRegisters.append(WriteBarrier<Unknown>());
-        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
+        m_constantRegisters.last().set(m_globalObject->vm(), this, v);
+        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
         return result;
     }
 
@@ -634,19 +540,19 @@ public:
     {
         unsigned result = m_constantRegisters.size();
         m_constantRegisters.append(WriteBarrier<Unknown>());
+        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
         return result;
     }
 
-    bool findConstant(JSValue, unsigned& result);
-    unsigned addOrFindConstant(JSValue);
     WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
-    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+    static ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
     ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+    ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }
 
     FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
     int numberOfFunctionDecls() { return m_functionDecls.size(); }
     FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
-
+    
     RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
 
     unsigned numberOfConstantBuffers() const
@@ -673,15 +579,19 @@ public:
         return constantBufferAsVector(index).data();
     }
 
+    Heap* heap() const { return &m_vm->heap; }
     JSGlobalObject* globalObject() { return m_globalObject.get(); }
 
     JSGlobalObject* globalObjectFor(CodeOrigin);
 
     BytecodeLivenessAnalysis& livenessAnalysis()
     {
-        if (!m_livenessAnalysis)
-            m_livenessAnalysis = std::make_unique<BytecodeLivenessAnalysis>(this);
-        return *m_livenessAnalysis;
+        {
+            ConcurrentJSLocker locker(m_lock);
+            if (!!m_livenessAnalysis)
+                return *m_livenessAnalysis;
+        }
+        return livenessAnalysisSlow();
     }
     
     void validate();
@@ -702,10 +612,7 @@ public:
     StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
     StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
 
-
-    SymbolTable* symbolTable() const { return m_symbolTable.get(); }
-
-    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
+    DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; }
 
     enum ShrinkMode {
         // Shrink prior to generating machine code that may point directly into vectors.
@@ -731,21 +638,18 @@ public:
         m_llintExecuteCounter.deferIndefinitely();
     }
 
-    void jitAfterWarmUp()
-    {
-        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
-    }
-
-    void jitSoon()
-    {
-        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
-    }
+    int32_t thresholdForJIT(int32_t threshold);
+    void jitAfterWarmUp();
+    void jitSoon();
 
-    const ExecutionCounter& llintExecuteCounter() const
+    const BaselineExecutionCounter& llintExecuteCounter() const
     {
         return m_llintExecuteCounter;
     }
 
+    typedef HashMap<Structure*, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;
+    StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; }
+
     // Functions for controlling when tiered compilation kicks in. This
     // controls both when the optimizing compiler is invoked and when OSR
     // entry happens. Two triggers exist: the loop trigger and the return
@@ -767,9 +671,13 @@ public:
     // When we observe a lot of speculation failures, we trigger a
     // reoptimization. But each time, we increase the optimization trigger
     // to avoid thrashing.
-    unsigned reoptimizationRetryCounter() const;
+    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
     void countReoptimization();
 #if ENABLE(JIT)
+    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); }
+    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters();
+    size_t calleeSaveSpaceAsVirtualRegisters();
+
     unsigned numberOfDFGCompiles();
 
     int32_t codeTypeThresholdMultiplier() const;
@@ -781,11 +689,11 @@ public:
         return &m_jitExecuteCounter.m_counter;
     }
 
-    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
-    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
-    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
+    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
+    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
+    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
 
-    const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
 
     unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
 
@@ -855,7 +763,14 @@ public:
     uint32_t exitCountThresholdForReoptimizationFromLoop();
     bool shouldReoptimizeNow();
     bool shouldReoptimizeFromLoopNow();
+
+    void setCalleeSaveRegisters(RegisterSet);
+    void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);
+    
+    RegisterAtOffsetList* calleeSaveRegisters() const { return m_calleeSaveRegisters.get(); }
 #else // No JIT
+    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
+    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 0; };
     void optimizeAfterWarmUp() { }
     unsigned numberOfDFGCompiles() { return 0; }
 #endif
@@ -866,10 +781,11 @@ public:
     void updateAllPredictions();
 
     unsigned frameRegisterCount();
+    int stackPointerOffset();
 
     bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
 
-    int hasDebuggerRequests() const { return !!m_debuggerRequests; }
+    bool hasDebuggerRequests() const { return m_debuggerRequests; }
     void* debuggerRequestsAddress() { return &m_debuggerRequests; }
 
     void addBreakpoint(unsigned numBreakpoints);
@@ -885,13 +801,18 @@ public:
     };
     void setSteppingMode(SteppingMode);
 
-    void clearDebuggerRequests() { m_debuggerRequests = 0; }
+    void clearDebuggerRequests()
+    {
+        m_steppingMode = SteppingModeDisabled;
+        m_numBreakpoints = 0;
+    }
 
+    bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }
+    
     // FIXME: Make these remaining members private.
 
-    int m_numCalleeRegisters;
+    int m_numCalleeLocals;
     int m_numVars;
-    bool m_isConstructor;
     
     // This is intentionally public; it's the responsibility of anyone doing any
     // of the following to hold the lock:
@@ -909,21 +830,67 @@ public:
     // Another exception to the rules is that the GC can do whatever it wants
     // without holding any locks, because the GC is guaranteed to wait until any
     // concurrent compilation threads finish what they're doing.
-    mutable ConcurrentJITLock m_lock;
-    
-    bool m_shouldAlwaysBeInlined;
-    bool m_allTransitionsHaveBeenMarked; // Initialized and used on every GC.
-    
-    bool m_didFailFTLCompilation;
+    mutable ConcurrentJSLock m_lock;
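
Concretely, the discipline described above means a profiling reader wraps its access in a ConcurrentJSLocker before touching any of the listed data; getStubInfoMap() has a locker-taking overload for exactly this, though the surrounding function here is hypothetical:

    void collectStubInfo(CodeBlock* codeBlock, StubInfoMap& result)
    {
        ConcurrentJSLocker locker(codeBlock->m_lock); // hold while reading profiles
        codeBlock->getStubInfoMap(locker, result);
    }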
+
+    bool m_visitWeaklyHasBeenCalled;
+
+    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
+
+#if ENABLE(JIT)
+    unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
+#endif
+
+    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
+
+    bool m_didFailJITCompilation : 1;
+    bool m_didFailFTLCompilation : 1;
+    bool m_hasBeenCompiledWithFTL : 1;
+    bool m_isConstructor : 1;
+    bool m_isStrictMode : 1;
+    unsigned m_codeType : 2; // CodeType
 
     // Internal methods for use by validation code. It would be private if it wasn't
     // for the fact that we use it from anonymous namespaces.
     void beginValidationDidFail();
     NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
 
+    struct RareData {
+        WTF_MAKE_FAST_ALLOCATED;
+    public:
+        Vector<HandlerInfo> m_exceptionHandlers;
+
+        // Buffers used for large array literals
+        Vector<Vector<JSValue>> m_constantBuffers;
+
+        // Jump Tables
+        Vector<SimpleJumpTable> m_switchJumpTables;
+        Vector<StringJumpTable> m_stringSwitchJumpTables;
+
+        DirectEvalCodeCache m_directEvalCodeCache;
+    };
+
+    void clearExceptionHandlers()
+    {
+        if (m_rareData)
+            m_rareData->m_exceptionHandlers.clear();
+    }
+
+    void appendExceptionHandler(const HandlerInfo& handler)
+    {
+        createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame.
+        m_rareData->m_exceptionHandlers.append(handler);
+    }
+
+    CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);
+
+#if ENABLE(JIT)
+    void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
+    std::optional<CodeOrigin> findPC(void* pc);
+#endif
+
 protected:
-    virtual void visitWeakReferences(SlotVisitor&) override;
-    virtual void finalizeUnconditionally() override;
+    void finalizeLLIntInlineCaches();
+    void finalizeBaselineJITInlineCaches();
 
 #if ENABLE(DFG_JIT)
     void tallyFrequentExitSites();
@@ -933,6 +900,8 @@ protected:
 
 private:
     friend class CodeBlockSet;
+
+    BytecodeLivenessAnalysis& livenessAnalysisSlow();
     
     CodeBlock* specialOSREntryBlockOrNull();
     
@@ -940,299 +909,147 @@ private:
     
     double optimizationThresholdScalingFactor();
 
-#if ENABLE(JIT)
-    ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
-#endif
-        
     void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
 
-    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
+    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation);
+
+    void replaceConstant(int index, JSValue value)
     {
-        size_t count = constants.size();
-        m_constantRegisters.resize(count);
-        for (size_t i = 0; i < count; i++)
-            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
+        ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
+        m_constantRegisters[index - FirstConstantRegisterIndex].set(m_globalObject->vm(), this, value);
     }
 
-    void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, const StubInfoMap& = StubInfoMap());
+    void dumpBytecode(
+        PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
+        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
 
     CString registerName(int r) const;
+    CString constantName(int index) const;
     void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
     void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
     void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
     void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
     void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
     enum CacheDumpMode { DumpCaches, DontDumpCaches };
-    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling);
+    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
     void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
-    void printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
-    {
-        out.printf("[%4d] %-17s ", location, op);
-    }
-
-    void printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
-    {
-        printLocationAndOp(out, exec, location, it, op);
-        out.printf("%s", registerName(operand).data());
-    }
+    void printPutByIdCacheStatus(PrintStream&, int location, const StubInfoMap&);
+    void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
 
     void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
     void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
     void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
     void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
-        
-#if ENABLE(DFG_JIT)
-    bool shouldImmediatelyAssumeLivenessDuringScan()
-    {
-        // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
-        // their weak references go stale. So if a baseline JIT CodeBlock gets
-        // scanned, we can assume that this means that it's live.
-        if (!JITCode::isOptimizingJIT(jitType()))
-            return true;
-
-        // For simplicity, we don't attempt to jettison code blocks during GC if
-        // they are executing. Instead we strongly mark their weak references to
-        // allow them to continue to execute soundly.
-        if (m_mayBeExecuting)
-            return true;
-
-        if (Options::forceDFGCodeBlockLiveness())
-            return true;
+    void dumpArithProfile(PrintStream&, ArithProfile*, bool& hasPrintedProfiling);
 
-        return false;
-    }
-#else
-    bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
-#endif
+    bool shouldVisitStrongly(const ConcurrentJSLocker&);
+    bool shouldJettisonDueToWeakReference();
+    bool shouldJettisonDueToOldAge(const ConcurrentJSLocker&);
     
-    void propagateTransitions(SlotVisitor&);
-    void determineLiveness(SlotVisitor&);
+    void propagateTransitions(const ConcurrentJSLocker&, SlotVisitor&);
+    void determineLiveness(const ConcurrentJSLocker&, SlotVisitor&);
         
-    void stronglyVisitStrongReferences(SlotVisitor&);
-    void stronglyVisitWeakReferences(SlotVisitor&);
+    void stronglyVisitStrongReferences(const ConcurrentJSLocker&, SlotVisitor&);
+    void stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor&);
+    void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&);
+
+    std::chrono::milliseconds timeSinceCreation()
+    {
+        return std::chrono::duration_cast<std::chrono::milliseconds>(
+            std::chrono::steady_clock::now() - m_creationTime);
+    }
 
     void createRareDataIfNecessary()
     {
         if (!m_rareData)
-            m_rareData = adoptPtr(new RareData);
+            m_rareData = std::make_unique<RareData>();
     }
-    
-#if ENABLE(JIT)
-    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
-    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
-#endif
+
+    void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&);
+
     WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
     int m_numParameters;
+    int m_numberOfArgumentsToSkip { 0 };
     union {
         unsigned m_debuggerRequests;
         struct {
+            unsigned m_hasDebuggerStatement : 1;
             unsigned m_steppingMode : 1;
-            unsigned m_numBreakpoints : 31;
+            unsigned m_numBreakpoints : 30;
         };
     };
-    WriteBarrier<ScriptExecutable> m_ownerExecutable;
+    WriteBarrier<ExecutableBase> m_ownerExecutable;
     VM* m_vm;
 
     RefCountedArray<Instruction> m_instructions;
-    WriteBarrier<SymbolTable> m_symbolTable;
     VirtualRegister m_thisRegister;
-    VirtualRegister m_argumentsRegister;
-    VirtualRegister m_activationRegister;
-
-    bool m_isStrictMode;
-    bool m_needsActivation;
-    bool m_mayBeExecuting;
-    uint8_t m_visitAggregateHasBeenCalled;
+    VirtualRegister m_scopeRegister;
+    mutable CodeBlockHash m_hash;
 
     RefPtr<SourceProvider> m_source;
     unsigned m_sourceOffset;
     unsigned m_firstLineColumnOffset;
-    unsigned m_codeType;
 
-#if ENABLE(LLINT)
-    Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
+    RefCountedArray<LLIntCallLinkInfo> m_llintCallLinkInfos;
     SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
-#endif
+    StructureWatchpointMap m_llintGetByIdWatchpointMap;
     RefPtr<JITCode> m_jitCode;
-    MacroAssemblerCodePtr m_jitCodeWithArityCheck;
 #if ENABLE(JIT)
+    std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
     Bag<StructureStubInfo> m_stubInfos;
-    Vector<ByValInfo> m_byValInfos;
-    Vector<CallLinkInfo> m_callLinkInfos;
+    Bag<JITAddIC> m_addICs;
+    Bag<JITMulIC> m_mulICs;
+    Bag<JITNegIC> m_negICs;
+    Bag<JITSubIC> m_subICs;
+    Bag<ByValInfo> m_byValInfos;
+    Bag<CallLinkInfo> m_callLinkInfos;
     SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
+    SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
+    std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
 #endif
-    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+    std::unique_ptr<CompactJITCodeMap> m_jitCodeMap;
 #if ENABLE(DFG_JIT)
     // This is relevant to non-DFG code blocks that serve as the profiled code block
     // for DFG code blocks.
     DFG::ExitProfile m_exitProfile;
     CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
 #endif
-    Vector<ValueProfile> m_argumentValueProfiles;
-    Vector<ValueProfile> m_valueProfiles;
+    RefCountedArray<ValueProfile> m_argumentValueProfiles;
+    RefCountedArray<ValueProfile> m_valueProfiles;
     SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
-    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
-    Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
+    RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles;
     ArrayProfileVector m_arrayProfiles;
-    Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
+    RefCountedArray<ObjectAllocationProfile> m_objectAllocationProfiles;
 
     // Constant Pool
     COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
     // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
     // it, so we're stuck with it for now.
     Vector<WriteBarrier<Unknown>> m_constantRegisters;
-    Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
-    Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;
+    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
+    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls;
+    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs;
 
-    RefPtr<CodeBlock> m_alternative;
+    WriteBarrier<CodeBlock> m_alternative;
     
-    ExecutionCounter m_llintExecuteCounter;
+    BaselineExecutionCounter m_llintExecuteCounter;
 
-    ExecutionCounter m_jitExecuteCounter;
-    int32_t m_totalJITExecutions;
+    BaselineExecutionCounter m_jitExecuteCounter;
     uint32_t m_osrExitCounter;
     uint16_t m_optimizationDelayCounter;
     uint16_t m_reoptimizationRetryCounter;
-    
-    mutable CodeBlockHash m_hash;
-
-    std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
-
-    struct RareData {
-        WTF_MAKE_FAST_ALLOCATED;
-    public:
-        Vector<HandlerInfo> m_exceptionHandlers;
-
-        // Buffers used for large array literals
-        Vector<Vector<JSValue>> m_constantBuffers;
-
-        // Jump Tables
-        Vector<SimpleJumpTable> m_switchJumpTables;
-        Vector<StringJumpTable> m_stringSwitchJumpTables;
-
-        EvalCodeCache m_evalCodeCache;
-    };
-#if COMPILER(MSVC)
-    friend void WTF::deleteOwnedPtr<RareData>(RareData*);
-#endif
-    OwnPtr<RareData> m_rareData;
-#if ENABLE(JIT)
-    DFG::CapabilityLevel m_capabilityLevelState;
-#endif
-};
-
-// Program code is not marked by any function, so we make the global object
-// responsible for marking it.
-
-class GlobalCodeBlock : public CodeBlock {
-protected:
-    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
-    : CodeBlock(CopyParsedBlock, other)
-    {
-    }
-        
-    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
-        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
-    {
-    }
-};
-
-class ProgramCodeBlock : public GlobalCodeBlock {
-public:
-    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
-    : GlobalCodeBlock(CopyParsedBlock, other)
-    {
-    }
 
-    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
-        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
-    {
-    }
+    std::chrono::steady_clock::time_point m_creationTime;
 
-#if ENABLE(JIT)
-protected:
-    virtual CodeBlock* replacement() override;
-    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
-};
-
-class EvalCodeBlock : public GlobalCodeBlock {
-public:
-    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
-    : GlobalCodeBlock(CopyParsedBlock, other)
-    {
-    }
-        
-    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
-        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
-    {
-    }
-    
-    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
-    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
-    
-#if ENABLE(JIT)
-protected:
-    virtual CodeBlock* replacement() override;
-    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
-    
-private:
-    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
-};
+    std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
 
-class FunctionCodeBlock : public CodeBlock {
-public:
-    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
-    : CodeBlock(CopyParsedBlock, other)
-    {
-    }
+    std::unique_ptr<RareData> m_rareData;
 
-    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
-        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
-    {
-    }
-    
-#if ENABLE(JIT)
-protected:
-    virtual CodeBlock* replacement() override;
-    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
+    UnconditionalFinalizer m_unconditionalFinalizer;
+    WeakReferenceHarvester m_weakReferenceHarvester;
 };
 
-inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
-{
-    RELEASE_ASSERT(inlineCallFrame);
-    ExecutableBase* executable = inlineCallFrame->executable.get();
-    RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
-    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
-}
-
-inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
-{
-    if (codeOrigin.inlineCallFrame)
-        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
-    return baselineCodeBlock;
-}
-
-inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
-{
-    if (argument >= static_cast(symbolTable()->parameterCount()))
-        return CallFrame::argumentOffset(argument);
-    
-    const SlowArgument* slowArguments = symbolTable()->slowArguments();
-    if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
-        return CallFrame::argumentOffset(argument);
-    
-    ASSERT(slowArguments[argument].status == SlowArgument::Captured);
-    return slowArguments[argument].index;
-}
-
-inline bool CodeBlock::hasSlowArguments()
-{
-    return !!symbolTable()->slowArguments();
-}
-
 inline Register& ExecState::r(int index)
 {
     CodeBlock* codeBlock = this->codeBlock();
@@ -1241,44 +1058,47 @@ inline Register& ExecState::r(int index)
     return this[index];
 }
 
+inline Register& ExecState::r(VirtualRegister reg)
+{
+    return r(reg.offset());
+}
+
 inline Register& ExecState::uncheckedR(int index)
 {
     RELEASE_ASSERT(index < FirstConstantRegisterIndex);
     return this[index];
 }
 
-inline JSValue ExecState::argumentAfterCapture(size_t argument)
+inline Register& ExecState::uncheckedR(VirtualRegister reg)
 {
-    if (argument >= argumentCount())
-        return jsUndefined();
-    
-    if (!codeBlock())
-        return this[argumentOffset(argument)].jsValue();
-    
-    return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
+    return uncheckedR(reg.offset());
 }
 
-inline void CodeBlockSet::mark(void* candidateCodeBlock)
+inline void CodeBlock::clearVisitWeaklyHasBeenCalled()
 {
-    // We have to check for 0 and -1 because those are used by the HashMap as markers.
-    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
-    
-    // This checks for both of those nasty cases in one go.
-    // 0 + 1 = 1
-    // -1 + 1 = 0
-    if (value + 1 <= 1)
-        return;
-    
-    HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
-    if (iter == m_set.end())
-        return;
-    
-    (*iter)->m_mayBeExecuting = true;
-#if ENABLE(GGC)
-    m_currentlyExecuting.append(static_cast<CodeBlock*>(candidateCodeBlock));
-#endif
+    m_visitWeaklyHasBeenCalled = false;
 }
 
-} // namespace JSC
+template <typename ExecutableType>
+JSObject* ScriptExecutable::prepareForExecution(VM& vm, JSFunction* function, JSScope* scope, CodeSpecializationKind kind, CodeBlock*& resultCodeBlock)
+{
+    if (hasJITCodeFor(kind)) {
+        if (std::is_same<ExecutableType, EvalExecutable>::value)
+            resultCodeBlock = jsCast<CodeBlock*>(jsCast<EvalExecutable*>(this)->codeBlock());
+        else if (std::is_same<ExecutableType, ProgramExecutable>::value)
+            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->codeBlock());
+        else if (std::is_same<ExecutableType, ModuleProgramExecutable>::value)
+            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->codeBlock());
+        else if (std::is_same<ExecutableType, FunctionExecutable>::value)
+            resultCodeBlock = jsCast<CodeBlock*>(jsCast<FunctionExecutable*>(this)->codeBlockFor(kind));
+        else
+            RELEASE_ASSERT_NOT_REACHED();
+        return nullptr;
+    }
+    return prepareForExecutionImpl(vm, function, scope, kind, resultCodeBlock);
+}
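
The chain above is an ordinary runtime `if` over compile-time constants: every branch must type-check for every ExecutableType, and the optimizer discards the dead ones (this code predates `if constexpr`). The same dispatch shape in a standalone sketch:

    #include <cstdio>
    #include <type_traits>

    template<typename T>
    const char* kindName()
    {
        if (std::is_same<T, int>::value)      // constant-folded per instantiation
            return "int";
        if (std::is_same<T, double>::value)
            return "double";
        return "other";
    }

    int main()
    {
        std::printf("%s %s\n", kindName<int>(), kindName<float>()); // prints: int other
        return 0;
    }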
 
-#endif // CodeBlock_h
+#define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \
+    (codeBlock->vm()->logEvent(codeBlock, summary, [&] () { return toCString details; }))
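
The lambda defers the toCString(...) formatting until logEvent() decides the event is actually recorded, so a disabled log costs one function call rather than a string build. A call site looks roughly like this (the summary and detail values are illustrative):

    // CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization));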
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockHash.h b/Source/JavaScriptCore/bytecode/CodeBlockHash.h
index 4e3398867..b828fe808 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockHash.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlockHash.h
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef CodeBlockHash_h
-#define CodeBlockHash_h
+#pragma once
 
 #include "CodeSpecializationKind.h"
 #include <wtf/PrintStream.h>
@@ -77,5 +76,3 @@ private:
 };
 
 } // namespace JSC
-
-#endif // CodeBlockHash_h
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
index be50c9778..50cf7378d 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,18 +28,16 @@
 
 #include "CodeBlock.h"
 #include "DFGCommon.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
-void CodeBlockJettisoningWatchpoint::fireInternal()
+void CodeBlockJettisoningWatchpoint::fireInternal(const FireDetail& detail)
 {
-    if (DFG::shouldShowDisassembly())
+    if (DFG::shouldDumpDisassembly())
         dataLog("Firing watchpoint ", RawPointer(this), " on ", *m_codeBlock, "\n");
 
-    m_codeBlock->jettison(CountReoptimization);
-
-    if (isOnList())
-        remove();
+    m_codeBlock->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &detail);
 }
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
index 89d87f4d0..635cd78ca 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef CodeBlockJettisoningWatchpoint_h
-#define CodeBlockJettisoningWatchpoint_h
+#pragma once
 
 #include "Watchpoint.h"
 
@@ -34,24 +33,16 @@ class CodeBlock;
 
 class CodeBlockJettisoningWatchpoint : public Watchpoint {
 public:
-    CodeBlockJettisoningWatchpoint()
-        : m_codeBlock(0)
-    {
-    }
-    
     CodeBlockJettisoningWatchpoint(CodeBlock* codeBlock)
         : m_codeBlock(codeBlock)
     {
     }
     
 protected:
-    virtual void fireInternal() override;
+    void fireInternal(const FireDetail&) override;
 
 private:
     CodeBlock* m_codeBlock;
 };
 
 } // namespace JSC
-
-#endif // CodeBlockJettisoningWatchpoint_h
-
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h b/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h
index d87085841..37f83c4b1 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef CodeBlockWithJITType_h
-#define CodeBlockWithJITType_h
+#pragma once
 
 #include "CodeBlock.h"
 
@@ -51,6 +50,3 @@ private:
 };
 
 } // namespace JSC
-
-#endif // CodeBlockWithJITType_h
-
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
index 39b83fead..a52df924f 100644
--- a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,15 +28,15 @@
 
 #include "CallFrame.h"
 #include "CodeBlock.h"
-#include "Executable.h"
-#include "Operations.h"
+#include "InlineCallFrame.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
 unsigned CodeOrigin::inlineDepthForCallFrame(InlineCallFrame* inlineCallFrame)
 {
     unsigned result = 1;
-    for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
+    for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
         result++;
     return result;
 }
@@ -45,18 +45,90 @@ unsigned CodeOrigin::inlineDepth() const
 {
     return inlineDepthForCallFrame(inlineCallFrame);
 }
+
+bool CodeOrigin::isApproximatelyEqualTo(const CodeOrigin& other) const
+{
+    CodeOrigin a = *this;
+    CodeOrigin b = other;
+
+    if (!a.isSet())
+        return !b.isSet();
+    if (!b.isSet())
+        return false;
+    
+    if (a.isHashTableDeletedValue())
+        return b.isHashTableDeletedValue();
+    if (b.isHashTableDeletedValue())
+        return false;
     
+    for (;;) {
+        ASSERT(a.isSet());
+        ASSERT(b.isSet());
+        
+        if (a.bytecodeIndex != b.bytecodeIndex)
+            return false;
+        
+        if ((!!a.inlineCallFrame) != (!!b.inlineCallFrame))
+            return false;
+        
+        if (!a.inlineCallFrame)
+            return true;
+        
+        if (a.inlineCallFrame->baselineCodeBlock.get() != b.inlineCallFrame->baselineCodeBlock.get())
+            return false;
+        
+        a = a.inlineCallFrame->directCaller;
+        b = b.inlineCallFrame->directCaller;
+    }
+}
+
+unsigned CodeOrigin::approximateHash() const
+{
+    if (!isSet())
+        return 0;
+    if (isHashTableDeletedValue())
+        return 1;
+    
+    unsigned result = 2;
+    CodeOrigin codeOrigin = *this;
+    for (;;) {
+        result += codeOrigin.bytecodeIndex;
+        
+        if (!codeOrigin.inlineCallFrame)
+            return result;
+        
+        result += WTF::PtrHash<CodeBlock*>::hash(codeOrigin.inlineCallFrame->baselineCodeBlock.get());
+        
+        codeOrigin = codeOrigin.inlineCallFrame->directCaller;
+    }
+}
+
 Vector<CodeOrigin> CodeOrigin::inlineStack() const
 {
     Vector<CodeOrigin> result(inlineDepth());
     result.last() = *this;
     unsigned index = result.size() - 2;
-    for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
-        result[index--] = current->caller;
+    for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
+        result[index--] = current->directCaller;
     RELEASE_ASSERT(!result[0].inlineCallFrame);
     return result;
 }
 
+CodeBlock* CodeOrigin::codeOriginOwner() const
+{
+    if (!inlineCallFrame)
+        return 0;
+    return inlineCallFrame->baselineCodeBlock.get();
+}
+
+int CodeOrigin::stackOffset() const
+{
+    if (!inlineCallFrame)
+        return 0;
+    
+    return inlineCallFrame->stackOffset;
+}
+
 void CodeOrigin::dump(PrintStream& out) const
 {
     if (!isSet()) {
@@ -70,7 +142,7 @@ void CodeOrigin::dump(PrintStream& out) const
             out.print(" --> ");
         
         if (InlineCallFrame* frame = stack[i].inlineCallFrame) {
-            out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->executable.get()), "> ");
+            out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->baselineCodeBlock.get()), "> ");
             if (frame->isClosureCall)
                 out.print("(closure) ");
         }
@@ -84,51 +156,4 @@ void CodeOrigin::dumpInContext(PrintStream& out, DumpContext*) const
     dump(out);
 }
 
-JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const
-{
-    return jsCast<JSFunction*>(calleeRecovery.recover(exec));
-}
-
-CodeBlockHash InlineCallFrame::hash() const
-{
-    return jsCast<FunctionExecutable*>(executable.get())->codeBlockFor(
-        specializationKind())->hash();
-}
-
-CString InlineCallFrame::inferredName() const
-{
-    return jsCast<FunctionExecutable*>(executable.get())->inferredName().utf8();
-}
-
-CodeBlock* InlineCallFrame::baselineCodeBlock() const
-{
-    return jsCast<FunctionExecutable*>(executable.get())->baselineCodeBlockFor(specializationKind());
-}
-
-void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const
-{
-    out.print(inferredName(), "#", hash());
-}
-
-void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
-{
-    out.print(briefFunctionInformation(), ":<", RawPointer(executable.get()));
-    if (executable->isStrictMode())
-        out.print(" (StrictMode)");
-    out.print(", bc#", caller.bytecodeIndex, ", ", specializationKind());
-    if (isClosureCall)
-        out.print(", closure call");
-    else
-        out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
-    out.print(", numArgs+this = ", arguments.size());
-    out.print(", stack < loc", VirtualRegister(stackOffset).toLocal());
-    out.print(">");
-}
-
-void InlineCallFrame::dump(PrintStream& out) const
-{
-    dumpInContext(out, 0);
-}
-
 } // namespace JSC
-
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.h b/Source/JavaScriptCore/bytecode/CodeOrigin.h
index ed660c247..38712f964 100644
--- a/Source/JavaScriptCore/bytecode/CodeOrigin.h
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,15 +23,12 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef CodeOrigin_h
-#define CodeOrigin_h
+#pragma once
 
+#include "CallMode.h"
 #include "CodeBlockHash.h"
 #include "CodeSpecializationKind.h"
-#include "JSFunction.h"
-#include "ValueRecovery.h"
 #include "WriteBarrier.h"
-#include 
 #include 
 #include 
 #include 
@@ -39,10 +36,9 @@
 
 namespace JSC {
 
+class CodeBlock;
+struct DumpContext;
 struct InlineCallFrame;
-class ExecState;
-class ScriptExecutable;
-class JSFunction;
 
 struct CodeOrigin {
     static const unsigned invalidBytecodeIndex = UINT_MAX;
@@ -63,7 +59,7 @@ struct CodeOrigin {
     
     CodeOrigin(WTF::HashTableDeletedValueType)
         : bytecodeIndex(invalidBytecodeIndex)
-        , inlineCallFrame(bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1)))
+        , inlineCallFrame(deletedMarker())
     {
     }
     
@@ -75,6 +71,7 @@ struct CodeOrigin {
     }
     
     bool isSet() const { return bytecodeIndex != invalidBytecodeIndex; }
+    explicit operator bool() const { return isSet(); }
     
     bool isHashTableDeletedValue() const
     {
@@ -87,7 +84,7 @@ struct CodeOrigin {
     
     // If the code origin corresponds to inlined code, gives you the heap object that
     // would have owned the code if it had not been inlined. Otherwise returns 0.
-    ScriptExecutable* codeOriginOwner() const;
+    CodeBlock* codeOriginOwner() const;
     
     int stackOffset() const;
     
@@ -97,69 +94,28 @@ struct CodeOrigin {
     bool operator==(const CodeOrigin& other) const;
     bool operator!=(const CodeOrigin& other) const { return !(*this == other); }
     
+    // This checks if the two code origins correspond to the same stack trace snippets,
+    // but ignore whether the InlineCallFrame's are identical.
+    bool isApproximatelyEqualTo(const CodeOrigin& other) const;
+    
+    unsigned approximateHash() const;
+
+    template <typename Function>
+    void walkUpInlineStack(const Function&);
+    
     // Get the inline stack. This is slow, and is intended for debugging only.
     Vector inlineStack() const;
     
-    void dump(PrintStream&) const;
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
     void dumpInContext(PrintStream&, DumpContext*) const;
-};
-
-struct InlineCallFrame {
-    Vector<ValueRecovery> arguments;
-    WriteBarrier<ScriptExecutable> executable;
-    ValueRecovery calleeRecovery;
-    CodeOrigin caller;
-    BitVector capturedVars; // Indexed by the machine call frame's variable numbering.
-    signed stackOffset : 30;
-    bool isCall : 1;
-    bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually.
-    VirtualRegister argumentsRegister; // This is only set if the code uses arguments. The unmodified arguments register follows the unmodifiedArgumentsRegister() convention (see CodeBlock.h).
-    
-    // There is really no good notion of a "default" set of values for
-    // InlineCallFrame's fields. This constructor is here just to reduce confusion if
-    // we forgot to initialize explicitly.
-    InlineCallFrame()
-        : stackOffset(0)
-        , isCall(false)
-        , isClosureCall(false)
-    {
-    }
-    
-    CodeSpecializationKind specializationKind() const { return specializationFromIsCall(isCall); }
 
-    JSFunction* calleeConstant() const
+private:
+    static InlineCallFrame* deletedMarker()
     {
-        if (calleeRecovery.isConstant())
-            return jsCast<JSFunction*>(calleeRecovery.constant());
-        return 0;
+        return bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1));
     }
-    
-    // Get the callee given a machine call frame to which this InlineCallFrame belongs.
-    JSFunction* calleeForCallFrame(ExecState*) const;
-    
-    CString inferredName() const;
-    CodeBlockHash hash() const;
-    
-    CodeBlock* baselineCodeBlock() const;
-    
-    ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); }
-    ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); }
-
-    void dumpBriefFunctionInformation(PrintStream&) const;
-    void dump(PrintStream&) const;
-    void dumpInContext(PrintStream&, DumpContext*) const;
-
-    MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
 };
 
-inline int CodeOrigin::stackOffset() const
-{
-    if (!inlineCallFrame)
-        return 0;
-    
-    return inlineCallFrame->stackOffset;
-}
-
 inline unsigned CodeOrigin::hash() const
 {
     return WTF::IntHash<unsigned>::hash(bytecodeIndex) +
@@ -171,13 +127,6 @@ inline bool CodeOrigin::operator==(const CodeOrigin& other) const
     return bytecodeIndex == other.bytecodeIndex
         && inlineCallFrame == other.inlineCallFrame;
 }
-    
-inline ScriptExecutable* CodeOrigin::codeOriginOwner() const
-{
-    if (!inlineCallFrame)
-        return 0;
-    return inlineCallFrame->executable.get();
-}
 
 struct CodeOriginHash {
     static unsigned hash(const CodeOrigin& key) { return key.hash(); }
@@ -185,6 +134,12 @@ struct CodeOriginHash {
     static const bool safeToCompareToEmptyOrDeleted = true;
 };
 
+struct CodeOriginApproximateHash {
+    static unsigned hash(const CodeOrigin& key) { return key.approximateHash(); }
+    static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a.isApproximatelyEqualTo(b); }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
 } // namespace JSC
 
 namespace WTF {
@@ -200,6 +155,3 @@ template<> struct HashTraits<JSC::CodeOrigin> : SimpleClassHashTraits<JSC::CodeOrigin>
diff --git a/Source/JavaScriptCore/bytecode/CodeType.h b/Source/JavaScriptCore/bytecode/CodeType.h
--- a/Source/JavaScriptCore/bytecode/CodeType.h
+++ b/Source/JavaScriptCore/bytecode/CodeType.h
+#pragma once
 
 namespace JSC {
 
-enum CodeType { GlobalCode, EvalCode, FunctionCode };
+enum CodeType { GlobalCode, EvalCode, FunctionCode, ModuleCode };
 
 } // namespace JSC
 
@@ -40,6 +37,3 @@ class PrintStream;
 void printInternal(PrintStream&, JSC::CodeType);
 
 } // namespace WTF
-
-#endif // CodeType_h
-
diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp
new file mode 100644
index 000000000..0622553c0
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ComplexGetStatus.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+ComplexGetStatus ComplexGetStatus::computeFor(
+    Structure* headStructure, const ObjectPropertyConditionSet& conditionSet, UniquedStringImpl* uid)
+{
+    // FIXME: We should assert that we never see a structure that
+    // getOwnPropertySlotIsImpure() but for which we don't
+    // newImpurePropertyFiresWatchpoints(). We're not at a point where we can do
+    // that, yet.
+    // https://bugs.webkit.org/show_bug.cgi?id=131810
+    
+    ASSERT(conditionSet.isValid());
+    
+    if (headStructure->takesSlowPathInDFGForImpureProperty())
+        return takesSlowPath();
+    
+    ComplexGetStatus result;
+    result.m_kind = Inlineable;
+    
+    if (!conditionSet.isEmpty()) {
+        result.m_conditionSet = conditionSet;
+        
+        if (!result.m_conditionSet.structuresEnsureValidity())
+            return skip();
+
+        unsigned numberOfSlotBases =
+            result.m_conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence);
+        RELEASE_ASSERT(numberOfSlotBases <= 1);
+        if (!numberOfSlotBases) {
+            ASSERT(result.m_offset == invalidOffset);
+            return result;
+        }
+        ObjectPropertyCondition base = result.m_conditionSet.slotBaseCondition();
+        ASSERT(base.kind() == PropertyCondition::Presence);
+
+        result.m_offset = base.offset();
+    } else
+        result.m_offset = headStructure->getConcurrently(uid);
+    
+    if (!isValidOffset(result.m_offset))
+        return takesSlowPath();
+    
+    return result;
+}
+
+} // namespace JSC
+
+
diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.h b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h
new file mode 100644
index 000000000..d94b312ab
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+
+namespace JSC {
+
+class CodeBlock;
+class StructureChain;
+
+// This class is useful for figuring out how to inline a cached get-like access. We
+// say "get-like" because this is appropriate for loading the GetterSetter object in
+// a put_by_id that hits a setter. Notably, this doesn't figure out how to call
+// accessors, or even whether they should be called. What it gives us, is a way of
+// determining how to load the value from the requested property (identified by a
+// StringImpl* uid) from an object of the given structure in the given CodeBlock,
+// assuming that such an access had already been cached by Repatch (and so Repatch had
+// already done a bunch of safety checks). This doesn't reexecute any checks that
+// Repatch would have executed, and for prototype chain accesses, it doesn't ask the
+// objects in the prototype chain whether their getOwnPropertySlot would attempt to
+// intercept the access - so this really is only appropriate if you already know that
+// one of the JITOperations had OK'd this for caching and that Repatch concurred.
+//
+// The typical use pattern is something like:
+//
+//     ComplexGetStatus status = ComplexGetStatus::computeFor(...);
+//     switch (status.kind()) {
+//     case ComplexGetStatus::ShouldSkip:
+//         // Handle the case where this kind of access is possibly safe but wouldn't
+//         // pass the required safety checks. For example, if an IC gives us a list of
+//         // accesses and one of them is ShouldSkip, then we should pretend as if it
+//         // wasn't even there.
+//         break;
+//     case ComplexGetStatus::TakesSlowPath:
+//         // This kind of access is not safe to inline. Bail out of any attempts to
+//         // inline.
+//         break;
+//     case ComplexGetStatus::Inlineable:
+//         // The good stuff goes here. If it's Inlineable then the other properties of
+//         // the 'status' object will tell you everything you need to know about how
+//         // to execute the get-like operation.
+//         break;
+//     }
+
+class ComplexGetStatus {
+public:
+    enum Kind {
+        ShouldSkip,
+        TakesSlowPath,
+        Inlineable
+    };
+    
+    ComplexGetStatus()
+        : m_kind(ShouldSkip)
+        , m_offset(invalidOffset)
+    {
+    }
+    
+    static ComplexGetStatus skip()
+    {
+        return ComplexGetStatus();
+    }
+    
+    static ComplexGetStatus takesSlowPath()
+    {
+        ComplexGetStatus result;
+        result.m_kind = TakesSlowPath;
+        return result;
+    }
+    
+    static ComplexGetStatus computeFor(
+        Structure* headStructure, const ObjectPropertyConditionSet&, UniquedStringImpl* uid);
+    
+    Kind kind() const { return m_kind; }
+    PropertyOffset offset() const { return m_offset; }
+    const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+    
+private:
+    Kind m_kind;
+    PropertyOffset m_offset;
+    ObjectPropertyConditionSet m_conditionSet;
+};
+
+} // namespace JSC
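
A caller-side sketch of the three-way dispatch that the header comment above prescribes. Only the ComplexGetStatus API comes from this patch; tryInlineGet and emitLoadAtOffset are hypothetical names used for illustration:

    // Sketch, not JSC code: consume ComplexGetStatus per the documented pattern.
    static bool tryInlineGet(Structure* headStructure,
        const ObjectPropertyConditionSet& conditions, UniquedStringImpl* uid)
    {
        ComplexGetStatus status = ComplexGetStatus::computeFor(headStructure, conditions, uid);
        switch (status.kind()) {
        case ComplexGetStatus::ShouldSkip:
            return false; // Pretend this access was never in the IC list.
        case ComplexGetStatus::TakesSlowPath:
            return false; // Unsafe to inline at all; bail out.
        case ComplexGetStatus::Inlineable:
            // status.offset() locates the value; status.conditionSet() guards it.
            emitLoadAtOffset(status.offset()); // hypothetical emitter
            return true;
        }
        return false;
    }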
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
index 5d05bbb2f..64fe9a387 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,19 +28,32 @@
 
 #if ENABLE(DFG_JIT)
 
-#include <wtf/PassOwnPtr.h>
+#include "CodeBlock.h"
+#include "VMInlines.h"
 
 namespace JSC { namespace DFG {
 
+void FrequentExitSite::dump(PrintStream& out) const
+{
+    out.print("bc#", m_bytecodeOffset, ": ", m_kind, "/", m_jitType);
+}
+
 ExitProfile::ExitProfile() { }
 ExitProfile::~ExitProfile() { }
 
-bool ExitProfile::add(const ConcurrentJITLocker&, const FrequentExitSite& site)
+bool ExitProfile::add(const ConcurrentJSLocker&, CodeBlock* owner, const FrequentExitSite& site)
 {
+    ASSERT(site.jitType() != ExitFromAnything);
+
+    CODEBLOCK_LOG_EVENT(owner, "frequentExit", (site));
+    
+    if (Options::verboseExitProfile())
+        dataLog(pointerDump(owner), ": Adding exit site: ", site, "\n");
+    
     // If we've never seen any frequent exits then create the list and put this site
     // into it.
     if (!m_frequentExitSites) {
-        m_frequentExitSites = adoptPtr(new Vector<FrequentExitSite>());
+        m_frequentExitSites = std::make_unique<Vector<FrequentExitSite>>();
         m_frequentExitSites->append(site);
         return true;
     }
@@ -72,13 +85,13 @@ Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex)
     return result;
 }
 
-bool ExitProfile::hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite& site) const
+bool ExitProfile::hasExitSite(const ConcurrentJSLocker&, const FrequentExitSite& site) const
 {
     if (!m_frequentExitSites)
         return false;
     
     for (unsigned i = m_frequentExitSites->size(); i--;) {
-        if (m_frequentExitSites->at(i) == site)
+        if (site.subsumes(m_frequentExitSites->at(i)))
             return true;
     }
     return false;
@@ -87,7 +100,7 @@ bool ExitProfile::hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite
 QueryableExitProfile::QueryableExitProfile() { }
 QueryableExitProfile::~QueryableExitProfile() { }
 
-void QueryableExitProfile::initialize(const ConcurrentJITLocker&, const ExitProfile& profile)
+void QueryableExitProfile::initialize(const ConcurrentJSLocker&, const ExitProfile& profile)
 {
     if (!profile.m_frequentExitSites)
         return;
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
index ab1a60d58..337e3ec01 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.h
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2014, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,13 +23,14 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef DFGExitProfile_h
-#define DFGExitProfile_h
+#pragma once
 
-#include "ConcurrentJITLock.h"
+#if ENABLE(DFG_JIT)
+
+#include "ConcurrentJSLock.h"
 #include "ExitKind.h"
+#include "ExitingJITType.h"
 #include <wtf/HashSet.h>
-#include <wtf/OwnPtr.h>
 #include <wtf/Vector.h>
 
 namespace JSC { namespace DFG {
@@ -39,18 +40,21 @@ public:
     FrequentExitSite()
         : m_bytecodeOffset(0) // 0 = empty value
         , m_kind(ExitKindUnset)
+        , m_jitType(ExitFromAnything)
     {
     }
     
     FrequentExitSite(WTF::HashTableDeletedValueType)
         : m_bytecodeOffset(1) // 1 = deleted value
         , m_kind(ExitKindUnset)
+        , m_jitType(ExitFromAnything)
     {
     }
     
-    explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind)
+    explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind, ExitingJITType jitType = ExitFromAnything)
         : m_bytecodeOffset(bytecodeOffset)
         , m_kind(kind)
+        , m_jitType(jitType)
     {
         if (m_kind == ArgumentsEscaped) {
             // Count this one globally. It doesn't matter where in the code block the arguments escaped;
@@ -61,9 +65,10 @@ public:
     
     // Use this constructor if you wish for the exit site to be counted globally within its
     // code block.
-    explicit FrequentExitSite(ExitKind kind)
+    explicit FrequentExitSite(ExitKind kind, ExitingJITType jitType = ExitFromAnything)
         : m_bytecodeOffset(0)
         , m_kind(kind)
+        , m_jitType(jitType)
     {
     }
     
@@ -75,25 +80,48 @@ public:
     bool operator==(const FrequentExitSite& other) const
     {
         return m_bytecodeOffset == other.m_bytecodeOffset
-            && m_kind == other.m_kind;
+            && m_kind == other.m_kind
+            && m_jitType == other.m_jitType;
+    }
+    
+    bool subsumes(const FrequentExitSite& other) const
+    {
+        if (m_bytecodeOffset != other.m_bytecodeOffset)
+            return false;
+        if (m_kind != other.m_kind)
+            return false;
+        if (m_jitType == ExitFromAnything)
+            return true;
+        return m_jitType == other.m_jitType;
     }
     
     unsigned hash() const
     {
-        return WTF::intHash(m_bytecodeOffset) + m_kind;
+        return WTF::intHash(m_bytecodeOffset) + m_kind + m_jitType * 7;
     }
     
     unsigned bytecodeOffset() const { return m_bytecodeOffset; }
     ExitKind kind() const { return m_kind; }
+    ExitingJITType jitType() const { return m_jitType; }
+    
+    FrequentExitSite withJITType(ExitingJITType jitType) const
+    {
+        FrequentExitSite result = *this;
+        result.m_jitType = jitType;
+        return result;
+    }
 
     bool isHashTableDeletedValue() const
     {
         return m_kind == ExitKindUnset && m_bytecodeOffset;
     }
+    
+    void dump(PrintStream& out) const;
 
 private:
     unsigned m_bytecodeOffset;
     ExitKind m_kind;
+    ExitingJITType m_jitType;
 };
 
 struct FrequentExitSiteHash {
@@ -104,6 +132,7 @@ struct FrequentExitSiteHash {
 
 } } // namespace JSC::DFG
 
+
 namespace WTF {
 
 template<typename T> struct DefaultHash;
@@ -131,7 +160,7 @@ public:
     // be called a fixed number of times per recompilation. Recompilation is
     // rare to begin with, and implies doing O(n) operations on the CodeBlock
     // anyway.
-    bool add(const ConcurrentJITLocker&, const FrequentExitSite&);
+    bool add(const ConcurrentJSLocker&, CodeBlock* owner, const FrequentExitSite&);
     
     // Get the frequent exit sites for a bytecode index. This is O(n), and is
     // meant to only be used from debugging/profiling code.
@@ -141,12 +170,12 @@ public:
     // in the compiler. It should be strictly cheaper than building a
     // QueryableExitProfile, if you really expect this to be called infrequently
     // and you believe that there are few exit sites.
-    bool hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite&) const;
-    bool hasExitSite(const ConcurrentJITLocker& locker, ExitKind kind) const
+    bool hasExitSite(const ConcurrentJSLocker&, const FrequentExitSite&) const;
+    bool hasExitSite(const ConcurrentJSLocker& locker, ExitKind kind) const
     {
         return hasExitSite(locker, FrequentExitSite(kind));
     }
-    bool hasExitSite(const ConcurrentJITLocker& locker, unsigned bytecodeIndex, ExitKind kind) const
+    bool hasExitSite(const ConcurrentJSLocker& locker, unsigned bytecodeIndex, ExitKind kind) const
     {
         return hasExitSite(locker, FrequentExitSite(bytecodeIndex, kind));
     }
@@ -154,7 +183,7 @@ public:
 private:
     friend class QueryableExitProfile;
     
-    OwnPtr<Vector<FrequentExitSite>> m_frequentExitSites;
+    std::unique_ptr<Vector<FrequentExitSite>> m_frequentExitSites;
 };
 
 class QueryableExitProfile {
@@ -162,10 +191,14 @@ public:
     QueryableExitProfile();
     ~QueryableExitProfile();
     
-    void initialize(const ConcurrentJITLocker&, const ExitProfile&);
+    void initialize(const ConcurrentJSLocker&, const ExitProfile&);
 
     bool hasExitSite(const FrequentExitSite& site) const
     {
+        if (site.jitType() == ExitFromAnything) {
+            return hasExitSite(site.withJITType(ExitFromDFG))
+                || hasExitSite(site.withJITType(ExitFromFTL));
+        }
         return m_frequentExitSites.find(site) != m_frequentExitSites.end();
     }
     
@@ -184,4 +217,4 @@ private:
 
 } } // namespace JSC::DFG
 
-#endif // DFGExitProfile_h
+#endif // ENABLE(DFG_JIT)
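
The subsumes() relation above makes ExitFromAnything a wildcard over JIT tiers, and QueryableExitProfile::hasExitSite expands a wildcard query into its DFG and FTL variants. A minimal sketch of the intended semantics, assuming BadCache as a representative ExitKind and an arbitrary bytecode offset:

    using namespace JSC;
    using namespace JSC::DFG;

    static void frequentExitSiteSemantics()
    {
        FrequentExitSite concrete(42, BadCache, ExitFromDFG);
        FrequentExitSite wildcard(42, BadCache); // jitType defaults to ExitFromAnything
        ASSERT(wildcard.subsumes(concrete));  // a wildcard covers exits from any tier
        ASSERT(!concrete.subsumes(wildcard)); // a DFG-only site does not cover FTL exits
    }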
diff --git a/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp
new file mode 100644
index 000000000..790d9c03d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DOMJITAccessCasePatchpointParams.h"
+
+#include "LinkBuffer.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+template<typename JumpType, typename FunctionType, typename ResultType, typename... Arguments>
+class SlowPathCallGeneratorWithArguments : public DOMJITAccessCasePatchpointParams::SlowPathCallGenerator {
+public:
+    SlowPathCallGeneratorWithArguments(JumpType from, CCallHelpers::Label to, FunctionType function, ResultType result, std::tuple<Arguments...> arguments)
+        : m_from(from)
+        , m_to(to)
+        , m_function(function)
+        , m_result(result)
+        , m_arguments(arguments)
+    {
+    }
+
+    template<size_t... ArgumentsIndex>
+    CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
+    {
+        CCallHelpers::JumpList exceptions;
+        // We spill (1) the used registers by IC and (2) the used registers by DOMJIT::Patchpoint.
+        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersByPatchpoint);
+
+        jit.store32(
+            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+
+        jit.makeSpaceOnStackForCCall();
+
+        // FIXME: Currently, we do not check any ARM EABI things here.
+        // But it is OK because a compile error happens when you pass JSValueRegs as an argument.
+        // https://bugs.webkit.org/show_bug.cgi?id=163099
+        jit.setupArgumentsWithExecState(std::get<ArgumentsIndex>(m_arguments)...);
+
+        CCallHelpers::Call operationCall = jit.call();
+        auto function = m_function;
+        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+            linkBuffer.link(operationCall, FunctionPtr(function));
+        });
+
+        jit.setupResults(m_result);
+        jit.reclaimSpaceOnStackForCCall();
+
+        CCallHelpers::Jump noException = jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+        state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+        exceptions.append(jit.jump());
+
+        noException.link(&jit);
+        RegisterSet dontRestore;
+        dontRestore.set(m_result);
+        state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+
+        return exceptions;
+    }
+
+    CCallHelpers::JumpList generate(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit) override
+    {
+        m_from.link(&jit);
+        CCallHelpers::JumpList exceptions = generateImpl(state, usedRegistersByPatchpoint, jit, std::make_index_sequence<std::tuple_size<std::tuple<Arguments...>>::value>());
+        jit.jump().linkTo(m_to, &jit);
+        return exceptions;
+    }
+
+protected:
+    JumpType m_from;
+    CCallHelpers::Label m_to;
+    FunctionType m_function;
+    ResultType m_result;
+    std::tuple<Arguments...> m_arguments;
+};
+
+#define JSC_DEFINE_CALL_OPERATIONS(OperationType, ResultType, ...) \
+    void DOMJITAccessCasePatchpointParams::addSlowPathCallImpl(CCallHelpers::JumpList from, CCallHelpers& jit, OperationType operation, ResultType result, std::tuple<__VA_ARGS__> args) \
+    { \
+        CCallHelpers::Label to = jit.label(); \
+        m_generators.append(std::make_unique<SlowPathCallGeneratorWithArguments<CCallHelpers::JumpList, OperationType, ResultType, __VA_ARGS__>>(from, to, operation, result, args)); \
+    } \
+
+DOMJIT_SLOW_PATH_CALLS(JSC_DEFINE_CALL_OPERATIONS)
+#undef JSC_DEFINE_CALL_OPERATIONS
+
+CCallHelpers::JumpList DOMJITAccessCasePatchpointParams::emitSlowPathCalls(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit)
+{
+    CCallHelpers::JumpList exceptions;
+    for (auto& generator : m_generators)
+        exceptions.append(generator->generate(state, usedRegistersByPatchpoint, jit));
+    return exceptions;
+}
+
+}
+
+#endif
diff --git a/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.h b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.h
new file mode 100644
index 000000000..8cf975197
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "DOMJITPatchpointParams.h"
+
+namespace JSC {
+
+struct AccessGenerationState;
+
+class DOMJITAccessCasePatchpointParams : public DOMJIT::PatchpointParams {
+public:
+    DOMJITAccessCasePatchpointParams(Vector<DOMJIT::Value>&& regs, Vector<GPRReg>&& gpScratch, Vector<FPRReg>&& fpScratch)
+        : DOMJIT::PatchpointParams(WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch))
+    {
+    }
+
+    class SlowPathCallGenerator {
+    public:
+        virtual ~SlowPathCallGenerator() { }
+        virtual CCallHelpers::JumpList generate(AccessGenerationState&, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers&) = 0;
+    };
+
+    CCallHelpers::JumpList emitSlowPathCalls(AccessGenerationState&, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers&);
+
+private:
+#define JSC_DEFINE_CALL_OPERATIONS(OperationType, ResultType, ...) void addSlowPathCallImpl(CCallHelpers::JumpList, CCallHelpers&, OperationType, ResultType, std::tuple<__VA_ARGS__> args) override;
+    DOMJIT_SLOW_PATH_CALLS(JSC_DEFINE_CALL_OPERATIONS)
+#undef JSC_DEFINE_CALL_OPERATIONS
+    Vector<std::unique_ptr<SlowPathCallGenerator>> m_generators;
+};
+
+}
+
+#endif
diff --git a/Source/JavaScriptCore/bytecode/DataFormat.cpp b/Source/JavaScriptCore/bytecode/DataFormat.cpp
new file mode 100644
index 000000000..8bd42e100
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DataFormat.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "DataFormat.h"
+
+#include <wtf/Assertions.h>
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::DataFormat dataFormat)
+{
+    out.print(dataFormatToString(dataFormat));
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/DataFormat.h b/Source/JavaScriptCore/bytecode/DataFormat.h
index bb9da4c57..22c649226 100644
--- a/Source/JavaScriptCore/bytecode/DataFormat.h
+++ b/Source/JavaScriptCore/bytecode/DataFormat.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef DataFormat_h
-#define DataFormat_h
+#pragma once
 
 #include <wtf/Assertions.h>
 
@@ -56,7 +55,6 @@ enum DataFormat {
     
     // Special data formats used only for OSR.
     DataFormatDead = 33, // Implies jsUndefined().
-    DataFormatArguments = 34 // Implies that the arguments object must be reified.
 };
 
 inline const char* dataFormatToString(DataFormat dataFormat)
@@ -90,8 +88,6 @@ inline const char* dataFormatToString(DataFormat dataFormat)
         return "JSBoolean";
     case DataFormatDead:
         return "Dead";
-    case DataFormatArguments:
-        return "Arguments";
     default:
         RELEASE_ASSERT_NOT_REACHED();
         return "Unknown";
@@ -124,6 +120,11 @@ inline bool isJSBoolean(DataFormat format)
     return isJSFormat(format, DataFormatJSBoolean);
 }
 
-}
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::DataFormat);
 
-#endif // DataFormat_h
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
index 35af7c7b9..762387caf 100644
--- a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,10 +26,46 @@
 #include "config.h"
 #include "DeferredCompilationCallback.h"
 
+#include "CodeBlock.h"
+
 namespace JSC {
 
 DeferredCompilationCallback::DeferredCompilationCallback() { }
 DeferredCompilationCallback::~DeferredCompilationCallback() { }
 
+void DeferredCompilationCallback::compilationDidComplete(CodeBlock*, CodeBlock*, CompilationResult result)
+{
+    dumpCompiledSourcesIfNeeded();
+
+    switch (result) {
+    case CompilationFailed:
+    case CompilationInvalidated:
+    case CompilationSuccessful:
+        break;
+    case CompilationDeferred:
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+}
+
+Vector<DeferredSourceDump>& DeferredCompilationCallback::ensureDeferredSourceDump()
+{
+    if (!m_deferredSourceDump)
+        m_deferredSourceDump = std::make_unique<Vector<DeferredSourceDump>>();
+    return *m_deferredSourceDump;
+}
+
+void DeferredCompilationCallback::dumpCompiledSourcesIfNeeded()
+{
+    if (!m_deferredSourceDump)
+        return;
+
+    ASSERT(Options::dumpSourceAtDFGTime());
+    unsigned index = 0;
+    for (auto& info : *m_deferredSourceDump) {
+        dataLog("[", ++index, "] ");
+        info.dump();
+    }
+}
+
 } // JSC
 
diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
index 6421e3e25..925711047 100644
--- a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
+++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
@@ -23,11 +23,12 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef DeferredCompilationCallback_h
-#define DeferredCompilationCallback_h
+#pragma once
 
 #include "CompilationResult.h"
+#include "DeferredSourceDump.h"
 #include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
 
 namespace JSC {
 
@@ -40,11 +41,15 @@ protected:
 public:
     virtual ~DeferredCompilationCallback();
 
-    virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*) = 0;
-    virtual void compilationDidComplete(CodeBlock*, CompilationResult) = 0;
-};
+    virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) = 0;
+    virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult);
 
-} // namespace JSC
+    Vector<DeferredSourceDump>& ensureDeferredSourceDump();
 
-#endif // DeferredCompilationCallback_h
+private:
+    void dumpCompiledSourcesIfNeeded();
 
+    std::unique_ptr<Vector<DeferredSourceDump>> m_deferredSourceDump;
+};
+
+} // namespace JSC
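
Since compilationDidComplete now has a base implementation that prints any deferred source dumps, a concrete callback should chain to it. A hedged sketch of a minimal subclass; the class name and method bodies are illustrative, only the virtual signatures come from the header above:

    class InstallResultCallback final : public JSC::DeferredCompilationCallback {
    public:
        void compilationDidBecomeReadyAsynchronously(JSC::CodeBlock*, JSC::CodeBlock*) override
        {
            // e.g. schedule installation of the newly compiled code block.
        }

        void compilationDidComplete(JSC::CodeBlock* codeBlock,
            JSC::CodeBlock* profiledDFGCodeBlock, JSC::CompilationResult result) override
        {
            // Chain to the base class so dumps recorded via
            // ensureDeferredSourceDump() are printed exactly once, here.
            JSC::DeferredCompilationCallback::compilationDidComplete(
                codeBlock, profiledDFGCodeBlock, result);
        }
    };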
diff --git a/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
new file mode 100644
index 000000000..48079db66
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DeferredSourceDump.h"
+
+#include "CodeBlock.h"
+#include "CodeBlockWithJITType.h"
+
+namespace JSC {
+
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock)
+    : m_codeBlock(codeBlock)
+    , m_rootCodeBlock(nullptr)
+    , m_rootJITType(JITCode::None)
+{
+}
+
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin)
+    : m_codeBlock(codeBlock)
+    , m_rootCodeBlock(rootCodeBlock)
+    , m_rootJITType(rootJITType)
+    , m_callerCodeOrigin(callerCodeOrigin)
+{
+}
+
+void DeferredSourceDump::dump()
+{
+    bool isInlinedFrame = !!m_rootCodeBlock;
+    if (isInlinedFrame)
+        dataLog("Inlined ");
+    else
+        dataLog("Compiled ");
+    dataLog(*m_codeBlock);
+
+    if (isInlinedFrame)
+        dataLog(" at ", CodeBlockWithJITType(m_rootCodeBlock, m_rootJITType), " ", m_callerCodeOrigin);
+
+    dataLog("\n'''");
+    m_codeBlock->dumpSource();
+    dataLog("'''\n");
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/DeferredSourceDump.h b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h
new file mode 100644
index 000000000..6c9943d08
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeOrigin.h"
+#include "JITCode.h"
+
+namespace JSC {
+
+class CodeBlock;
+
+class DeferredSourceDump {
+public:
+    DeferredSourceDump(CodeBlock*);
+    DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin);
+
+    void dump();
+
+private:
+    CodeBlock* m_codeBlock;
+    CodeBlock* m_rootCodeBlock;
+    JITCode::JITType m_rootJITType;
+    CodeOrigin m_callerCodeOrigin;
+};
+
+} // namespace JSC
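
The two constructors correspond to the two shapes dump() can print: a root compile ("Compiled ...") and an inlined frame ("Inlined ... at ..."). A sketch of how a compiler phase might queue both through DeferredCompilationCallback; recordSourceDumps and its parameters are assumptions, not part of the patch:

    static void recordSourceDumps(JSC::DeferredCompilationCallback& callback,
        JSC::CodeBlock* rootBlock, JSC::CodeBlock* inlinedBlock, JSC::CodeOrigin caller)
    {
        auto& dumps = callback.ensureDeferredSourceDump();
        dumps.append(JSC::DeferredSourceDump(rootBlock)); // "Compiled ..." entry
        dumps.append(JSC::DeferredSourceDump(inlinedBlock, rootBlock,
            JSC::JITCode::DFGJIT, caller)); // "Inlined ... at ..." entry
    }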
diff --git a/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp b/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp
new file mode 100644
index 000000000..5bfef1201
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "DirectEvalCodeCache.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void DirectEvalCodeCache::setSlow(ExecState* exec, JSCell* owner, const String& evalSource, CallSiteIndex callSiteIndex, DirectEvalExecutable* evalExecutable)
+{
+    LockHolder locker(m_lock);
+    m_cacheMap.set(CacheKey(evalSource, callSiteIndex), WriteBarrier<DirectEvalExecutable>(exec->vm(), owner, evalExecutable));
+}
+
+void DirectEvalCodeCache::clear()
+{
+    LockHolder locker(m_lock);
+    m_cacheMap.clear();
+}
+
+void DirectEvalCodeCache::visitAggregate(SlotVisitor& visitor)
+{
+    LockHolder locker(m_lock);
+    EvalCacheMap::iterator end = m_cacheMap.end();
+    for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
+        visitor.append(ptr->value);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h b/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h
new file mode 100644
index 000000000..e075357a8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "DirectEvalExecutable.h"
+#include <wtf/HashMap.h>
+#include <wtf/RefPtr.h>
+#include <wtf/text/StringHash.h>
+
+namespace JSC {
+
+    class SlotVisitor;
+
+    class DirectEvalCodeCache {
+    public:
+        class CacheKey {
+        public:
+            CacheKey(const String& source, CallSiteIndex callSiteIndex)
+                : m_source(source.impl())
+                , m_callSiteIndex(callSiteIndex)
+            {
+            }
+
+            CacheKey(WTF::HashTableDeletedValueType)
+                : m_source(WTF::HashTableDeletedValue)
+            {
+            }
+
+            CacheKey() = default;
+
+            unsigned hash() const { return m_source->hash() ^ m_callSiteIndex.bits(); }
+
+            bool isEmptyValue() const { return !m_source; }
+
+            bool operator==(const CacheKey& other) const
+            {
+                return m_callSiteIndex == other.m_callSiteIndex && WTF::equal(m_source.get(), other.m_source.get());
+            }
+
+            bool isHashTableDeletedValue() const { return m_source.isHashTableDeletedValue(); }
+
+            struct Hash {
+                static unsigned hash(const CacheKey& key)
+                {
+                    return key.hash();
+                }
+                static bool equal(const CacheKey& lhs, const CacheKey& rhs)
+                {
+                    return lhs == rhs;
+                }
+                static const bool safeToCompareToEmptyOrDeleted = false;
+            };
+
+            typedef SimpleClassHashTraits<CacheKey> HashTraits;
+
+        private:
+            RefPtr<StringImpl> m_source;
+            CallSiteIndex m_callSiteIndex;
+        };
+
+        DirectEvalExecutable* tryGet(const String& evalSource, CallSiteIndex callSiteIndex)
+        {
+            return m_cacheMap.fastGet(CacheKey(evalSource, callSiteIndex)).get();
+        }
+        
+        void set(ExecState* exec, JSCell* owner, const String& evalSource, CallSiteIndex callSiteIndex, DirectEvalExecutable* evalExecutable)
+        {
+            if (m_cacheMap.size() < maxCacheEntries)
+                setSlow(exec, owner, evalSource, callSiteIndex, evalExecutable);
+        }
+
+        bool isEmpty() const { return m_cacheMap.isEmpty(); }
+
+        void visitAggregate(SlotVisitor&);
+
+        void clear();
+
+    private:
+        static const int maxCacheEntries = 64;
+
+        void setSlow(ExecState*, JSCell* owner, const String& evalSource, CallSiteIndex, DirectEvalExecutable*);
+
+        typedef HashMap<CacheKey, WriteBarrier<DirectEvalExecutable>, CacheKey::Hash, CacheKey::HashTraits> EvalCacheMap;
+        EvalCacheMap m_cacheMap;
+        Lock m_lock;
+    };
+
+} // namespace JSC
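
Unlike the EvalCodeCache removed below, which keyed entries on the source string alone, this cache keys on the (source, CallSiteIndex) pair, so identical eval text at two different call sites caches separately. A sketch of the lookup/insert protocol; cachedEval and makeExecutable are hypothetical, and note that set() silently stops inserting once maxCacheEntries (64) is reached:

    static JSC::DirectEvalExecutable* cachedEval(JSC::DirectEvalCodeCache& cache,
        JSC::ExecState* exec, JSC::JSCell* owner, const WTF::String& source,
        JSC::CallSiteIndex callSiteIndex)
    {
        if (JSC::DirectEvalExecutable* executable = cache.tryGet(source, callSiteIndex))
            return executable; // hit: same source at the same call site
        JSC::DirectEvalExecutable* executable = makeExecutable(exec, source); // hypothetical factory
        cache.set(exec, owner, source, callSiteIndex, executable); // no-op past 64 entries
        return executable;
    }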
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp b/Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp
new file mode 100644
index 000000000..5232a0e05
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "EvalCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo EvalCodeBlock::s_info = {
+    "EvalCodeBlock", &Base::s_info, 0,
+    CREATE_METHOD_TABLE(EvalCodeBlock)
+};
+
+void EvalCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<EvalCodeBlock*>(cell)->~EvalCodeBlock();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeBlock.h b/Source/JavaScriptCore/bytecode/EvalCodeBlock.h
new file mode 100644
index 000000000..fde7b1165
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/EvalCodeBlock.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+
+namespace JSC {
+
+class EvalCodeBlock : public GlobalCodeBlock {
+public:
+    typedef GlobalCodeBlock Base;
+    DECLARE_INFO;
+
+    static EvalCodeBlock* create(VM* vm, CopyParsedBlockTag, EvalCodeBlock& other)
+    {
+        EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap))
+            EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), CopyParsedBlock, other);
+        instance->finishCreation(*vm, CopyParsedBlock, other);
+        return instance;
+    }
+
+    static EvalCodeBlock* create(VM* vm, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, RefPtr<SourceProvider>&& sourceProvider)
+    {
+        EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap))
+            EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider));
+        instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+        return instance;
+    }
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
+    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
+    
+private:
+    EvalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, EvalCodeBlock& other)
+        : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+        
+    EvalCodeBlock(VM* vm, Structure* structure, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, RefPtr<SourceProvider>&& sourceProvider)
+        : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), 0, 1)
+    {
+    }
+    
+    static void destroy(JSCell*);
+
+private:
+    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeCache.h b/Source/JavaScriptCore/bytecode/EvalCodeCache.h
deleted file mode 100644
index ff5911240..000000000
--- a/Source/JavaScriptCore/bytecode/EvalCodeCache.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1.  Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 2.  Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- *     its contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef EvalCodeCache_h
-#define EvalCodeCache_h
-
-#include "Executable.h"
-#include "JSGlobalObject.h"
-#include "SourceCode.h"
-#include <wtf/HashMap.h>
-#include <wtf/RefPtr.h>
-#include <wtf/text/StringHash.h>
-
-namespace JSC {
-
-    class SlotVisitor;
-
-    class EvalCodeCache {
-    public:
-        EvalExecutable* tryGet(bool inStrictContext, const String& evalSource, JSScope* scope)
-        {
-            if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject())
-                return m_cacheMap.get(evalSource.impl()).get();
-            return 0;
-        }
-        
-        EvalExecutable* getSlow(ExecState* exec, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope)
-        {
-            EvalExecutable* evalExecutable = EvalExecutable::create(exec, makeSource(evalSource), inStrictContext);
-            if (!evalExecutable)
-                return 0;
-
-            if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject() && m_cacheMap.size() < maxCacheEntries)
-                m_cacheMap.set(evalSource.impl(), WriteBarrier<EvalExecutable>(exec->vm(), owner, evalExecutable));
-            
-            return evalExecutable;
-        }
-
-        bool isEmpty() const { return m_cacheMap.isEmpty(); }
-
-        void visitAggregate(SlotVisitor&);
-
-        void clear()
-        {
-            m_cacheMap.clear();
-        }
-
-    private:
-        static const unsigned maxCacheableSourceLength = 256;
-        static const int maxCacheEntries = 64;
-
-        typedef HashMap<RefPtr<StringImpl>, WriteBarrier<EvalExecutable>> EvalCacheMap;
-        EvalCacheMap m_cacheMap;
-    };
-
-} // namespace JSC
-
-#endif // EvalCodeCache_h
diff --git a/Source/JavaScriptCore/bytecode/ExecutableInfo.h b/Source/JavaScriptCore/bytecode/ExecutableInfo.h
new file mode 100644
index 000000000..750900ecd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExecutableInfo.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ParserModes.h"
+
+namespace JSC {
+    
+enum class DerivedContextType : uint8_t { None, DerivedConstructorContext, DerivedMethodContext };
+enum class EvalContextType    : uint8_t { None, FunctionEvalContext };
+
+// FIXME: These flags, ParserModes and propagation to XXXCodeBlocks should be reorganized.
+// https://bugs.webkit.org/show_bug.cgi?id=151547
+struct ExecutableInfo {
+    ExecutableInfo(bool usesEval, bool isStrictMode, bool isConstructor, bool isBuiltinFunction, ConstructorKind constructorKind, JSParserScriptMode scriptMode, SuperBinding superBinding, SourceParseMode parseMode, DerivedContextType derivedContextType, bool isArrowFunctionContext, bool isClassContext, EvalContextType evalContextType)
+        : m_usesEval(usesEval)
+        , m_isStrictMode(isStrictMode)
+        , m_isConstructor(isConstructor)
+        , m_isBuiltinFunction(isBuiltinFunction)
+        , m_constructorKind(static_cast<unsigned>(constructorKind))
+        , m_superBinding(static_cast<unsigned>(superBinding))
+        , m_scriptMode(static_cast<unsigned>(scriptMode))
+        , m_parseMode(parseMode)
+        , m_derivedContextType(static_cast<unsigned>(derivedContextType))
+        , m_isArrowFunctionContext(isArrowFunctionContext)
+        , m_isClassContext(isClassContext)
+        , m_evalContextType(static_cast<unsigned>(evalContextType))
+    {
+        ASSERT(m_constructorKind == static_cast<unsigned>(constructorKind));
+        ASSERT(m_superBinding == static_cast<unsigned>(superBinding));
+        ASSERT(m_scriptMode == static_cast<unsigned>(scriptMode));
+    }
+
+    bool usesEval() const { return m_usesEval; }
+    bool isStrictMode() const { return m_isStrictMode; }
+    bool isConstructor() const { return m_isConstructor; }
+    bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+    ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+    SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+    JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+    SourceParseMode parseMode() const { return m_parseMode; }
+    DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
+    EvalContextType evalContextType() const { return static_cast<EvalContextType>(m_evalContextType); }
+    bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
+    bool isClassContext() const { return m_isClassContext; }
+
+private:
+    unsigned m_usesEval : 1;
+    unsigned m_isStrictMode : 1;
+    unsigned m_isConstructor : 1;
+    unsigned m_isBuiltinFunction : 1;
+    unsigned m_constructorKind : 2;
+    unsigned m_superBinding : 1;
+    unsigned m_scriptMode: 1;
+    SourceParseMode m_parseMode;
+    unsigned m_derivedContextType : 2;
+    unsigned m_isArrowFunctionContext : 1;
+    unsigned m_isClassContext : 1;
+    unsigned m_evalContextType : 2;
+};
+
+} // namespace JSC
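
The ExecutableInfo constructor above narrows each enum into a small bitfield and then ASSERTs that the stored value round-trips, so an enum that outgrows its field is caught in debug builds. Below is a minimal standalone sketch of that pattern; the enum and field names are illustrative stand-ins, not JSC's.

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for one of the packed enums; only the 2-bit
    // packing behaviour is being demonstrated.
    enum class ConstructorKind : uint8_t { None, Base, Extends };

    struct PackedInfo {
        explicit PackedInfo(ConstructorKind kind)
            : m_constructorKind(static_cast<unsigned>(kind))
        {
            // Mirrors the ASSERTs in ExecutableInfo: if ConstructorKind ever
            // grows past what 2 bits can hold, the truncated value fails to
            // round-trip and this fires in debug builds.
            assert(m_constructorKind == static_cast<unsigned>(kind));
        }

        ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }

    private:
        unsigned m_constructorKind : 2;
    };

    int main()
    {
        PackedInfo info(ConstructorKind::Extends);
        assert(info.constructorKind() == ConstructorKind::Extends);
        return 0;
    }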
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
index 3a646a86a..237c0e752 100644
--- a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,21 +28,26 @@
 
 #include "CodeBlock.h"
 #include "ExecutableAllocator.h"
+#include "JSCInlines.h"
+#include "VMInlines.h"
 #include 
 
 namespace JSC {
 
-ExecutionCounter::ExecutionCounter()
+template<CountingVariant countingVariant>
+ExecutionCounter<countingVariant>::ExecutionCounter()
 {
     reset();
 }
 
-void ExecutionCounter::forceSlowPathConcurrently()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::forceSlowPathConcurrently()
 {
     m_counter = 0;
 }
 
-bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
 {
     if (hasCrossedThreshold(codeBlock))
         return true;
@@ -53,26 +58,28 @@ bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
     return false;
 }
 
-void ExecutionCounter::setNewThreshold(int32_t threshold, CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::setNewThreshold(int32_t threshold, CodeBlock* codeBlock)
 {
     reset();
     m_activeThreshold = threshold;
     setThreshold(codeBlock);
 }
 
-void ExecutionCounter::deferIndefinitely()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::deferIndefinitely()
 {
     m_totalCount = 0;
     m_activeThreshold = std::numeric_limits<int32_t>::max();
     m_counter = std::numeric_limits<int32_t>::min();
 }
 
-double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
 {
 #if ENABLE(JIT)
     double multiplier =
         ExecutableAllocator::memoryPressureMultiplier(
-            codeBlock->predictedMachineCodeSize());
+            codeBlock->baselineAlternative()->predictedMachineCodeSize());
 #else
     // This code path will probably not be taken, but if it is, we fake it.
     double multiplier = 1.0;
@@ -82,8 +89,7 @@ double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* co
     return multiplier * value;
 }
 
-int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
-    int32_t value, CodeBlock* codeBlock)
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock* codeBlock)
 {
     double doubleResult = applyMemoryUsageHeuristics(value, codeBlock);
     
@@ -95,7 +101,8 @@ int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
     return static_cast<int32_t>(doubleResult);
 }
 
-bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::hasCrossedThreshold(CodeBlock* codeBlock) const
 {
     // This checks if the current count rounded up to the threshold we were targeting.
     // For example, if we are using half of available executable memory and have
@@ -117,20 +124,25 @@ bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const
     
     double modifiedThreshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock);
     
-    return static_cast<double>(m_totalCount) + m_counter >=
-        modifiedThreshold - static_cast<double>(
-            std::min(m_activeThreshold, Options::maximumExecutionCountsBetweenCheckpoints())) / 2;
+    double actualCount = static_cast<double>(m_totalCount) + m_counter;
+    double desiredCount = modifiedThreshold - static_cast<double>(
+        std::min(m_activeThreshold, maximumExecutionCountsBetweenCheckpoints())) / 2;
+    
+    bool result = actualCount >= desiredCount;
+    
+    CODEBLOCK_LOG_EVENT(codeBlock, "thresholdCheck", ("activeThreshold = ", m_activeThreshold, ", modifiedThreshold = ", modifiedThreshold, ", actualCount = ", actualCount, ", desiredCount = ", desiredCount));
+    
+    return result;
 }
 
-bool ExecutionCounter::setThreshold(CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::setThreshold(CodeBlock* codeBlock)
 {
     if (m_activeThreshold == std::numeric_limits<int32_t>::max()) {
         deferIndefinitely();
         return false;
     }
         
-    ASSERT(!m_activeThreshold || !hasCrossedThreshold(codeBlock));
-        
     // Compute the true total count.
     double trueTotalCount = count();
     
@@ -159,17 +171,22 @@ bool ExecutionCounter::setThreshold(CodeBlock* codeBlock)
     return false;
 }
 
-void ExecutionCounter::reset()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::reset()
 {
     m_counter = 0;
     m_totalCount = 0;
     m_activeThreshold = 0;
 }
 
-void ExecutionCounter::dump(PrintStream& out) const
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::dump(PrintStream& out) const
 {
     out.printf("%lf/%lf, %d", count(), static_cast<double>(m_activeThreshold), m_counter);
 }
 
+template class ExecutionCounter<CountingForBaseline>;
+template class ExecutionCounter<CountingForUpperTiers>;
+
 } // namespace JSC
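
The hasCrossedThreshold() logic above compares the accumulated count against a threshold scaled by a memory-pressure multiplier, minus half the checkpoint interval, so a count that rounds up to the target still triggers. A simplified standalone sketch of that arithmetic follows; MiniCounter and its parameters are illustrative, not JSC's class.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    // A minimal sketch (not JSC's ExecutionCounter): the counter accumulates
    // toward a threshold, and the memory-pressure multiplier scales the
    // effective threshold.
    struct MiniCounter {
        int32_t counter = 0;       // counts since the last checkpoint
        float totalCount = 0;      // counts folded in at previous checkpoints
        int32_t activeThreshold = 0;

        bool hasCrossedThreshold(double memoryPressureMultiplier, int32_t maxBetweenCheckpoints) const
        {
            double modifiedThreshold = memoryPressureMultiplier * activeThreshold;
            double actualCount = static_cast<double>(totalCount) + counter;
            double desiredCount = modifiedThreshold
                - static_cast<double>(std::min(activeThreshold, maxBetweenCheckpoints)) / 2;
            return actualCount >= desiredCount;
        }
    };

    int main()
    {
        MiniCounter c;
        c.activeThreshold = 1000;
        c.counter = 600;
        // Under memory pressure (multiplier < 1) the same count crosses sooner.
        std::cout << c.hasCrossedThreshold(1.0, 100) << " "   // 0: 600 < 950
                  << c.hasCrossedThreshold(0.5, 100) << "\n"; // 1: 600 >= 450
        return 0;
    }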
 
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.h b/Source/JavaScriptCore/bytecode/ExecutionCounter.h
index a7346691d..f78a9123c 100644
--- a/Source/JavaScriptCore/bytecode/ExecutionCounter.h
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,18 +23,35 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ExecutionCounter_h
-#define ExecutionCounter_h
+#pragma once
 
 #include "JSGlobalObject.h"
 #include "Options.h"
 #include 
-#include 
 
 namespace JSC {
 
 class CodeBlock;
 
+enum CountingVariant {
+    CountingForBaseline,
+    CountingForUpperTiers
+};
+
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+
+inline int32_t formattedTotalExecutionCount(float value)
+{
+    union {
+        int32_t i;
+        float f;
+    } u;
+    u.f = value;
+    return u.i;
+}
+    
+template<CountingVariant countingVariant>
 class ExecutionCounter {
 public:
     ExecutionCounter();
@@ -44,31 +61,33 @@ public:
     void deferIndefinitely();
     double count() const { return static_cast(m_totalCount) + m_counter; }
     void dump(PrintStream&) const;
-    static double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
-    static int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+    
+    static int32_t maximumExecutionCountsBetweenCheckpoints()
+    {
+        switch (countingVariant) {
+        case CountingForBaseline:
+            return Options::maximumExecutionCountsBetweenCheckpointsForBaseline();
+        case CountingForUpperTiers:
+            return Options::maximumExecutionCountsBetweenCheckpointsForUpperTiers();
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return 0;
+        }
+    }
+    
     template<typename T>
     static T clippedThreshold(JSGlobalObject* globalObject, T threshold)
     {
         int32_t maxThreshold;
         if (Options::randomizeExecutionCountsBetweenCheckpoints())
-            maxThreshold = globalObject->weakRandomInteger() % Options::maximumExecutionCountsBetweenCheckpoints();
+            maxThreshold = globalObject->weakRandomInteger() % maximumExecutionCountsBetweenCheckpoints();
         else
-            maxThreshold = Options::maximumExecutionCountsBetweenCheckpoints();
+            maxThreshold = maximumExecutionCountsBetweenCheckpoints();
         if (threshold > maxThreshold)
             threshold = maxThreshold;
         return threshold;
     }
 
-    static int32_t formattedTotalCount(float value)
-    {
-        union {
-            int32_t i;
-            float f;
-        } u;
-        u.f = value;
-        return u.i;
-    }
-    
 private:
     bool hasCrossedThreshold(CodeBlock*) const;
     bool setThreshold(CodeBlock*);
@@ -89,12 +108,12 @@ public:
     // m_counter.
     float m_totalCount;
 
-    // This is the threshold we were originally targetting, without any correction for
+    // This is the threshold we were originally targeting, without any correction for
     // the memory usage heuristics.
     int32_t m_activeThreshold;
 };
 
-} // namespace JSC
-
-#endif // ExecutionCounter_h
+typedef ExecutionCounter<CountingForBaseline> BaselineExecutionCounter;
+typedef ExecutionCounter<CountingForUpperTiers> UpperTierExecutionCounter;
 
+} // namespace JSC
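
formattedTotalExecutionCount() above type-puns a float through a union so the raw bits of m_totalCount can be stored in a 32-bit slot. The sketch below shows an equivalent done with memcpy, the strictly-conforming way to express the same bit copy; this is a rewrite for illustration, not JSC's code.

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Reinterpret a float's bit pattern as an int32_t without union type
    // punning; the compiler optimizes the memcpy away.
    int32_t formattedTotalExecutionCount(float value)
    {
        int32_t bits;
        static_assert(sizeof(bits) == sizeof(value), "float must be 32-bit");
        std::memcpy(&bits, &value, sizeof(bits));
        return bits;
    }

    int main()
    {
        std::cout << std::hex << formattedTotalExecutionCount(1.0f) << "\n"; // 3f800000
        return 0;
    }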
diff --git a/Source/JavaScriptCore/bytecode/ExitKind.cpp b/Source/JavaScriptCore/bytecode/ExitKind.cpp
index 350aa5857..f1ea76d38 100644
--- a/Source/JavaScriptCore/bytecode/ExitKind.cpp
+++ b/Source/JavaScriptCore/bytecode/ExitKind.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -38,20 +38,20 @@ const char* exitKindToString(ExitKind kind)
         return "Unset";
     case BadType:
         return "BadType";
-    case BadFunction:
-        return "BadFunction";
+    case BadCell:
+        return "BadCell";
+    case BadIdent:
+        return "BadIdent";
     case BadExecutable:
         return "BadExecutable";
     case BadCache:
         return "BadCache";
-    case BadCacheWatchpoint:
-        return "BadCacheWatchpoint";
-    case BadWeakConstantCache:
-        return "BadWeakConstantCache";
-    case BadWeakConstantCacheWatchpoint:
-        return "BadWeakConstantCacheWatchpoint";
+    case BadConstantCache:
+        return "BadConstantCache";
     case BadIndexingType:
         return "BadIndexingType";
+    case BadTypeInfoFlags:
+        return "BadTypeInfoFlags";
     case Overflow:
         return "Overflow";
     case NegativeZero:
@@ -68,38 +68,45 @@ const char* exitKindToString(ExitKind kind)
         return "InadequateCoverage";
     case ArgumentsEscaped:
         return "ArgumentsEscaped";
+    case ExoticObjectMode:
+        return "ExoticObjectMode";
     case NotStringObject:
         return "NotStringObject";
+    case VarargsOverflow:
+        return "VarargsOverflow";
+    case TDZFailure:
+        return "TDZFailure";
+    case HoistingFailed:
+        return "HoistingFailed";
     case Uncountable:
         return "Uncountable";
-    case UncountableWatchpoint:
-        return "UncountableWatchpoint";
     case UncountableInvalidation:
         return "UncountableInvalidation";
     case WatchdogTimerFired:
         return "WatchdogTimerFired";
     case DebuggerEvent:
         return "DebuggerEvent";
+    case ExceptionCheck:
+        return "ExceptionCheck";
+    case GenericUnwind:
+        return "GenericUnwind";
     }
     RELEASE_ASSERT_NOT_REACHED();
     return "Unknown";
 }
 
-bool exitKindIsCountable(ExitKind kind)
+bool exitKindMayJettison(ExitKind kind)
 {
     switch (kind) {
-    case ExitKindUnset:
-        RELEASE_ASSERT_NOT_REACHED();
-    case BadType:
-    case Uncountable:
-    case UncountableWatchpoint:
-    case LoadFromHole: // Already counted directly by the baseline JIT.
-    case StoreToHole: // Already counted directly by the baseline JIT.
-    case OutOfBounds: // Already counted directly by the baseline JIT.
+    case ExceptionCheck:
+    case GenericUnwind:
         return false;
     default:
         return true;
     }
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return false;
 }
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ExitKind.h b/Source/JavaScriptCore/bytecode/ExitKind.h
index a9f6df6d4..a6c2e0ea2 100644
--- a/Source/JavaScriptCore/bytecode/ExitKind.h
+++ b/Source/JavaScriptCore/bytecode/ExitKind.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,21 +23,20 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ExitKind_h
-#define ExitKind_h
+#pragma once
 
 namespace JSC {
 
-enum ExitKind {
+enum ExitKind : uint8_t {
     ExitKindUnset,
     BadType, // We exited because a type prediction was wrong.
-    BadFunction, // We exited because we made an incorrect assumption about what function we would see.
+    BadCell, // We exited because we made an incorrect assumption about what cell we would see. Usually used for function checks.
+    BadIdent, // We exited because we made an incorrect assumption about what identifier we would see. Usually used for cached Id check in get_by_val.
     BadExecutable, // We exited because we made an incorrect assumption about what executable we would see.
     BadCache, // We exited because an inline cache was wrong.
-    BadWeakConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
-    BadCacheWatchpoint, // Same as BadCache but from a watchpoint.
-    BadWeakConstantCacheWatchpoint, // Same as BadWeakConstantCache but from a watchpoint.
+    BadConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
     BadIndexingType, // We exited because an indexing type was wrong.
+    BadTypeInfoFlags, // We exited because we made an incorrect assumption about what TypeInfo flags we would see.
     Overflow, // We exited because of overflow.
     NegativeZero, // We exited because we encountered negative zero.
     Int52Overflow, // We exited because of an Int52 overflow.
@@ -46,28 +45,21 @@ enum ExitKind {
     OutOfBounds, // We had an out-of-bounds access to an array.
     InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage.
     ArgumentsEscaped, // We exited because arguments escaped but we didn't expect them to.
+    ExoticObjectMode, // We exited because some exotic object that we were accessing was in an exotic mode (like Arguments with slow arguments).
     NotStringObject, // We exited because we shouldn't have attempted to optimize string object access.
+    VarargsOverflow, // We exited because a varargs call passed more arguments than we expected.
+    TDZFailure, // We exited because we were in the TDZ and accessed the variable.
+    HoistingFailed, // Something that was hoisted exited. So, assume that hoisting is a bad idea.
     Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME.
     UncountableInvalidation, // We exited because the code block was invalidated; this means that we've already counted the reasons why the code block was invalidated.
-    UncountableWatchpoint, // We exited because of a watchpoint, which isn't counted because watchpoints do tracking themselves.
     WatchdogTimerFired, // We exited because we need to service the watchdog timer.
-    DebuggerEvent // We exited because we need to service the debugger.
+    DebuggerEvent, // We exited because we need to service the debugger.
+    ExceptionCheck, // We exited because a direct exception check showed that we threw an exception from a C call.
+    GenericUnwind, // We exited because we arrived at this OSR exit from genericUnwind.
 };
 
 const char* exitKindToString(ExitKind);
-bool exitKindIsCountable(ExitKind);
-
-inline bool isWatchpoint(ExitKind kind)
-{
-    switch (kind) {
-    case BadCacheWatchpoint:
-    case BadWeakConstantCacheWatchpoint:
-    case UncountableWatchpoint:
-        return true;
-    default:
-        return false;
-    }
-}
+bool exitKindMayJettison(ExitKind);
 
 } // namespace JSC
 
@@ -77,6 +69,3 @@ class PrintStream;
 void printInternal(PrintStream&, JSC::ExitKind);
 
 } // namespace WTF
-
-#endif // ExitKind_h
-
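
exitKindMayJettison() above partitions exit kinds into ones that should count toward throwing compiled code away and ones (ExceptionCheck, GenericUnwind) that can recur without indicting the code itself. Below is a hedged sketch of how a consumer might use it; the enum subset and the counting policy are illustrative, not JSC's actual OSR-exit accounting.

    #include <cstdint>

    // Illustrative subset of the ExitKind enum above.
    enum ExitKind : uint8_t { ExitKindUnset, BadType, ExceptionCheck, GenericUnwind };

    bool exitKindMayJettison(ExitKind kind)
    {
        switch (kind) {
        case ExceptionCheck:
        case GenericUnwind:
            return false;
        default:
            return true;
        }
    }

    struct ExitCounter {
        unsigned countableExits = 0;

        void recordExit(ExitKind kind)
        {
            // Exits that may recur regardless of code quality do not count.
            if (exitKindMayJettison(kind))
                countableExits++;
        }

        bool shouldJettison(unsigned threshold) const { return countableExits >= threshold; }
    };

    int main()
    {
        ExitCounter c;
        c.recordExit(BadType);
        c.recordExit(GenericUnwind); // ignored: not the compiled code's fault
        return c.shouldJettison(2) ? 1 : 0; // 0: only one countable exit
    }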
diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.cpp b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp
new file mode 100644
index 000000000..aa8f120b6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ExitingJITType.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ExitingJITType type)
+{
+    switch (type) {
+    case ExitFromAnything:
+        out.print("FromAnything");
+        return;
+    case ExitFromDFG:
+        out.print("FromDFG");
+        return;
+    case ExitFromFTL:
+        out.print("FromFTL");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.h b/Source/JavaScriptCore/bytecode/ExitingJITType.h
new file mode 100644
index 000000000..dfbfee4aa
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExitingJITType.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JITCode.h"
+
+namespace JSC {
+
+enum ExitingJITType : uint8_t {
+    ExitFromAnything,
+    ExitFromDFG,
+    ExitFromFTL
+};
+
+inline ExitingJITType exitingJITTypeFor(JITCode::JITType type)
+{
+    switch (type) {
+    case JITCode::DFGJIT:
+        return ExitFromDFG;
+    case JITCode::FTLJIT:
+        return ExitFromFTL;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return ExitFromAnything;
+    }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::ExitingJITType);
+
+} // namespace WTF
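
exitingJITTypeFor() above collapses the full JIT tier enum down to the two tiers that can actually OSR-exit, asserting on anything else. A freestanding sketch of the same mapping; JITType here is a hypothetical stand-in for JITCode::JITType in JITCode.h.

    #include <cassert>

    enum class JITType { Baseline, DFG, FTL };
    enum ExitingJITType { ExitFromAnything, ExitFromDFG, ExitFromFTL };

    ExitingJITType exitingJITTypeFor(JITType type)
    {
        switch (type) {
        case JITType::DFG:
            return ExitFromDFG;
        case JITType::FTL:
            return ExitFromFTL;
        default:
            assert(false); // only optimizing tiers have OSR exits
            return ExitFromAnything;
        }
    }

    int main()
    {
        assert(exitingJITTypeFor(JITType::FTL) == ExitFromFTL);
        return 0;
    }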
diff --git a/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h b/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
index 855738aec..8f83527ff 100644
--- a/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
+++ b/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2013, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ExpressionRangeInfo_h
-#define ExpressionRangeInfo_h
+#pragma once
 
 #include <wtf/StdLibExtras.h>
 
@@ -86,13 +85,13 @@ struct ExpressionRangeInfo {
         position = ((line & FatColumnModeLineMask) << FatColumnModeLineShift | (column & FatColumnModeColumnMask));
     }
 
-    void decodeFatLineMode(unsigned& line, unsigned& column)
+    void decodeFatLineMode(unsigned& line, unsigned& column) const
     {
         line = (position >> FatLineModeLineShift) & FatLineModeLineMask;
         column = position & FatLineModeColumnMask;
     }
 
-    void decodeFatColumnMode(unsigned& line, unsigned& column)
+    void decodeFatColumnMode(unsigned& line, unsigned& column) const
     {
         line = (position >> FatColumnModeLineShift) & FatColumnModeLineMask;
         column = position & FatColumnModeColumnMask;
@@ -107,6 +106,3 @@ struct ExpressionRangeInfo {
 };
 
 } // namespace JSC
-
-#endif // ExpressionRangeInfo_h
-
diff --git a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
index d34392121..073ce2757 100644
--- a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
+++ b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef FullBytecodeLiveness_h
-#define FullBytecodeLiveness_h
+#pragma once
 
 #include <wtf/FastBitVector.h>
 
@@ -35,36 +34,22 @@ class BytecodeLivenessAnalysis;
 typedef HashMap<unsigned, FastBitVector, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeToBitmapMap;
 
 class FullBytecodeLiveness {
+    WTF_MAKE_FAST_ALLOCATED;
 public:
-    FullBytecodeLiveness() : m_codeBlock(0) { }
-    
-    // We say "out" to refer to the bitvector that contains raw results for a bytecode
-    // instruction.
-    const FastBitVector& getOut(unsigned bytecodeIndex) const
+    const FastBitVector& getLiveness(unsigned bytecodeIndex) const
     {
-        BytecodeToBitmapMap::const_iterator iter = m_map.find(bytecodeIndex);
-        ASSERT(iter != m_map.end());
-        return iter->value;
+        return m_map[bytecodeIndex];
     }
     
     bool operandIsLive(int operand, unsigned bytecodeIndex) const
     {
-        return operandIsAlwaysLive(m_codeBlock, operand) || operandThatIsNotAlwaysLiveIsLive(m_codeBlock, getOut(bytecodeIndex), operand);
-    }
-    
-    FastBitVector getLiveness(unsigned bytecodeIndex) const
-    {
-        return getLivenessInfo(m_codeBlock, getOut(bytecodeIndex));
+        return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex), operand);
     }
     
 private:
     friend class BytecodeLivenessAnalysis;
     
-    CodeBlock* m_codeBlock;
-    BytecodeToBitmapMap m_map;
+    Vector<FastBitVector> m_map;
 };
 
 } // namespace JSC
-
-#endif // FullBytecodeLiveness_h
-
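
The reworked operandIsLive() above splits the query in two: operands that are live for the whole function short-circuit (in JSC these are roughly the arguments and frame-header slots), and everything else indexes the per-bytecode bit vector now stored directly in a Vector. A simplified sketch of that split; the operand encoding here is invented for illustration, with negative operands standing in for the always-live slots.

    #include <cassert>
    #include <vector>

    struct MiniLiveness {
        std::vector<std::vector<bool>> map; // one bit vector per bytecode index

        static bool operandIsAlwaysLive(int operand) { return operand < 0; }

        bool operandIsLive(int operand, unsigned bytecodeIndex) const
        {
            if (operandIsAlwaysLive(operand))
                return true; // argument-like: no per-bytecode lookup needed
            return map[bytecodeIndex][operand];
        }
    };

    int main()
    {
        MiniLiveness liveness;
        liveness.map = { { true, false } };
        assert(liveness.operandIsLive(-1, 0)); // always live
        assert(liveness.operandIsLive(0, 0));
        assert(!liveness.operandIsLive(1, 0));
        return 0;
    }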
diff --git a/Source/JavaScriptCore/bytecode/FunctionCodeBlock.cpp b/Source/JavaScriptCore/bytecode/FunctionCodeBlock.cpp
new file mode 100644
index 000000000..56eadc62d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/FunctionCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FunctionCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo FunctionCodeBlock::s_info = {
+    "FunctionCodeBlock", &Base::s_info, 0,
+    CREATE_METHOD_TABLE(FunctionCodeBlock)
+};
+
+void FunctionCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<FunctionCodeBlock*>(cell)->~FunctionCodeBlock();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/FunctionCodeBlock.h b/Source/JavaScriptCore/bytecode/FunctionCodeBlock.h
new file mode 100644
index 000000000..4f58d0911
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/FunctionCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+namespace JSC {
+
+class FunctionCodeBlock : public CodeBlock {
+public:
+    typedef CodeBlock Base;
+    DECLARE_INFO;
+
+    static FunctionCodeBlock* create(VM* vm, CopyParsedBlockTag, FunctionCodeBlock& other)
+    {
+        FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap))
+            FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), CopyParsedBlock, other);
+        instance->finishCreation(*vm, CopyParsedBlock, other);
+        return instance;
+    }
+
+    static FunctionCodeBlock* create(VM* vm, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope,
+        RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+    {
+        FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap))
+            FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), sourceOffset, firstLineColumnOffset);
+        instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+        return instance;
+    }
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+private:
+    FunctionCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, FunctionCodeBlock& other)
+        : CodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+
+    FunctionCodeBlock(VM* vm, Structure* structure, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope,
+        RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+        : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), sourceOffset, firstLineColumnOffset)
+    {
+    }
+    
+    static void destroy(JSCell*);
+};
+
+} // namespace JSC
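
The create() functions above follow JSC's two-phase construction idiom: placement-new into cell storage obtained from the GC, then finishCreation() once the object is in a safe, fully-constructed state. Below is a freestanding sketch of the shape of that idiom, with malloc standing in for allocateCell<T>(heap); none of it is JSC's allocator.

    #include <cstdlib>
    #include <new>

    struct Cell {
        int linked = 0;

        // Second phase: runs after the cell is constructed; in JSC this is
        // where the code block gets linked and registered with the VM.
        void finishCreation() { linked = 1; }

        static Cell* create()
        {
            void* storage = std::malloc(sizeof(Cell)); // stand-in for the GC allocator
            Cell* instance = new (storage) Cell();     // first phase: placement-new
            instance->finishCreation();                // second phase
            return instance;
        }
    };

    int main()
    {
        Cell* cell = Cell::create();
        int ok = cell->linked;
        cell->~Cell();
        std::free(cell);
        return ok ? 0 : 1;
    }

Splitting construction this way matters under a moving or scanning GC: the constructor must leave the cell scannable before any operation that could allocate, so anything that can GC is deferred to finishCreation().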
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index fbb3da1a5..1537cd9b1 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,279 +27,432 @@
 #include "GetByIdStatus.h"
 
 #include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "GetterSetterAccessCase.h"
+#include "IntrinsicGetterAccessCase.h"
+#include "JSCInlines.h"
 #include "JSScope.h"
 #include "LLIntData.h"
 #include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "ModuleNamespaceAccessCase.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+#include <wtf/ListDump.h>
 
 namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
+{
+    // Attempt to merge this variant with an already existing variant.
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].attemptToMerge(variant))
+            return true;
+    }
+    
+    // Make sure there is no overlap. We should have pruned out opportunities for
+    // overlap but it's possible that an inline cache got into a weird state. We are
+    // defensive and bail if we detect a crazy state.
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].structureSet().overlaps(variant.structureSet()))
+            return false;
+    }
+    
+    m_variants.append(variant);
+    return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool GetByIdStatus::hasExitSite(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+}
+#endif
 
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
     UNUSED_PARAM(uid);
-#if ENABLE(LLINT)
-    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+    VM& vm = *profiledBlock->vm();
     
-    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
+    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+    Opcode opcode = instruction[0].u.opcode;
+
+    ASSERT(opcode == LLInt::getOpcode(op_get_array_length) || opcode == LLInt::getOpcode(op_try_get_by_id) || opcode == LLInt::getOpcode(op_get_by_id_proto_load) || opcode == LLInt::getOpcode(op_get_by_id) || opcode == LLInt::getOpcode(op_get_by_id_unset));
+
+    // FIXME: We should not just bail if we see a try_get_by_id or a get_by_id_proto_load.
+    // https://bugs.webkit.org/show_bug.cgi?id=158039
+    if (opcode != LLInt::getOpcode(op_get_by_id))
         return GetByIdStatus(NoInformation, false);
 
-    Structure* structure = instruction[4].u.structure.get();
-    if (!structure)
+    StructureID structureID = instruction[4].u.structureID;
+    if (!structureID)
         return GetByIdStatus(NoInformation, false);
 
+    Structure* structure = vm.heap.structureIDTable().get(structureID);
+
     if (structure->takesSlowPathInDFGForImpureProperty())
         return GetByIdStatus(NoInformation, false);
 
-    unsigned attributesIgnored;
-    JSCell* specificValue;
-    PropertyOffset offset = structure->getConcurrently(
-        *profiledBlock->vm(), uid, attributesIgnored, specificValue);
-    if (structure->isDictionary())
-        specificValue = 0;
+    unsigned attributes;
+    PropertyOffset offset = structure->getConcurrently(uid, attributes);
     if (!isValidOffset(offset))
         return GetByIdStatus(NoInformation, false);
+    if (attributes & CustomAccessor)
+        return GetByIdStatus(NoInformation, false);
     
-    return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
-#else
-    return GetByIdStatus(NoInformation, false);
-#endif
+    return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
 }
 
-void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
-#if ENABLE(JIT)
-    // Validate the chain. If the chain is invalid, then currently the best thing
-    // we can do is to assume that TakesSlow is true. In the future, it might be
-    // worth exploring reifying the structure chain from the structure we've got
-    // instead of using the one from the cache, since that will do the right things
-    // if the structure chain has changed. But that may be harder, because we may
-    // then end up having a different type of access altogether. And it currently
-    // does not appear to be worth it to do so -- effectively, the heuristic we
-    // have now is that if the structure chain has changed between when it was
-    // cached on in the baseline JIT and when the DFG tried to inline the access,
-    // then we fall back on a polymorphic access.
-    if (!result.m_chain->isStillValid())
-        return;
+    ConcurrentJSLocker locker(profiledBlock->m_lock);
 
-    if (result.m_chain->head()->takesSlowPathInDFGForImpureProperty())
-        return;
-    size_t chainSize = result.m_chain->size();
-    for (size_t i = 0; i < chainSize; i++) {
-        if (result.m_chain->at(i)->takesSlowPathInDFGForImpureProperty())
-            return;
-    }
+    GetByIdStatus result;
 
-    JSObject* currentObject = result.m_chain->terminalPrototype();
-    Structure* currentStructure = result.m_chain->last();
+#if ENABLE(DFG_JIT)
+    result = computeForStubInfoWithoutExitSiteFeedback(
+        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
     
-    ASSERT_UNUSED(currentObject, currentObject);
-        
-    unsigned attributesIgnored;
-    JSCell* specificValue;
-        
-    result.m_offset = currentStructure->getConcurrently(
-        *profiledBlock->vm(), uid, attributesIgnored, specificValue);
-    if (currentStructure->isDictionary())
-        specificValue = 0;
-    if (!isValidOffset(result.m_offset))
-        return;
-        
-    result.m_structureSet.add(result.m_chain->head());
-    result.m_specificValue = JSValue(specificValue);
+    if (!result.takesSlowPath()
+        && hasExitSite(locker, profiledBlock, bytecodeIndex))
+        return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
 #else
-    UNUSED_PARAM(result);
-    UNUSED_PARAM(profiledBlock);
-    UNUSED_PARAM(uid);
-    UNREACHABLE_FOR_PLATFORM();
+    UNUSED_PARAM(map);
 #endif
+
+    if (!result)
+        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+    
+    return result;
 }
 
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
+#if ENABLE(DFG_JIT)
+GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    ConcurrentJITLocker locker(profiledBlock->m_lock);
-    
-    UNUSED_PARAM(profiledBlock);
-    UNUSED_PARAM(bytecodeIndex);
-    UNUSED_PARAM(uid);
+    GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+        locker, profiledBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));
+
+    if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+        return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
+    return result;
+}
+#endif // ENABLE(DFG_JIT)
+
 #if ENABLE(JIT)
-    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
-    if (!stubInfo || !stubInfo->seen)
-        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
-    
-    if (stubInfo->resetByGC)
-        return GetByIdStatus(TakesSlowPath, true);
-
-    PolymorphicAccessStructureList* list;
-    int listSize;
-    switch (stubInfo->accessType) {
-    case access_get_by_id_self_list:
-        list = stubInfo->u.getByIdSelfList.structureList;
-        listSize = stubInfo->u.getByIdSelfList.listSize;
-        break;
-    case access_get_by_id_proto_list:
-        list = stubInfo->u.getByIdProtoList.structureList;
-        listSize = stubInfo->u.getByIdProtoList.listSize;
-        break;
-    default:
-        list = 0;
-        listSize = 0;
-        break;
-    }
-    for (int i = 0; i < listSize; ++i) {
-        if (!list->list[i].isDirect)
-            return GetByIdStatus(MakesCalls, true);
+GetByIdStatus::GetByIdStatus(const ModuleNamespaceAccessCase& accessCase)
+    : m_state(ModuleNamespace)
+    , m_wasSeenInJIT(true)
+    , m_moduleNamespaceObject(accessCase.moduleNamespaceObject())
+    , m_moduleEnvironment(accessCase.moduleEnvironment())
+    , m_scopeOffset(accessCase.scopeOffset())
+{
+}
+
+GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
+    CallLinkStatus::ExitSiteData callExitSiteData)
+{
+    if (!stubInfo || !stubInfo->everConsidered)
+        return GetByIdStatus(NoInformation);
+
+    PolymorphicAccess* list = 0;
+    State slowPathState = TakesSlowPath;
+    if (stubInfo->cacheType == CacheType::Stub) {
+        list = stubInfo->u.stub;
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.doesCalls())
+                slowPathState = MakesCalls;
+        }
     }
     
-    // Next check if it takes slow case, in which case we want to be kind of careful.
-    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
-        return GetByIdStatus(TakesSlowPath, true);
+    if (stubInfo->tookSlowPath)
+        return GetByIdStatus(slowPathState);
     
     // Finally figure out if we can derive an access strategy.
     GetByIdStatus result;
+    result.m_state = Simple;
     result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
-    switch (stubInfo->accessType) {
-    case access_unset:
-        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+    switch (stubInfo->cacheType) {
+    case CacheType::Unset:
+        return GetByIdStatus(NoInformation);
         
-    case access_get_by_id_self: {
-        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
+    case CacheType::GetByIdSelf: {
+        Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
         if (structure->takesSlowPathInDFGForImpureProperty())
-            return GetByIdStatus(TakesSlowPath, true);
-        unsigned attributesIgnored;
-        JSCell* specificValue;
-        result.m_offset = structure->getConcurrently(
-            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
-        if (structure->isDictionary())
-            specificValue = 0;
+            return GetByIdStatus(slowPathState, true);
+        unsigned attributes;
+        GetByIdVariant variant;
+        variant.m_offset = structure->getConcurrently(uid, attributes);
+        if (!isValidOffset(variant.m_offset))
+            return GetByIdStatus(slowPathState, true);
+        if (attributes & CustomAccessor)
+            return GetByIdStatus(slowPathState, true);
         
-        if (isValidOffset(result.m_offset)) {
-            result.m_structureSet.add(structure);
-            result.m_specificValue = JSValue(specificValue);
-        }
-        
-        if (isValidOffset(result.m_offset))
-            ASSERT(result.m_structureSet.size());
-        break;
+        variant.m_structureSet.add(structure);
+        bool didAppend = result.appendVariant(variant);
+        ASSERT_UNUSED(didAppend, didAppend);
+        return result;
     }
         
-    case access_get_by_id_self_list: {
-        for (int i = 0; i < listSize; ++i) {
-            ASSERT(list->list[i].isDirect);
-            
-            Structure* structure = list->list[i].base.get();
-            if (structure->takesSlowPathInDFGForImpureProperty())
-                return GetByIdStatus(TakesSlowPath, true);
+    case CacheType::Stub: {
+        if (list->size() == 1) {
+            const AccessCase& access = list->at(0);
+            switch (access.type()) {
+            case AccessCase::ModuleNamespaceLoad:
+                return GetByIdStatus(access.as<ModuleNamespaceAccessCase>());
+            default:
+                break;
+            }
+        }
 
-            if (result.m_structureSet.contains(structure))
-                continue;
-            
-            unsigned attributesIgnored;
-            JSCell* specificValue;
-            PropertyOffset myOffset = structure->getConcurrently(
-                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
-            if (structure->isDictionary())
-                specificValue = 0;
+        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
+            const AccessCase& access = list->at(listIndex);
+            if (access.viaProxy())
+                return GetByIdStatus(slowPathState, true);
             
-            if (!isValidOffset(myOffset)) {
-                result.m_offset = invalidOffset;
-                break;
+            Structure* structure = access.structure();
+            if (!structure) {
+                // The null structure cases arise due to array.length and string.length. We have no way
+                // of creating a GetByIdVariant for those, and we don't really have to since the DFG
+                // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
+                // shouldn't have to use value profiling to discover something that the AccessCase
+                // could have told us. But, it works well enough. So, our only concern here is to not
+                // crash on null structure.
+                return GetByIdStatus(slowPathState, true);
             }
-                    
-            if (!i) {
-                result.m_offset = myOffset;
-                result.m_specificValue = JSValue(specificValue);
-            } else if (result.m_offset != myOffset) {
-                result.m_offset = invalidOffset;
-                break;
-            } else if (result.m_specificValue != JSValue(specificValue))
-                result.m_specificValue = JSValue();
             
-            result.m_structureSet.add(structure);
+            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+                structure, access.conditionSet(), uid);
+             
+            switch (complexGetStatus.kind()) {
+            case ComplexGetStatus::ShouldSkip:
+                continue;
+                 
+            case ComplexGetStatus::TakesSlowPath:
+                return GetByIdStatus(slowPathState, true);
+                 
+            case ComplexGetStatus::Inlineable: {
+                std::unique_ptr<CallLinkStatus> callLinkStatus;
+                JSFunction* intrinsicFunction = nullptr;
+                DOMJIT::GetterSetter* domJIT = nullptr;
+
+                switch (access.type()) {
+                case AccessCase::Load:
+                case AccessCase::GetGetter:
+                case AccessCase::Miss: {
+                    break;
+                }
+                case AccessCase::IntrinsicGetter: {
+                    intrinsicFunction = access.as<IntrinsicGetterAccessCase>().intrinsicFunction();
+                    break;
+                }
+                case AccessCase::Getter: {
+                    callLinkStatus = std::make_unique<CallLinkStatus>();
+                    if (CallLinkInfo* callLinkInfo = access.as<GetterSetterAccessCase>().callLinkInfo()) {
+                        *callLinkStatus = CallLinkStatus::computeFor(
+                            locker, profiledBlock, *callLinkInfo, callExitSiteData);
+                    }
+                    break;
+                }
+                case AccessCase::CustomAccessorGetter: {
+                    domJIT = access.as<GetterSetterAccessCase>().domJIT();
+                    if (!domJIT)
+                        return GetByIdStatus(slowPathState, true);
+                    result.m_state = Custom;
+                    break;
+                }
+                default: {
+                    // FIXME: It would be totally sweet to support more of these at some point in the
+                    // future. https://bugs.webkit.org/show_bug.cgi?id=133052
+                    return GetByIdStatus(slowPathState, true);
+                } }
+
+                ASSERT((AccessCase::Miss == access.type()) == (access.offset() == invalidOffset));
+                GetByIdVariant variant(
+                    StructureSet(structure), complexGetStatus.offset(),
+                    complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
+                    intrinsicFunction,
+                    domJIT);
+
+                if (!result.appendVariant(variant))
+                    return GetByIdStatus(slowPathState, true);
+
+                if (domJIT) {
+                    // Give up when custom accesses are not merged into one.
+                    if (result.numVariants() != 1)
+                        return GetByIdStatus(slowPathState, true);
+                } else {
+                    // Give up when custom access and simple access are mixed.
+                    if (result.m_state == Custom)
+                        return GetByIdStatus(slowPathState, true);
+                }
+                break;
+            } }
         }
-                    
-        if (isValidOffset(result.m_offset))
-            ASSERT(result.m_structureSet.size());
-        break;
-    }
         
-    case access_get_by_id_proto: {
-        if (!stubInfo->u.getByIdProto.isDirect)
-            return GetByIdStatus(MakesCalls, true);
-        result.m_chain = adoptRef(new IntendedStructureChain(
-            profiledBlock,
-            stubInfo->u.getByIdProto.baseObjectStructure.get(),
-            stubInfo->u.getByIdProto.prototypeStructure.get()));
-        computeForChain(result, profiledBlock, uid);
-        break;
-    }
-        
-    case access_get_by_id_chain: {
-        if (!stubInfo->u.getByIdChain.isDirect)
-            return GetByIdStatus(MakesCalls, true);
-        result.m_chain = adoptRef(new IntendedStructureChain(
-            profiledBlock,
-            stubInfo->u.getByIdChain.baseObjectStructure.get(),
-            stubInfo->u.getByIdChain.chain.get(),
-            stubInfo->u.getByIdChain.count));
-        computeForChain(result, profiledBlock, uid);
-        break;
+        return result;
     }
         
     default:
-        ASSERT(!isValidOffset(result.m_offset));
-        break;
+        return GetByIdStatus(slowPathState, true);
     }
     
-    if (!isValidOffset(result.m_offset)) {
-        result.m_state = TakesSlowPath;
-        result.m_structureSet.clear();
-        result.m_chain.clear();
-        result.m_specificValue = JSValue();
-    } else
-        result.m_state = Simple;
-    
-    return result;
-#else // ENABLE(JIT)
-    UNUSED_PARAM(map);
-    return GetByIdStatus(NoInformation, false);
+    RELEASE_ASSERT_NOT_REACHED();
+    return GetByIdStatus();
+}
 #endif // ENABLE(JIT)
+
+GetByIdStatus GetByIdStatus::computeFor(
+    CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
+    StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+    if (dfgBlock) {
+        CallLinkStatus::ExitSiteData exitSiteData;
+        {
+            ConcurrentJSLocker locker(profiledBlock->m_lock);
+            exitSiteData = CallLinkStatus::computeExitSiteData(
+                locker, profiledBlock, codeOrigin.bytecodeIndex);
+        }
+        
+        GetByIdStatus result;
+        {
+            ConcurrentJSLocker locker(dfgBlock->m_lock);
+            result = computeForStubInfoWithoutExitSiteFeedback(
+                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+        }
+
+        if (result.takesSlowPath())
+            return result;
+    
+        {
+            ConcurrentJSLocker locker(profiledBlock->m_lock);
+            if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+                return GetByIdStatus(TakesSlowPath, true);
+        }
+        
+        if (result.isSet())
+            return result;
+    }
+#else
+    UNUSED_PARAM(dfgBlock);
+    UNUSED_PARAM(dfgMap);
+#endif
+
+    return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
 }
 
-GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
 {
     // For now we only handle the super simple self access case. We could handle the
     // prototype case in the future.
     
-    if (!structure)
-        return GetByIdStatus(TakesSlowPath);
+    if (set.isEmpty())
+        return GetByIdStatus();
 
-    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
-        return GetByIdStatus(TakesSlowPath);
-    
-    if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+    if (parseIndex(*uid))
         return GetByIdStatus(TakesSlowPath);
     
-    if (!structure->propertyAccessesAreCacheable())
-        return GetByIdStatus(TakesSlowPath);
-
     GetByIdStatus result;
-    result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, StringImpl*) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
-    unsigned attributes;
-    JSCell* specificValue;
-    result.m_offset = structure->getConcurrently(vm, uid, attributes, specificValue);
-    if (!isValidOffset(result.m_offset))
-        return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
-    if (attributes & Accessor)
-        return GetByIdStatus(MakesCalls);
-    if (structure->isDictionary())
-        specificValue = 0;
-    result.m_structureSet.add(structure);
-    result.m_specificValue = JSValue(specificValue);
     result.m_state = Simple;
+    result.m_wasSeenInJIT = false;
+    for (unsigned i = 0; i < set.size(); ++i) {
+        Structure* structure = set[i];
+        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+            return GetByIdStatus(TakesSlowPath);
+        
+        if (!structure->propertyAccessesAreCacheable())
+            return GetByIdStatus(TakesSlowPath);
+        
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (!isValidOffset(offset))
+            return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
+        if (attributes & Accessor)
+            return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
+        if (attributes & CustomAccessor)
+            return GetByIdStatus(TakesSlowPath);
+        
+        if (!result.appendVariant(GetByIdVariant(structure, offset)))
+            return GetByIdStatus(TakesSlowPath);
+    }
+    
     return result;
 }
 
+bool GetByIdStatus::makesCalls() const
+{
+    switch (m_state) {
+    case NoInformation:
+    case TakesSlowPath:
+    case Custom:
+    case ModuleNamespace:
+        return false;
+    case Simple:
+        for (unsigned i = m_variants.size(); i--;) {
+            if (m_variants[i].callLinkStatus())
+                return true;
+        }
+        return false;
+    case MakesCalls:
+        return true;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+
+    return false;
+}
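+
+// Note that a Simple status can still make calls: a variant carries a
+// CallLinkStatus when the cached access invokes a getter, and the loop above
+// reports that.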
+
+void GetByIdStatus::filter(const StructureSet& set)
+{
+    if (m_state != Simple)
+        return;
+    
+    // FIXME: We could also filter the variants themselves.
+    
+    m_variants.removeAllMatching(
+        [&] (GetByIdVariant& variant) -> bool {
+            return !variant.structureSet().overlaps(set);
+        });
+    
+    if (m_variants.isEmpty())
+        m_state = NoInformation;
+}
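+
+// For example, if profiling produced variants over structures { s1 } and
+// { s2 } but the compiler has proved the base must have structure s1,
+// filter({ s1 }) drops the s2 variant, leaving a monomorphic status.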
+
+void GetByIdStatus::dump(PrintStream& out) const
+{
+    out.print("(");
+    switch (m_state) {
+    case NoInformation:
+        out.print("NoInformation");
+        break;
+    case Simple:
+        out.print("Simple");
+        break;
+    case Custom:
+        out.print("Custom");
+        break;
+    case ModuleNamespace:
+        out.print("ModuleNamespace");
+        break;
+    case TakesSlowPath:
+        out.print("TakesSlowPath");
+        break;
+    case MakesCalls:
+        out.print("MakesCalls");
+        break;
+    }
+    out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
+}
+
 } // namespace JSC
 
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.h b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
index a1e801cca..de47bf5cc 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,85 +23,121 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef GetByIdStatus_h
-#define GetByIdStatus_h
+#pragma once
 
-#include "IntendedStructureChain.h"
-#include "PropertyOffset.h"
-#include "StructureSet.h"
-#include "StructureStubInfo.h"
+#include "CallLinkStatus.h"
+#include "CodeOrigin.h"
+#include "ConcurrentJSLock.h"
+#include "ExitingJITType.h"
+#include "GetByIdVariant.h"
+#include "ScopeOffset.h"
 
 namespace JSC {
 
+class AccessCase;
 class CodeBlock;
+class JSModuleEnvironment;
+class JSModuleNamespaceObject;
+class ModuleNamespaceAccessCase;
+class StructureStubInfo;
+
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
 
 class GetByIdStatus {
 public:
     enum State {
-        NoInformation,  // It's uncached so we have no information.
-        Simple,         // It's cached for a simple access to a known object property with
-                        // a possible structure chain and a possible specific value.
-        TakesSlowPath,  // It's known to often take slow path.
-        MakesCalls      // It's known to take paths that make calls.
+        // It's uncached so we have no information.
+        NoInformation,
+        // It's cached for a simple access to a known object property with
+        // a possible structure chain and a possible specific value.
+        Simple,
+        // It's cached for a custom accessor with a possible structure chain.
+        Custom,
+        // It's cached for an access to a module namespace object's binding.
+        ModuleNamespace,
+        // It's known to often take slow path.
+        TakesSlowPath,
+        // It's known to take paths that make calls.
+        MakesCalls,
     };
 
     GetByIdStatus()
         : m_state(NoInformation)
-        , m_offset(invalidOffset)
     {
     }
     
     explicit GetByIdStatus(State state)
         : m_state(state)
-        , m_offset(invalidOffset)
     {
         ASSERT(state == NoInformation || state == TakesSlowPath || state == MakesCalls);
     }
+
     
     GetByIdStatus(
-        State state, bool wasSeenInJIT, const StructureSet& structureSet = StructureSet(),
-        PropertyOffset offset = invalidOffset, JSValue specificValue = JSValue(), PassRefPtr<IntendedStructureChain> chain = nullptr)
+        State state, bool wasSeenInJIT, const GetByIdVariant& variant = GetByIdVariant())
         : m_state(state)
-        , m_structureSet(structureSet)
-        , m_chain(chain)
-        , m_specificValue(specificValue)
-        , m_offset(offset)
         , m_wasSeenInJIT(wasSeenInJIT)
     {
-        ASSERT((state == Simple) == (offset != invalidOffset));
+        ASSERT((state == Simple || state == Custom) == variant.isSet());
+        m_variants.append(variant);
     }
     
-    static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, StringImpl* uid);
-    static GetByIdStatus computeFor(VM&, Structure*, StringImpl* uid);
+    static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    static GetByIdStatus computeFor(const StructureSet&, UniquedStringImpl* uid);
     
+    static GetByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid);
+
+#if ENABLE(DFG_JIT)
+    static GetByIdStatus computeForStubInfo(const ConcurrentJSLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid);
+#endif
+
     State state() const { return m_state; }
     
     bool isSet() const { return m_state != NoInformation; }
     bool operator!() const { return !isSet(); }
     bool isSimple() const { return m_state == Simple; }
-    bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; }
-    bool makesCalls() const { return m_state == MakesCalls; }
-    
-    const StructureSet& structureSet() const { return m_structureSet; }
-    IntendedStructureChain* chain() const { return const_cast<IntendedStructureChain*>(m_chain.get()); } // Returns null if this is a direct access.
-    JSValue specificValue() const { return m_specificValue; } // Returns JSValue() if there is no specific value.
-    PropertyOffset offset() const { return m_offset; }
+    bool isCustom() const { return m_state == Custom; }
+    bool isModuleNamespace() const { return m_state == ModuleNamespace; }
+
+    size_t numVariants() const { return m_variants.size(); }
+    const Vector<GetByIdVariant, 1>& variants() const { return m_variants; }
+    const GetByIdVariant& at(size_t index) const { return m_variants[index]; }
+    const GetByIdVariant& operator[](size_t index) const { return at(index); }
+
+    bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls || m_state == Custom || m_state == ModuleNamespace; }
+    bool makesCalls() const;
     
     bool wasSeenInJIT() const { return m_wasSeenInJIT; }
     
+    // Attempts to reduce the set of variants to fit the given structure set. This may be approximate.
+    void filter(const StructureSet&);
+
+    JSModuleNamespaceObject* moduleNamespaceObject() const { return m_moduleNamespaceObject; }
+    JSModuleEnvironment* moduleEnvironment() const { return m_moduleEnvironment; }
+    ScopeOffset scopeOffset() const { return m_scopeOffset; }
+    
+    void dump(PrintStream&) const;
+    
 private:
-    static void computeForChain(GetByIdStatus& result, CodeBlock*, StringImpl* uid);
-    static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, StringImpl* uid);
+#if ENABLE(DFG_JIT)
+    static bool hasExitSite(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#endif
+#if ENABLE(JIT)
+    GetByIdStatus(const ModuleNamespaceAccessCase&);
+    static GetByIdStatus computeForStubInfoWithoutExitSiteFeedback(
+        const ConcurrentJSLocker&, CodeBlock* profiledBlock, StructureStubInfo*,
+        UniquedStringImpl* uid, CallLinkStatus::ExitSiteData);
+#endif
+    static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    
+    bool appendVariant(const GetByIdVariant&);
     
     State m_state;
-    StructureSet m_structureSet;
-    RefPtr<IntendedStructureChain> m_chain;
-    JSValue m_specificValue;
-    PropertyOffset m_offset;
+    Vector<GetByIdVariant, 1> m_variants;
     bool m_wasSeenInJIT;
+    JSModuleNamespaceObject* m_moduleNamespaceObject { nullptr };
+    JSModuleEnvironment* m_moduleEnvironment { nullptr };
+    ScopeOffset m_scopeOffset { };
 };
 
 } // namespace JSC
-
-#endif // PropertyAccessStatus_h
-
diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp
new file mode 100644
index 000000000..d940b62ca
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "GetByIdVariant.h"
+
+#include "CallLinkStatus.h"
+#include "JSCInlines.h"
+#include 
+
+namespace JSC {
+
+GetByIdVariant::GetByIdVariant(
+    const StructureSet& structureSet, PropertyOffset offset,
+    const ObjectPropertyConditionSet& conditionSet,
+    std::unique_ptr<CallLinkStatus> callLinkStatus,
+    JSFunction* intrinsicFunction,
+    DOMJIT::GetterSetter* domJIT)
+    : m_structureSet(structureSet)
+    , m_conditionSet(conditionSet)
+    , m_offset(offset)
+    , m_callLinkStatus(WTFMove(callLinkStatus))
+    , m_intrinsicFunction(intrinsicFunction)
+    , m_domJIT(domJIT)
+{
+    if (!structureSet.size()) {
+        ASSERT(offset == invalidOffset);
+        ASSERT(conditionSet.isEmpty());
+    }
+    if (intrinsicFunction)
+        ASSERT(intrinsic() != NoIntrinsic);
+}
+                     
+GetByIdVariant::~GetByIdVariant() { }
+
+GetByIdVariant::GetByIdVariant(const GetByIdVariant& other)
+    : GetByIdVariant()
+{
+    *this = other;
+}
+
+GetByIdVariant& GetByIdVariant::operator=(const GetByIdVariant& other)
+{
+    m_structureSet = other.m_structureSet;
+    m_conditionSet = other.m_conditionSet;
+    m_offset = other.m_offset;
+    m_intrinsicFunction = other.m_intrinsicFunction;
+    m_domJIT = other.m_domJIT;
+    if (other.m_callLinkStatus)
+        m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus);
+    else
+        m_callLinkStatus = nullptr;
+    return *this;
+}
+
+inline bool GetByIdVariant::canMergeIntrinsicStructures(const GetByIdVariant& other) const
+{
+    if (m_intrinsicFunction != other.m_intrinsicFunction)
+        return false;
+    switch (intrinsic()) {
+    case TypedArrayByteLengthIntrinsic: {
+        // We can merge these sets as long as the element size of the two sets is the same.
+        TypedArrayType thisType = (*m_structureSet.begin())->classInfo()->typedArrayStorageType;
+        TypedArrayType otherType = (*other.m_structureSet.begin())->classInfo()->typedArrayStorageType;
+
+        ASSERT(isTypedView(thisType) && isTypedView(otherType));
+
+        return logElementSize(thisType) == logElementSize(otherType);
+    }
+
+    default:
+        return true;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+bool GetByIdVariant::attemptToMerge(const GetByIdVariant& other)
+{
+    if (m_offset != other.m_offset)
+        return false;
+    if (m_callLinkStatus || other.m_callLinkStatus)
+        return false;
+
+    if (!canMergeIntrinsicStructures(other))
+        return false;
+
+    if (m_domJIT != other.m_domJIT)
+        return false;
+
+    if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty())
+        return false;
+    
+    ObjectPropertyConditionSet mergedConditionSet;
+    if (!m_conditionSet.isEmpty()) {
+        mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet);
+        if (!mergedConditionSet.isValid() || !mergedConditionSet.hasOneSlotBaseCondition())
+            return false;
+    }
+    m_conditionSet = mergedConditionSet;
+    
+    m_structureSet.merge(other.m_structureSet);
+    
+    return true;
+}
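+
+// Example: <{ s1 }, offset 8> and <{ s2 }, offset 8> merge into
+// <{ s1, s2 }, offset 8>, since a single load serves both structures.
+// <{ s1 }, offset 8> and <{ s2 }, offset 16> do not merge, because the
+// two accesses would need different code.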
+
+void GetByIdVariant::dump(PrintStream& out) const
+{
+    dumpInContext(out, 0);
+}
+
+void GetByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    if (!isSet()) {
+        out.print("");
+        return;
+    }
+    
+    out.print(
+        "<", inContext(structureSet(), context), ", ", inContext(m_conditionSet, context));
+    out.print(", offset = ", offset());
+    if (m_callLinkStatus)
+        out.print(", call = ", *m_callLinkStatus);
+    if (m_intrinsicFunction)
+        out.print(", intrinsic = ", *m_intrinsicFunction);
+    if (m_domJIT)
+        out.print(", domjit = ", RawPointer(m_domJIT));
+    out.print(">");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.h b/Source/JavaScriptCore/bytecode/GetByIdVariant.h
new file mode 100644
index 000000000..8ded24867
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallLinkStatus.h"
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+#include "StructureSet.h"
+
+namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+class CallLinkStatus;
+class GetByIdStatus;
+struct DumpContext;
+
+class GetByIdVariant {
+public:
+    GetByIdVariant(
+        const StructureSet& structureSet = StructureSet(), PropertyOffset offset = invalidOffset,
+        const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+        std::unique_ptr<CallLinkStatus> = nullptr,
+        JSFunction* = nullptr,
+        DOMJIT::GetterSetter* = nullptr);
+
+    ~GetByIdVariant();
+    
+    GetByIdVariant(const GetByIdVariant&);
+    GetByIdVariant& operator=(const GetByIdVariant&);
+    
+    bool isSet() const { return !!m_structureSet.size(); }
+    bool operator!() const { return !isSet(); }
+    const StructureSet& structureSet() const { return m_structureSet; }
+    StructureSet& structureSet() { return m_structureSet; }
+
+    // A non-empty condition set means that this is a prototype load.
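+    // For example, a load of o.f that finds "f" on o's prototype keeps o's
+    // structure in m_structureSet and records conditions that "f" is absent
+    // on o and present at a fixed offset on the prototype.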
+    const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+    
+    PropertyOffset offset() const { return m_offset; }
+    CallLinkStatus* callLinkStatus() const { return m_callLinkStatus.get(); }
+    JSFunction* intrinsicFunction() const { return m_intrinsicFunction; }
+    Intrinsic intrinsic() const { return m_intrinsicFunction ? m_intrinsicFunction->intrinsic() : NoIntrinsic; }
+    DOMJIT::GetterSetter* domJIT() const { return m_domJIT; }
+
+    bool isPropertyUnset() const { return offset() == invalidOffset; }
+
+    bool attemptToMerge(const GetByIdVariant& other);
+    
+    void dump(PrintStream&) const;
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    
+private:
+    friend class GetByIdStatus;
+
+    bool canMergeIntrinsicStructures(const GetByIdVariant&) const;
+    
+    StructureSet m_structureSet;
+    ObjectPropertyConditionSet m_conditionSet;
+    PropertyOffset m_offset;
+    std::unique_ptr<CallLinkStatus> m_callLinkStatus;
+    JSFunction* m_intrinsicFunction;
+    DOMJIT::GetterSetter* m_domJIT;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp b/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp
new file mode 100644
index 000000000..9b6bccc29
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GetterSetterAccessCase.h"
+
+#if ENABLE(JIT)
+
+#include "DOMJITAccessCasePatchpointParams.h"
+#include "DOMJITCallDOMGetterPatchpoint.h"
+#include "DOMJITGetterSetter.h"
+#include "HeapInlines.h"
+#include "JSCJSValueInlines.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+static const bool verbose = false;
+
+GetterSetterAccessCase::GetterSetterAccessCase(VM& vm, JSCell* owner, AccessType accessType, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet, JSObject* customSlotBase)
+    : Base(vm, owner, accessType, offset, structure, conditionSet, viaProxy, additionalSet)
+{
+    m_customSlotBase.setMayBeNull(vm, owner, customSlotBase);
+}
+
+
+std::unique_ptr<AccessCase> GetterSetterAccessCase::create(
+    VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
+    const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
+    PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase, DOMJIT::GetterSetter* domJIT)
+{
+    switch (type) {
+    case Getter:
+    case CustomAccessorGetter:
+    case CustomValueGetter:
+        break;
+    default:
+        ASSERT_NOT_REACHED();
+    };
+
+    std::unique_ptr<GetterSetterAccessCase> result(new GetterSetterAccessCase(vm, owner, type, offset, structure, conditionSet, viaProxy, additionalSet, customSlotBase));
+    result->m_domJIT = domJIT;
+    result->m_customAccessor.getter = customGetter;
+    return WTFMove(result);
+}
+
+std::unique_ptr<AccessCase> GetterSetterAccessCase::create(VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
+    const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
+    JSObject* customSlotBase)
+{
+    ASSERT(type == Setter || type == CustomValueSetter || type == CustomAccessorSetter);
+    std::unique_ptr<GetterSetterAccessCase> result(new GetterSetterAccessCase(vm, owner, type, offset, structure, conditionSet, false, nullptr, customSlotBase));
+    result->m_customAccessor.setter = customSetter;
+    return WTFMove(result);
+}
+
+
+GetterSetterAccessCase::~GetterSetterAccessCase()
+{
+}
+
+
+GetterSetterAccessCase::GetterSetterAccessCase(const GetterSetterAccessCase& other)
+    : Base(other)
+    , m_customSlotBase(other.m_customSlotBase)
+{
+    m_customAccessor.opaque = other.m_customAccessor.opaque;
+    m_domJIT = other.m_domJIT;
+}
+
+std::unique_ptr<AccessCase> GetterSetterAccessCase::clone() const
+{
+    std::unique_ptr<GetterSetterAccessCase> result(new GetterSetterAccessCase(*this));
+    result->resetState();
+    return WTFMove(result);
+}
+
+JSObject* GetterSetterAccessCase::alternateBase() const
+{
+    if (customSlotBase())
+        return customSlotBase();
+    return conditionSet().slotBaseCondition().object();
+}
+
+void GetterSetterAccessCase::dumpImpl(PrintStream& out, CommaPrinter& comma) const
+{
+    Base::dumpImpl(out, comma);
+    out.print(comma, "customSlotBase = ", RawPointer(customSlotBase()));
+    if (callLinkInfo())
+        out.print(comma, "callLinkInfo = ", RawPointer(callLinkInfo()));
+    out.print(comma, "customAccessor = ", RawPointer(m_customAccessor.opaque));
+}
+
+void GetterSetterAccessCase::emitDOMJITGetter(AccessGenerationState& state, GPRReg baseForGetGPR)
+{
+    CCallHelpers& jit = *state.jit;
+    StructureStubInfo& stubInfo = *state.stubInfo;
+    JSValueRegs valueRegs = state.valueRegs;
+    GPRReg baseGPR = state.baseGPR;
+    GPRReg scratchGPR = state.scratchGPR;
+
+    // We construct the environment that can execute the DOMJIT::Patchpoint here.
+    Ref<DOMJIT::CallDOMGetterPatchpoint> patchpoint = domJIT()->callDOMGetter();
+
+    Vector<GPRReg> gpScratch;
+    Vector<FPRReg> fpScratch;
+    Vector<DOMJIT::Value> regs;
+
+    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+    allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+    allocator.lock(valueRegs);
+    allocator.lock(scratchGPR);
+
+    GPRReg paramBaseGPR = InvalidGPRReg;
+    GPRReg paramGlobalObjectGPR = InvalidGPRReg;
+    JSValueRegs paramValueRegs = valueRegs;
+    GPRReg remainingScratchGPR = InvalidGPRReg;
+
+    // valueRegs and baseForGetGPR may be the same register. For example, the Baseline JIT passes the same regT0
+    // for baseGPR and valueRegs, and in the FTL there is no guarantee that baseForGetGPR is distinct from the
+    // result. To keep the implementation simple, DOMJIT::Patchpoint assumes that result registers always
+    // early-interfere with input registers, in this case baseForGetGPR. So if baseForGetGPR aliases valueRegs,
+    // we first move it to another register.
+    if (baseForGetGPR != valueRegs.payloadGPR()) {
+        paramBaseGPR = baseForGetGPR;
+        if (!patchpoint->requireGlobalObject)
+            remainingScratchGPR = scratchGPR;
+        else
+            paramGlobalObjectGPR = scratchGPR;
+    } else {
+        jit.move(valueRegs.payloadGPR(), scratchGPR);
+        paramBaseGPR = scratchGPR;
+        if (patchpoint->requireGlobalObject)
+            paramGlobalObjectGPR = allocator.allocateScratchGPR();
+    }
+
+    JSGlobalObject* globalObjectForDOMJIT = structure()->globalObject();
+
+    regs.append(paramValueRegs);
+    regs.append(paramBaseGPR);
+    if (patchpoint->requireGlobalObject) {
+        ASSERT(paramGlobalObjectGPR != InvalidGPRReg);
+        regs.append(DOMJIT::Value(paramGlobalObjectGPR, globalObjectForDOMJIT));
+    }
+
+    if (patchpoint->numGPScratchRegisters) {
+        unsigned i = 0;
+        if (remainingScratchGPR != InvalidGPRReg) {
+            gpScratch.append(remainingScratchGPR);
+            ++i;
+        }
+        for (; i < patchpoint->numGPScratchRegisters; ++i)
+            gpScratch.append(allocator.allocateScratchGPR());
+    }
+
+    for (unsigned i = 0; i < patchpoint->numFPScratchRegisters; ++i)
+        fpScratch.append(allocator.allocateScratchFPR());
+
+    // Let's store the reused registers to the stack. After that, we can use allocated scratch registers.
+    ScratchRegisterAllocator::PreservedState preservedState =
+        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+
+    if (verbose) {
+        dataLog("baseGPR = ", baseGPR, "\n");
+        dataLog("valueRegs = ", valueRegs, "\n");
+        dataLog("scratchGPR = ", scratchGPR, "\n");
+        dataLog("paramBaseGPR = ", paramBaseGPR, "\n");
+        if (paramGlobalObjectGPR != InvalidGPRReg)
+            dataLog("paramGlobalObjectGPR = ", paramGlobalObjectGPR, "\n");
+        dataLog("paramValueRegs = ", paramValueRegs, "\n");
+        for (unsigned i = 0; i < patchpoint->numGPScratchRegisters; ++i)
+            dataLog("gpScratch[", i, "] = ", gpScratch[i], "\n");
+    }
+
+    if (patchpoint->requireGlobalObject)
+        jit.move(CCallHelpers::TrustedImmPtr(globalObjectForDOMJIT), paramGlobalObjectGPR);
+
+    // We only spill the registers that the DOMJIT::Patchpoint uses. Any register that is not spilled here must
+    // be in the used-register set passed by the caller (Baseline, DFG, or FTL) if it needs to be preserved.
+    // Note that a register can be locked without being in the used-register set: for example, the caller may
+    // alias baseGPR with valueRegs and omit it from the used registers, since it is about to be overwritten.
+    RegisterSet registersToSpillForCCall;
+    for (auto& value : regs) {
+        DOMJIT::Reg reg = value.reg();
+        if (reg.isJSValueRegs())
+            registersToSpillForCCall.set(reg.jsValueRegs());
+        else if (reg.isGPR())
+            registersToSpillForCCall.set(reg.gpr());
+        else
+            registersToSpillForCCall.set(reg.fpr());
+    }
+    for (GPRReg reg : gpScratch)
+        registersToSpillForCCall.set(reg);
+    for (FPRReg reg : fpScratch)
+        registersToSpillForCCall.set(reg);
+    registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());
+
+    DOMJITAccessCasePatchpointParams params(WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
+    patchpoint->generator()->run(jit, params);
+    allocator.restoreReusedRegistersByPopping(jit, preservedState);
+    state.succeed();
+    
+    CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
+    if (!exceptions.empty()) {
+        exceptions.link(&jit);
+        allocator.restoreReusedRegistersByPopping(jit, preservedState);
+        state.emitExplicitExceptionHandler();
+    }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.h b/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.h
new file mode 100644
index 000000000..06192dac2
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "ProxyableAccessCase.h"
+
+namespace JSC {
+
+class GetterSetterAccessCase : public ProxyableAccessCase {
+public:
+    typedef ProxyableAccessCase Base;
+    friend class AccessCase;
+
+    // This can return null if it hasn't been generated yet. That's
+    // actually somewhat likely because of how we do buffering of new cases.
+    CallLinkInfo* callLinkInfo() const { return m_callLinkInfo.get(); }
+    JSObject* customSlotBase() const { return m_customSlotBase.get(); }
+    DOMJIT::GetterSetter* domJIT() const { return m_domJIT; }
+
+    JSObject* alternateBase() const override;
+
+    void emitDOMJITGetter(AccessGenerationState&, GPRReg baseForGetGPR);
+
+    static std::unique_ptr<AccessCase> create(
+        VM&, JSCell* owner, AccessType, PropertyOffset, Structure*,
+        const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+        bool viaProxy = false,
+        WatchpointSet* additionalSet = nullptr,
+        PropertySlot::GetValueFunc = nullptr,
+        JSObject* customSlotBase = nullptr,
+        DOMJIT::GetterSetter* = nullptr);
+
+    static std::unique_ptr<AccessCase> create(VM&, JSCell* owner, AccessType, Structure*, PropertyOffset,
+        const ObjectPropertyConditionSet&, PutPropertySlot::PutValueFunc = nullptr,
+        JSObject* customSlotBase = nullptr);
+
+    void dumpImpl(PrintStream&, CommaPrinter&) const override;
+    std::unique_ptr<AccessCase> clone() const override;
+
+    ~GetterSetterAccessCase();
+
+private:
+    GetterSetterAccessCase(VM&, JSCell*, AccessType, PropertyOffset, Structure*, const ObjectPropertyConditionSet&, bool viaProxy, WatchpointSet* additionalSet, JSObject* customSlotBase);
+
+    GetterSetterAccessCase(const GetterSetterAccessCase&);
+
+    WriteBarrier<JSObject> m_customSlotBase;
+    std::unique_ptr<CallLinkInfo> m_callLinkInfo;
+    union {
+        PutPropertySlot::PutValueFunc setter;
+        PropertySlot::GetValueFunc getter;
+        void* opaque;
+    } m_customAccessor;
+    DOMJIT::GetterSetter* m_domJIT;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/GlobalCodeBlock.h b/Source/JavaScriptCore/bytecode/GlobalCodeBlock.h
new file mode 100644
index 000000000..aa29cca33
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GlobalCodeBlock.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+// Program code is not marked by any function, so we make the global object
+// responsible for marking it.
+
+class GlobalCodeBlock : public CodeBlock {
+    typedef CodeBlock Base;
+
+protected:
+    GlobalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, GlobalCodeBlock& other)
+        : CodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+
+    GlobalCodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+        : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), sourceOffset, firstLineColumnOffset)
+    {
+    }
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/HandlerInfo.h b/Source/JavaScriptCore/bytecode/HandlerInfo.h
index 8396c9607..752defe8a 100644
--- a/Source/JavaScriptCore/bytecode/HandlerInfo.h
+++ b/Source/JavaScriptCore/bytecode/HandlerInfo.h
@@ -23,25 +23,100 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef HandlerInfo_h
-#define HandlerInfo_h
+#pragma once
 
 #include "CodeLocation.h"
-#include 
+#include <wtf/Vector.h>
 
 namespace JSC {
 
-struct HandlerInfo {
+enum class HandlerType {
+    Catch = 0,
+    Finally = 1,
+    SynthesizedCatch = 2,
+    SynthesizedFinally = 3
+};
+
+enum class RequiredHandler {
+    CatchHandler,
+    AnyHandler
+};
+
+struct HandlerInfoBase {
+    HandlerType type() const { return static_cast<HandlerType>(typeBits); }
+    void setType(HandlerType type) { typeBits = static_cast<uint32_t>(type); }
+
+    const char* typeName()
+    {
+        switch (type()) {
+        case HandlerType::Catch:
+            return "catch";
+        case HandlerType::Finally:
+            return "finally";
+        case HandlerType::SynthesizedCatch:
+            return "synthesized catch";
+        case HandlerType::SynthesizedFinally:
+            return "synthesized finally";
+        default:
+            ASSERT_NOT_REACHED();
+        }
+        return nullptr;
+    }
+
+    bool isCatchHandler() const { return type() == HandlerType::Catch; }
+
+    template<typename Handler>
+    static Handler* handlerForIndex(Vector<Handler>& exceptionHandlers, unsigned index, RequiredHandler requiredHandler)
+    {
+        for (Handler& handler : exceptionHandlers) {
+            if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
+                continue;
+
+            // Handlers are ordered innermost first, so the first handler we encounter
+            // that contains the source address is the correct handler to use.
+            // The index used is either a bytecode offset or a CallSiteIndex.
+            if (handler.start <= index && handler.end > index)
+                return &handler;
+        }
+
+        return nullptr;
+    }
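+
+    // Example: in `try { try { f(); } catch (e) { } } catch (e) { }`, both
+    // handlers cover the call to f(), but the inner catch comes first in the
+    // vector, so it is the one handlerForIndex returns.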
+
     uint32_t start;
     uint32_t end;
     uint32_t target;
-    uint32_t scopeDepth;
+    uint32_t typeBits : 2; // HandlerType
+};
+
+struct UnlinkedHandlerInfo : public HandlerInfoBase {
+    UnlinkedHandlerInfo(uint32_t start, uint32_t end, uint32_t target, HandlerType handlerType)
+    {
+        this->start = start;
+        this->end = end;
+        this->target = target;
+        setType(handlerType);
+        ASSERT(type() == handlerType);
+    }
+};
+
+struct HandlerInfo : public HandlerInfoBase {
+    void initialize(const UnlinkedHandlerInfo& unlinkedInfo)
+    {
+        start = unlinkedInfo.start;
+        end = unlinkedInfo.end;
+        target = unlinkedInfo.target;
+        typeBits = unlinkedInfo.typeBits;
+    }
+
 #if ENABLE(JIT)
+    void initialize(const UnlinkedHandlerInfo& unlinkedInfo, CodeLocationLabel label)
+    {
+        initialize(unlinkedInfo);
+        nativeCode = label;
+    }
+
     CodeLocationLabel nativeCode;
 #endif
 };
 
 } // namespace JSC
-
-#endif // HandlerInfo_h
-
diff --git a/Source/JavaScriptCore/bytecode/InlineAccess.cpp b/Source/JavaScriptCore/bytecode/InlineAccess.cpp
new file mode 100644
index 000000000..667492ac3
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineAccess.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "InlineAccess.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "JSArray.h"
+#include "JSCellInlines.h"
+#include "LinkBuffer.h"
+#include "ScratchRegisterAllocator.h"
+#include "Structure.h"
+#include "StructureStubInfo.h"
+#include "VM.h"
+
+namespace JSC {
+
+void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
+{
+    GPRReg base = GPRInfo::regT0;
+    GPRReg value = GPRInfo::regT1;
+#if USE(JSVALUE32_64)
+    JSValueRegs regs(base, value);
+#else
+    JSValueRegs regs(base);
+#endif
+
+    {
+        CCallHelpers jit(&vm);
+
+        GPRReg scratchGPR = value;
+        jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), value);
+        jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), value);
+        jit.patchableBranch32(
+            CCallHelpers::NotEqual, value, CCallHelpers::TrustedImm32(IsArray | ContiguousShape));
+        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value);
+        jit.load32(CCallHelpers::Address(value, ArrayStorage::lengthOffset()), value);
+        jit.boxInt32(scratchGPR, regs);
+
+        dataLog("array length size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    {
+        CCallHelpers jit(&vm);
+
+        jit.patchableBranch32(
+            MacroAssembler::NotEqual,
+            MacroAssembler::Address(base, JSCell::structureIDOffset()),
+            MacroAssembler::TrustedImm32(0x000ab21ca));
+        jit.loadPtr(
+            CCallHelpers::Address(base, JSObject::butterflyOffset()),
+            value);
+        GPRReg storageGPR = value;
+        jit.loadValue(
+            CCallHelpers::Address(storageGPR, 0x000ab21ca), regs);
+
+        dataLog("out of line offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    {
+        CCallHelpers jit(&vm);
+
+        jit.patchableBranch32(
+            MacroAssembler::NotEqual,
+            MacroAssembler::Address(base, JSCell::structureIDOffset()),
+            MacroAssembler::TrustedImm32(0x000ab21ca));
+        jit.loadValue(
+            MacroAssembler::Address(base, 0x000ab21ca), regs);
+
+        dataLog("inline offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    {
+        CCallHelpers jit(&vm);
+
+        jit.patchableBranch32(
+            MacroAssembler::NotEqual,
+            MacroAssembler::Address(base, JSCell::structureIDOffset()),
+            MacroAssembler::TrustedImm32(0x000ab21ca));
+
+        jit.storeValue(
+            regs, MacroAssembler::Address(base, 0x000ab21ca));
+
+        dataLog("replace cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    {
+        CCallHelpers jit(&vm);
+
+        jit.patchableBranch32(
+            MacroAssembler::NotEqual,
+            MacroAssembler::Address(base, JSCell::structureIDOffset()),
+            MacroAssembler::TrustedImm32(0x000ab21ca));
+
+        jit.loadPtr(MacroAssembler::Address(base, JSObject::butterflyOffset()), value);
+        jit.storeValue(
+            regs,
+            MacroAssembler::Address(base, 120342));
+
+        dataLog("replace out of line cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    CRASH();
+}
+
+
+template <typename Function>
+ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, StructureStubInfo& stubInfo, const Function& function)
+{
+    if (jit.m_assembler.buffer().codeSize() <= stubInfo.patch.inlineSize) {
+        bool needsBranchCompaction = false;
+        LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
+        ASSERT(linkBuffer.isValid());
+        function(linkBuffer);
+        FINALIZE_CODE(linkBuffer, ("InlineAccessType: '%s'", name));
+        return true;
+    }
+
+    // This is helpful when tuning the size of inline ICs for the various platforms. You want to
+    // choose a size that usually succeeds, but the length of the code we generate can vary for
+    // incidental reasons, so it helps to flip this on while running tests or browsing the web to
+    // see how often inlining fails. You don't want an IC size that always fails.
+    const bool failIfCantInline = false;
+    if (failIfCantInline) {
+        dataLog("Failure for: ", name, "\n");
+        dataLog("real size: ", jit.m_assembler.buffer().codeSize(), " inline size:", stubInfo.patch.inlineSize, "\n");
+        CRASH();
+    }
+
+    return false;
+}
+
+bool InlineAccess::generateSelfPropertyAccess(VM& vm, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
+{
+    CCallHelpers jit(&vm);
+    
+    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+    JSValueRegs value = stubInfo.valueRegs();
+
+    auto branchToSlowPath = jit.patchableBranch32(
+        MacroAssembler::NotEqual,
+        MacroAssembler::Address(base, JSCell::structureIDOffset()),
+        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));
+    GPRReg storage;
+    if (isInlineOffset(offset))
+        storage = base;
+    else {
+        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
+        storage = value.payloadGPR();
+    }
+    
+    jit.loadValue(
+        MacroAssembler::Address(storage, offsetRelativeToBase(offset)), value);
+
+    bool linkedCodeInline = linkCodeInline("property access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+    });
+    return linkedCodeInline;
+}
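+
+// Schematically, the inline fast path emitted above is:
+//
+//     if (base->structureID() != structure->id())
+//         goto slowPath;
+//     value = storage[offsetRelativeToBase(offset)];
+//
+// where storage is base itself for inline offsets and the butterfly for
+// out-of-line offsets.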
+
+ALWAYS_INLINE static GPRReg getScratchRegister(StructureStubInfo& stubInfo)
+{
+    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseGPR));
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueGPR));
+#if USE(JSVALUE32_64)
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueTagGPR));
+#endif
+    GPRReg scratch = allocator.allocateScratchGPR();
+    if (allocator.didReuseRegisters())
+        return InvalidGPRReg;
+    return scratch;
+}
+
+ALWAYS_INLINE static bool hasFreeRegister(StructureStubInfo& stubInfo)
+{
+    return getScratchRegister(stubInfo) != InvalidGPRReg;
+}
+
+bool InlineAccess::canGenerateSelfPropertyReplace(StructureStubInfo& stubInfo, PropertyOffset offset)
+{
+    if (isInlineOffset(offset))
+        return true;
+
+    return hasFreeRegister(stubInfo);
+}
+
+bool InlineAccess::generateSelfPropertyReplace(VM& vm, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
+{
+    ASSERT(canGenerateSelfPropertyReplace(stubInfo, offset));
+
+    CCallHelpers jit(&vm);
+
+    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+    JSValueRegs value = stubInfo.valueRegs();
+
+    auto branchToSlowPath = jit.patchableBranch32(
+        MacroAssembler::NotEqual,
+        MacroAssembler::Address(base, JSCell::structureIDOffset()),
+        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));
+
+    GPRReg storage;
+    if (isInlineOffset(offset))
+        storage = base;
+    else {
+        storage = getScratchRegister(stubInfo);
+        ASSERT(storage != InvalidGPRReg);
+        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), storage);
+    }
+
+    jit.storeValue(
+        value, MacroAssembler::Address(storage, offsetRelativeToBase(offset)));
+
+    bool linkedCodeInline = linkCodeInline("property replace", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+    });
+    return linkedCodeInline;
+}
+
+bool InlineAccess::isCacheableArrayLength(StructureStubInfo& stubInfo, JSArray* array)
+{
+    ASSERT(array->indexingType() & IsArray);
+
+    if (!hasFreeRegister(stubInfo))
+        return false;
+
+    return array->indexingType() == ArrayWithInt32
+        || array->indexingType() == ArrayWithDouble
+        || array->indexingType() == ArrayWithContiguous;
+}
+
+bool InlineAccess::generateArrayLength(VM& vm, StructureStubInfo& stubInfo, JSArray* array)
+{
+    ASSERT(isCacheableArrayLength(stubInfo, array));
+
+    CCallHelpers jit(&vm);
+
+    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+    JSValueRegs value = stubInfo.valueRegs();
+    GPRReg scratch = getScratchRegister(stubInfo);
+
+    jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), scratch);
+    jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), scratch);
+    auto branchToSlowPath = jit.patchableBranch32(
+        CCallHelpers::NotEqual, scratch, CCallHelpers::TrustedImm32(array->indexingType()));
+    jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
+    jit.load32(CCallHelpers::Address(value.payloadGPR(), ArrayStorage::lengthOffset()), value.payloadGPR());
+    jit.boxInt32(value.payloadGPR(), value);
+
+    bool linkedCodeInline = linkCodeInline("array length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+    });
+    return linkedCodeInline;
+}
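+
+// Schematically, the fast path above checks the indexing type instead of the
+// structure:
+//
+//     if ((base->indexingTypeAndMisc() & (IsArray | IndexingShapeMask)) != expected)
+//         goto slowPath;
+//     length = *(base->butterfly() + ArrayStorage::lengthOffset());
+//     value = boxInt32(length);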
+
+void InlineAccess::rewireStubAsJump(VM& vm, StructureStubInfo& stubInfo, CodeLocationLabel target)
+{
+    CCallHelpers jit(&vm);
+
+    auto jump = jit.jump();
+
+    // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
+    bool needsBranchCompaction = false;
+    LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
+    RELEASE_ASSERT(linkBuffer.isValid());
+    linkBuffer.link(jump, target);
+
+    FINALIZE_CODE(linkBuffer, ("InlineAccess: linking constant jump"));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/InlineAccess.h b/Source/JavaScriptCore/bytecode/InlineAccess.h
new file mode 100644
index 000000000..3910c5b3b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineAccess.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CodeLocation.h"
+#include "PropertyOffset.h"
+
+namespace JSC {
+
+class JSArray;
+class Structure;
+class StructureStubInfo;
+class VM;
+
+class InlineAccess {
+public:
+
+    // This is the maximum of the inline and out-of-line self-access cases.
+    static constexpr size_t sizeForPropertyAccess()
+    {
+#if CPU(X86_64)
+        return 23;
+#elif CPU(X86)
+        return 27;
+#elif CPU(ARM64)
+        return 40;
+#elif CPU(ARM)
+#if CPU(ARM_THUMB2)
+        return 48;
+#else
+        return 52;
+#endif
+#else
+#error "unsupported platform"
+#endif
+    }
+
+    // This is the maximum of the inline and out-of-line property-replace cases.
+    static constexpr size_t sizeForPropertyReplace()
+    {
+#if CPU(X86_64)
+        return 23;
+#elif CPU(X86)
+        return 27;
+#elif CPU(ARM64)
+        return 40;
+#elif CPU(ARM)
+#if CPU(ARM_THUMB2)
+        return 48;
+#else
+        return 48;
+#endif
+#else
+#error "unsupported platform"
+#endif
+    }
+
+    // FIXME: Make this constexpr when GCC is able to compile std::max() inside a constexpr function.
+    // https://bugs.webkit.org/show_bug.cgi?id=159436
+    //
+    // This is the maximum of the size for array-length access and the size for regular self access.
+    ALWAYS_INLINE static size_t sizeForLengthAccess()
+    {
+#if CPU(X86_64)
+        size_t size = 26;
+#elif CPU(X86)
+        size_t size = 27;
+#elif CPU(ARM64)
+        size_t size = 32;
+#elif CPU(ARM)
+#if CPU(ARM_THUMB2)
+        size_t size = 30;
+#else
+        size_t size = 32;
+#endif
+#else
+#error "unsupported platform"
+#endif
+        return std::max(size, sizeForPropertyAccess());
+    }
+
+    static bool generateSelfPropertyAccess(VM&, StructureStubInfo&, Structure*, PropertyOffset);
+    static bool canGenerateSelfPropertyReplace(StructureStubInfo&, PropertyOffset);
+    static bool generateSelfPropertyReplace(VM&, StructureStubInfo&, Structure*, PropertyOffset);
+    static bool isCacheableArrayLength(StructureStubInfo&, JSArray*);
+    static bool generateArrayLength(VM&, StructureStubInfo&, JSArray*);
+    static void rewireStubAsJump(VM&, StructureStubInfo&, CodeLocationLabel);
+
+    // This is helpful when determining the size of an IC on
+    // various platforms. When adding a new type of IC, implement
+    // its placeholder code here, and log the size. That way we
+    // can intelligently choose sizes on various platforms.
+    NO_RETURN_DUE_TO_CRASH static void dumpCacheSizesAndCrash(VM&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp
new file mode 100644
index 000000000..97ce84d63
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "InlineCallFrame.h"
+
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+JSFunction* InlineCallFrame::calleeConstant() const
+{
+    if (calleeRecovery.isConstant())
+        return jsCast<JSFunction*>(calleeRecovery.constant());
+    return nullptr;
+}
+
+JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const
+{
+    return jsCast<JSFunction*>(calleeRecovery.recover(exec));
+}
+
+CodeBlockHash InlineCallFrame::hash() const
+{
+    return baselineCodeBlock->hash();
+}
+
+CString InlineCallFrame::hashAsStringIfPossible() const
+{
+    return baselineCodeBlock->hashAsStringIfPossible();
+}
+
+CString InlineCallFrame::inferredName() const
+{
+    return jsCast<FunctionExecutable*>(baselineCodeBlock->ownerExecutable())->inferredName().utf8();
+}
+
+void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const
+{
+    out.print(inferredName(), "#", hashAsStringIfPossible());
+}
+
+void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    out.print(briefFunctionInformation(), ":<", RawPointer(baselineCodeBlock.get()));
+    if (isStrictMode())
+        out.print(" (StrictMode)");
+    out.print(", bc#", directCaller.bytecodeIndex, ", ", static_cast(kind));
+    if (isClosureCall)
+        out.print(", closure call");
+    else
+        out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
+    out.print(", numArgs+this = ", arguments.size());
+    out.print(", stackOffset = ", stackOffset);
+    out.print(" (", virtualRegisterForLocal(0), " maps to ", virtualRegisterForLocal(0) + stackOffset, ")>");
+}
+
+void InlineCallFrame::dump(PrintStream& out) const
+{
+    dumpInContext(out, 0);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::InlineCallFrame::Kind kind)
+{
+    switch (kind) {
+    case JSC::InlineCallFrame::Call:
+        out.print("Call");
+        return;
+    case JSC::InlineCallFrame::Construct:
+        out.print("Construct");
+        return;
+    case JSC::InlineCallFrame::TailCall:
+        out.print("TailCall");
+        return;
+    case JSC::InlineCallFrame::CallVarargs:
+        out.print("CallVarargs");
+        return;
+    case JSC::InlineCallFrame::ConstructVarargs:
+        out.print("ConstructVarargs");
+        return;
+    case JSC::InlineCallFrame::TailCallVarargs:
+        out.print("TailCallVarargs");
+        return;
+    case JSC::InlineCallFrame::GetterCall:
+        out.print("GetterCall");
+        return;
+    case JSC::InlineCallFrame::SetterCall:
+        out.print("SetterCall");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.h b/Source/JavaScriptCore/bytecode/InlineCallFrame.h
new file mode 100644
index 000000000..cd2a5fe11
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrame.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "CodeBlockHash.h"
+#include "CodeOrigin.h"
+#include "ValueRecovery.h"
+#include "WriteBarrier.h"
+#include <wtf/PrintStream.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+#include <wtf/text/CString.h>
+
+namespace JSC {
+
+struct InlineCallFrame;
+class ExecState;
+class JSFunction;
+
+struct InlineCallFrame {
+    enum Kind {
+        Call,
+        Construct,
+        TailCall,
+        CallVarargs,
+        ConstructVarargs,
+        TailCallVarargs,
+        
+        // For these, the stackOffset incorporates the argument count plus the true return PC
+        // slot.
+        GetterCall,
+        SetterCall
+    };
+
+    static CallMode callModeFor(Kind kind)
+    {
+        switch (kind) {
+        case Call:
+        case CallVarargs:
+        case GetterCall:
+        case SetterCall:
+            return CallMode::Regular;
+        case TailCall:
+        case TailCallVarargs:
+            return CallMode::Tail;
+        case Construct:
+        case ConstructVarargs:
+            return CallMode::Construct;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    static Kind kindFor(CallMode callMode)
+    {
+        switch (callMode) {
+        case CallMode::Regular:
+            return Call;
+        case CallMode::Construct:
+            return Construct;
+        case CallMode::Tail:
+            return TailCall;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    static Kind varargsKindFor(CallMode callMode)
+    {
+        switch (callMode) {
+        case CallMode::Regular:
+            return CallVarargs;
+        case CallMode::Construct:
+            return ConstructVarargs;
+        case CallMode::Tail:
+            return TailCallVarargs;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    static CodeSpecializationKind specializationKindFor(Kind kind)
+    {
+        switch (kind) {
+        case Call:
+        case CallVarargs:
+        case TailCall:
+        case TailCallVarargs:
+        case GetterCall:
+        case SetterCall:
+            return CodeForCall;
+        case Construct:
+        case ConstructVarargs:
+            return CodeForConstruct;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    static bool isVarargs(Kind kind)
+    {
+        switch (kind) {
+        case CallVarargs:
+        case TailCallVarargs:
+        case ConstructVarargs:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static bool isTail(Kind kind)
+    {
+        switch (kind) {
+        case TailCall:
+        case TailCallVarargs:
+            return true;
+        default:
+            return false;
+        }
+    }
+    bool isTail() const
+    {
+        return isTail(static_cast<Kind>(kind));
+    }
+
+    static CodeOrigin* computeCallerSkippingTailCalls(InlineCallFrame* inlineCallFrame, Kind* callerCallKind = nullptr)
+    {
+        CodeOrigin* codeOrigin;
+        bool tailCallee;
+        int callKind;
+        do {
+            tailCallee = inlineCallFrame->isTail();
+            callKind = inlineCallFrame->kind;
+            codeOrigin = &inlineCallFrame->directCaller;
+            inlineCallFrame = codeOrigin->inlineCallFrame;
+        } while (inlineCallFrame && tailCallee);
+
+        if (tailCallee)
+            return nullptr;
+
+        if (callerCallKind)
+            *callerCallKind = static_cast<Kind>(callKind);
+
+        return codeOrigin;
+    }
+
+    CodeOrigin* getCallerSkippingTailCalls(Kind* callerCallKind = nullptr)
+    {
+        return computeCallerSkippingTailCalls(this, callerCallKind);
+    }
+
+    InlineCallFrame* getCallerInlineFrameSkippingTailCalls()
+    {
+        CodeOrigin* caller = getCallerSkippingTailCalls();
+        return caller ? caller->inlineCallFrame : nullptr;
+    }
+    
+    Vector<ValueRecovery> arguments; // Includes 'this'.
+    WriteBarrier<CodeBlock> baselineCodeBlock;
+    ValueRecovery calleeRecovery;
+    CodeOrigin directCaller;
+
+    signed stackOffset : 28;
+    unsigned kind : 3; // real type is Kind
+    bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually.
+    VirtualRegister argumentCountRegister; // Only set when we inline a varargs call.
+    
+    // There is really no good notion of a "default" set of values for
+    // InlineCallFrame's fields. This constructor is here just to reduce confusion if
+    // we forgot to initialize explicitly.
+    InlineCallFrame()
+        : stackOffset(0)
+        , kind(Call)
+        , isClosureCall(false)
+    {
+    }
+    
+    bool isVarargs() const
+    {
+        return isVarargs(static_cast<Kind>(kind));
+    }
+
+    CodeSpecializationKind specializationKind() const { return specializationKindFor(static_cast<Kind>(kind)); }
+
+    JSFunction* calleeConstant() const;
+    
+    // Get the callee given a machine call frame to which this InlineCallFrame belongs.
+    JSFunction* calleeForCallFrame(ExecState*) const;
+    
+    CString inferredName() const;
+    CodeBlockHash hash() const;
+    CString hashAsStringIfPossible() const;
+    
+    void setStackOffset(signed offset)
+    {
+        stackOffset = offset;
+        RELEASE_ASSERT(static_cast<int>(stackOffset) == offset);
+    }
+
+    ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); }
+    ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); }
+
+    bool isStrictMode() const { return baselineCodeBlock->isStrictMode(); }
+
+    void dumpBriefFunctionInformation(PrintStream&) const;
+    void dump(PrintStream&) const;
+    void dumpInContext(PrintStream&, DumpContext*) const;
+
+    MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
+
+};
+
+inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+{
+    RELEASE_ASSERT(inlineCallFrame);
+    return inlineCallFrame->baselineCodeBlock.get();
+}
+
+inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+{
+    if (codeOrigin.inlineCallFrame)
+        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
+    return baselineCodeBlock;
+}
+
+template <typename Function>
+inline void CodeOrigin::walkUpInlineStack(const Function& function)
+{
+    CodeOrigin codeOrigin = *this;
+    while (true) {
+        function(codeOrigin);
+        if (!codeOrigin.inlineCallFrame)
+            break;
+        codeOrigin = codeOrigin.inlineCallFrame->directCaller;
+    }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::InlineCallFrame::Kind);
+
+} // namespace WTF
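
computeCallerSkippingTailCalls() encodes the key property of tail calls: a tail callee reuses (and destroys) its caller's frame, so its observable caller is the first non-tail frame reached by walking directCaller links, and there is no inline caller at all if the walk runs off the inline stack while still inside a tail chain. A minimal sketch of that walk, assuming a toy Frame type in place of InlineCallFrame/CodeOrigin:

    #include <cstdio>

    // Toy stand-in for InlineCallFrame; isTail means "this frame was
    // entered via a tail call and therefore destroyed its caller's frame".
    struct Frame {
        const char* name;
        bool isTail;
        Frame* directCaller; // nullptr once the walk leaves the inline stack
    };

    Frame* callerSkippingTailCalls(Frame* frame)
    {
        bool tailCallee;
        do {
            tailCallee = frame->isTail;
            frame = frame->directCaller;
        } while (frame && tailCallee);
        // Ran off the stack while still inside a tail chain: no observable
        // inline caller remains.
        return tailCallee ? nullptr : frame;
    }

    int main()
    {
        Frame outer { "outer", false, nullptr };
        Frame middle { "middle", false, &outer };
        Frame leaf { "leaf", true, &middle }; // tail-called: middle's frame is gone
        std::puts(callerSkippingTailCalls(&leaf)->name); // prints "outer"
        return 0;
    }
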
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
index be5edb34c..402cfd06d 100644
--- a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
@@ -26,6 +26,9 @@
 #include "config.h"
 #include "InlineCallFrameSet.h"
 
+#include "InlineCallFrame.h"
+#include "JSCInlines.h"
+
 namespace JSC {
 
 InlineCallFrameSet::InlineCallFrameSet() { }
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
index 0a8b2e79c..6c6184173 100644
--- a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
@@ -23,17 +23,15 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef InlineCallFrameSet_h
-#define InlineCallFrameSet_h
+#pragma once
 
 #include "CodeOrigin.h"
 #include <wtf/Bag.h>
-#include <wtf/Noncopyable.h>
+#include <wtf/RefCounted.h>
 
 namespace JSC {
 
-class InlineCallFrameSet {
-    WTF_MAKE_NONCOPYABLE(InlineCallFrameSet);
+class InlineCallFrameSet : public RefCounted<InlineCallFrameSet> {
 public:
     InlineCallFrameSet();
     ~InlineCallFrameSet();
@@ -45,12 +43,9 @@ public:
     typedef Bag<InlineCallFrame>::iterator iterator;
     iterator begin() { return m_frames.begin(); }
     iterator end() { return m_frames.end(); }
-    
+
 private:
     Bag<InlineCallFrame> m_frames;
 };
 
 } // namespace JSC
-
-#endif // InlineCallFrameSet_h
-
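
Switching InlineCallFrameSet from WTF_MAKE_NONCOPYABLE to RefCounted lets several owners (for example a compilation record and the code it produced) share one set, while the Bag keeps every InlineCallFrame at a stable address that other data structures can hold as a raw pointer. A sketch of that ownership shape, using std::shared_ptr and std::list as stand-ins for RefPtr and WTF::Bag:

    #include <list>
    #include <memory>

    struct InlineCallFrame { int depth = 0; };

    // std::list, like WTF::Bag, never relocates elements, so the raw
    // pointers handed out by add() stay valid for the set's lifetime.
    struct InlineCallFrameSet {
        InlineCallFrame* add()
        {
            m_frames.emplace_back();
            return &m_frames.back();
        }
        std::list<InlineCallFrame> m_frames;
    };

    int main()
    {
        // shared_ptr stands in for RefPtr<InlineCallFrameSet>: the set dies
        // only when the last owner lets go.
        auto set = std::make_shared<InlineCallFrameSet>();
        InlineCallFrame* frame = set->add();
        frame->depth = 1;
        std::shared_ptr<InlineCallFrameSet> secondOwner = set;
        return frame->depth == 1 ? 0 : 1;
    }
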
diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h
index 00bd8155b..a86739f47 100644
--- a/Source/JavaScriptCore/bytecode/Instruction.h
+++ b/Source/JavaScriptCore/bytecode/Instruction.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -26,15 +26,18 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef Instruction_h
-#define Instruction_h
+#pragma once
 
+#include "BasicBlockLocation.h"
 #include "MacroAssembler.h"
-#include "Opcode.h"
+#include "PutByIdFlags.h"
+#include "SymbolTable.h"
+#include "TypeLocation.h"
 #include "PropertySlot.h"
 #include "SpecialPointer.h"
 #include "Structure.h"
 #include "StructureChain.h"
+#include "ToThisStatus.h"
 #include "VirtualRegister.h"
 #include <wtf/VectorTraits.h>
 
@@ -43,10 +46,16 @@ namespace JSC {
 class ArrayAllocationProfile;
 class ArrayProfile;
 class ObjectAllocationProfile;
-class VariableWatchpointSet;
+class WatchpointSet;
 struct LLIntCallLinkInfo;
 struct ValueProfile;
 
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+typedef void* Opcode;
+#else
+typedef OpcodeID Opcode;
+#endif
+
 struct Instruction {
     Instruction()
     {
@@ -70,6 +79,18 @@ struct Instruction {
         u.jsCell.clear();
         u.operand = operand;
     }
+    Instruction(unsigned unsignedValue)
+    {
+        // We have to initialize one of the pointer members to ensure that
+        // the entire struct is initialized in 64-bit.
+        u.jsCell.clear();
+        u.unsignedValue = unsignedValue;
+    }
+
+    Instruction(PutByIdFlags flags)
+    {
+        u.putByIdFlags = flags;
+    }
 
     Instruction(VM& vm, JSCell* owner, Structure* structure)
     {
@@ -94,30 +115,36 @@ struct Instruction {
     Instruction(ArrayProfile* profile) { u.arrayProfile = profile; }
     Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; }
     Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; }
-    Instruction(WriteBarrier<Unknown>* registerPointer) { u.registerPointer = registerPointer; }
+    Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; }
     Instruction(Special::Pointer pointer) { u.specialPointer = pointer; }
-    Instruction(StringImpl* uid) { u.uid = uid; }
+    Instruction(UniquedStringImpl* uid) { u.uid = uid; }
     Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; }
 
     union {
         Opcode opcode;
         int operand;
+        unsigned unsignedValue;
         WriteBarrierBase<Structure> structure;
+        StructureID structureID;
+        WriteBarrierBase<SymbolTable> symbolTable;
         WriteBarrierBase<StructureChain> structureChain;
         WriteBarrierBase<JSCell> jsCell;
-        WriteBarrier<Unknown>* registerPointer;
+        WriteBarrier<Unknown>* variablePointer;
         Special::Pointer specialPointer;
         PropertySlot::GetValueFunc getterFunc;
         LLIntCallLinkInfo* callLinkInfo;
-        StringImpl* uid;
+        UniquedStringImpl* uid;
         ValueProfile* profile;
         ArrayProfile* arrayProfile;
         ArrayAllocationProfile* arrayAllocationProfile;
         ObjectAllocationProfile* objectAllocationProfile;
-        VariableWatchpointSet* watchpointSet;
-        WriteBarrierBase<JSActivation> activation;
+        WatchpointSet* watchpointSet;
         void* pointer;
         bool* predicatePointer;
+        ToThisStatus toThisStatus;
+        TypeLocation* location;
+        BasicBlockLocation* basicBlockLocation;
+        PutByIdFlags putByIdFlags;
     } u;
         
 private:
@@ -132,5 +159,3 @@ namespace WTF {
 template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { };
 
 } // namespace WTF
-
-#endif // Instruction_h
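
Instruction is one machine word: a union of an opcode (a label pointer under computed-goto dispatch) and every operand or cache pointer a bytecode might carry, so a bytecode stream is just a Vector<Instruction> in which each opcode is followed inline by its operands. The constructors clear u.jsCell first so the full 64-bit word is initialized before a narrower member is stored. A toy model of that layout, with illustrative names rather than JSC's:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Toy instruction stream: every slot is one machine word.
    enum OpcodeID : std::intptr_t { op_add, op_ret };

    union Slot {
        std::intptr_t opcode; // a label pointer under computed-goto dispatch
        int operand;          // narrower than the word on 64-bit...
        void* pointer;        // ...so the real Instruction clears u.jsCell
                              // first to initialize the whole word
    };

    int main()
    {
        // "add loc0, loc1, loc2": opcode followed inline by three operands.
        std::vector<Slot> stream(4);
        stream[0].pointer = nullptr; stream[0].opcode = op_add;
        stream[1].pointer = nullptr; stream[1].operand = 0;
        stream[2].pointer = nullptr; stream[2].operand = 1;
        stream[3].pointer = nullptr; stream[3].operand = 2;
        std::printf("opcode %ld takes operands %d, %d, %d\n",
            static_cast<long>(stream[0].opcode),
            stream[1].operand, stream[2].operand, stream[3].operand);
        return 0;
    }
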
diff --git a/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h b/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h
new file mode 100644
index 000000000..a4865233b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSGlobalObject.h"
+#include "ObjectPrototype.h"
+#include "SlotVisitor.h"
+#include "WriteBarrier.h"
+
+namespace JSC {
+
+class InternalFunctionAllocationProfile {
+public:
+    Structure* structure() { return m_structure.get(); }
+    Structure* createAllocationStructureFromBase(VM&, JSGlobalObject*, JSCell* owner, JSObject* prototype, Structure* base);
+
+    void clear() { m_structure.clear(); }
+    void visitAggregate(SlotVisitor& visitor) { visitor.append(m_structure); }
+
+private:
+    WriteBarrier<Structure> m_structure;
+};
+
+inline Structure* InternalFunctionAllocationProfile::createAllocationStructureFromBase(VM& vm, JSGlobalObject* globalObject, JSCell* owner, JSObject* prototype, Structure* baseStructure)
+{
+    ASSERT(!m_structure || m_structure.get()->classInfo() != baseStructure->classInfo());
+
+    Structure* structure;
+    if (prototype == baseStructure->storedPrototype())
+        structure = baseStructure;
+    else
+        structure = vm.prototypeMap.emptyStructureForPrototypeFromBaseStructure(globalObject, prototype, baseStructure);
+
+    // Ensure that if another thread sees the structure, it will see it properly created.
+    WTF::storeStoreFence();
+
+    m_structure.set(vm, owner, structure);
+    return m_structure.get();
+}
+
+} // namespace JSC
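
createAllocationStructureFromBase() publishes m_structure with a storeStoreFence so that a concurrent reader which sees the pointer also sees the Structure's initializing stores; the matching loadLoadFence sits on the reader side (compare ObjectAllocationProfile::structure() later in this patch). A minimal sketch of the same pairing, written with C++11 release/acquire atomics instead of WTF's fences:

    #include <atomic>
    #include <thread>

    struct Structure { int inlineCapacity = 0; };

    std::atomic<Structure*> g_published { nullptr };

    void producer()
    {
        Structure* s = new Structure;
        s->inlineCapacity = 6;          // initializing stores happen first...
        // ...and release ordering (storeStoreFence in the patch) keeps them
        // visible before the pointer itself.
        g_published.store(s, std::memory_order_release);
    }

    void consumer()
    {
        // Acquire pairs with release the way loadLoadFence pairs with
        // storeStoreFence: seeing the pointer implies seeing its contents.
        if (Structure* s = g_published.load(std::memory_order_acquire)) {
            // guaranteed to observe inlineCapacity == 6, never a torn object
            (void)s->inlineCapacity;
        }
    }

    int main()
    {
        std::thread a(producer), b(consumer);
        a.join();
        b.join();
        delete g_published.load();
        return 0;
    }
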
diff --git a/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.cpp b/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.cpp
new file mode 100644
index 000000000..92d6a5580
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "IntrinsicGetterAccessCase.h"
+
+#if ENABLE(JIT)
+
+#include "HeapInlines.h"
+
+namespace JSC {
+
+IntrinsicGetterAccessCase::IntrinsicGetterAccessCase(VM& vm, JSCell* owner, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, JSFunction* intrinsicFunction)
+    : Base(vm, owner, IntrinsicGetter, offset, structure, conditionSet)
+{
+    m_intrinsicFunction.set(vm, owner, intrinsicFunction);
+}
+
+std::unique_ptr<AccessCase> IntrinsicGetterAccessCase::create(VM& vm, JSCell* owner, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, JSFunction* intrinsicFunction)
+{
+    return std::unique_ptr<AccessCase>(new IntrinsicGetterAccessCase(vm, owner, offset, structure, conditionSet, intrinsicFunction));
+}
+
+IntrinsicGetterAccessCase::~IntrinsicGetterAccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> IntrinsicGetterAccessCase::clone() const
+{
+    std::unique_ptr<IntrinsicGetterAccessCase> result(new IntrinsicGetterAccessCase(*this));
+    result->resetState();
+    return WTFMove(result);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.h b/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.h
new file mode 100644
index 000000000..1021c18e6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "AccessCase.h"
+
+namespace JSC {
+
+class IntrinsicGetterAccessCase : public AccessCase {
+public:
+    typedef AccessCase Base;
+    friend class AccessCase;
+
+    JSFunction* intrinsicFunction() const { return m_intrinsicFunction.get(); }
+    Intrinsic intrinsic() const { return m_intrinsicFunction->intrinsic(); }
+
+    static bool canEmitIntrinsicGetter(JSFunction*, Structure*);
+    void emitIntrinsicGetter(AccessGenerationState&);
+
+    static std::unique_ptr<AccessCase> create(VM&, JSCell*, PropertyOffset, Structure*, const ObjectPropertyConditionSet&, JSFunction* intrinsicFunction);
+
+    std::unique_ptr<AccessCase> clone() const override;
+
+    ~IntrinsicGetterAccessCase();
+
+private:
+    IntrinsicGetterAccessCase(VM&, JSCell*, PropertyOffset, Structure*, const ObjectPropertyConditionSet&, JSFunction* intrinsicFunction);
+
+    WriteBarrier<JSFunction> m_intrinsicFunction;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/JumpTable.cpp b/Source/JavaScriptCore/bytecode/JumpTable.cpp
index ef7098b65..e22ad03c9 100644
--- a/Source/JavaScriptCore/bytecode/JumpTable.cpp
+++ b/Source/JavaScriptCore/bytecode/JumpTable.cpp
@@ -11,7 +11,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
diff --git a/Source/JavaScriptCore/bytecode/JumpTable.h b/Source/JavaScriptCore/bytecode/JumpTable.h
index 55d6855a5..333542517 100644
--- a/Source/JavaScriptCore/bytecode/JumpTable.h
+++ b/Source/JavaScriptCore/bytecode/JumpTable.h
@@ -11,7 +11,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -27,8 +27,7 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef JumpTable_h
-#define JumpTable_h
+#pragma once
 
 #include "MacroAssembler.h"
 #include <wtf/HashMap.h>
@@ -94,6 +93,12 @@ namespace JSC {
         }
 
 #if ENABLE(JIT)
+        void ensureCTITable()
+        {
+            ASSERT(ctiOffsets.isEmpty() || ctiOffsets.size() == branchOffsets.size());
+            ctiOffsets.grow(branchOffsets.size());
+        }
+        
         inline CodeLocationLabel ctiForValue(int32_t value)
         {
             if (value >= min && static_cast<unsigned>(value - min) < ctiOffsets.size())
@@ -112,5 +117,3 @@ namespace JSC {
     };
 
 } // namespace JSC
-
-#endif // JumpTable_h
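
ensureCTITable() grows the JIT-side table to one entry per bytecode branch offset, and ctiForValue() routes any value outside [min, min + size) to the switch's default target. A sketch of that dense-table lookup, assuming illustrative types in place of CodeLocationLabel:

    #include <cstdint>
    #include <vector>

    using Label = const void*; // stands in for CodeLocationLabel

    struct SimpleJumpTable {
        std::int32_t min = 0;
        std::vector<Label> ctiOffsets; // one entry per value in [min, min + size)
        Label ctiDefault = nullptr;

        // Mirrors ctiForValue: in range and populated yields the table
        // entry; everything else yields the switch's default target.
        Label forValue(std::int32_t value) const
        {
            if (value >= min && static_cast<std::uint32_t>(value - min) < ctiOffsets.size()) {
                if (Label label = ctiOffsets[value - min])
                    return label;
            }
            return ctiDefault;
        }
    };

    int main()
    {
        SimpleJumpTable table;
        table.min = 10;
        int target = 0;
        table.ctiOffsets = { &target, nullptr }; // covers values 10 and 11
        return table.forValue(42) == table.ctiDefault ? 0 : 1; // out of range
    }
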
diff --git a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
index bfb951018..c2cf4d1dc 100644
--- a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
+++ b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef LLIntCallLinkInfo_h
-#define LLIntCallLinkInfo_h
+#pragma once
 
 #include "JSFunction.h"
 #include "MacroAssemblerCodeRef.h"
@@ -45,7 +44,7 @@ struct LLIntCallLinkInfo : public BasicRawSentinelNode<LLIntCallLinkInfo> {
             remove();
     }
     
-    bool isLinked() { return callee; }
+    bool isLinked() { return !!callee; }
     
     void unlink()
     {
@@ -61,6 +60,3 @@ struct LLIntCallLinkInfo : public BasicRawSentinelNode {
 };
 
 } // namespace JSC
-
-#endif // LLIntCallLinkInfo_h
-
diff --git a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
new file mode 100644
index 000000000..9a5ac0112
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
+
+#include "CodeBlock.h"
+#include "Instruction.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, Instruction* getByIdInstruction)
+    : m_key(key)
+    , m_getByIdInstruction(getByIdInstruction)
+{
+    RELEASE_ASSERT(key.watchingRequiresStructureTransitionWatchpoint());
+    RELEASE_ASSERT(!key.watchingRequiresReplacementWatchpoint());
+}
+
+void LLIntPrototypeLoadAdaptiveStructureWatchpoint::install()
+{
+    RELEASE_ASSERT(m_key.isWatchable());
+
+    m_key.object()->structure()->addTransitionWatchpoint(this);
+}
+
+void LLIntPrototypeLoadAdaptiveStructureWatchpoint::fireInternal(const FireDetail& detail)
+{
+    if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+        install();
+        return;
+    }
+
+    StringPrintStream out;
+    out.print("ObjectToStringValue Adaptation of ", m_key, " failed: ", detail);
+
+    StringFireDetail stringDetail(out.toCString().data());
+
+    CodeBlock::clearLLIntGetByIdCache(m_getByIdInstruction);
+}
+
+} // namespace JSC
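
The watchpoint is adaptive: when the watched structure transitions, fireInternal() re-checks the property condition and, if it is still watchable, re-installs itself on the new structure instead of giving up; only a genuine violation clears the LLInt get_by_id cache. A toy model of that fire-then-rearm-or-invalidate control flow, with illustrative callbacks:

    #include <cstdio>
    #include <functional>

    // Toy adaptive watchpoint: firing either re-arms against the new state
    // or invalidates the cache it guards. All callbacks are illustrative.
    struct AdaptiveWatchpoint {
        std::function<bool()> stillWatchable;
        std::function<void()> rearm;           // addTransitionWatchpoint(...)
        std::function<void()> invalidateCache; // clearLLIntGetByIdCache(...)

        void fire()
        {
            if (stillWatchable()) {
                rearm(); // compatible transition: keep the fast path alive
                return;
            }
            invalidateCache(); // real violation: drop the cached fast path
        }
    };

    int main()
    {
        bool watchable = true;
        AdaptiveWatchpoint watchpoint {
            [&] { return watchable; },
            [] { std::puts("re-armed on new structure"); },
            [] { std::puts("get_by_id cache cleared"); }
        };
        watchpoint.fire(); // prints "re-armed on new structure"
        watchable = false;
        watchpoint.fire(); // prints "get_by_id cache cleared"
        return 0;
    }
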
diff --git a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h
new file mode 100644
index 000000000..8a73c6c79
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Instruction.h"
+#include "ObjectPropertyCondition.h"
+#include "Watchpoint.h"
+
+namespace JSC {
+
+class LLIntPrototypeLoadAdaptiveStructureWatchpoint : public Watchpoint {
+public:
+    LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, Instruction*);
+
+    void install();
+
+protected:
+    void fireInternal(const FireDetail&) override;
+
+private:
+    ObjectPropertyCondition m_key;
+    Instruction* m_getByIdInstruction;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
index a8ad779ac..0929d6fb4 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
@@ -26,14 +26,14 @@
 #include "config.h"
 #include "LazyOperandValueProfile.h"
 
-#include "Operations.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
 CompressedLazyOperandValueProfileHolder::CompressedLazyOperandValueProfileHolder() { }
 CompressedLazyOperandValueProfileHolder::~CompressedLazyOperandValueProfileHolder() { }
 
-void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const ConcurrentJITLocker& locker)
+void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const ConcurrentJSLocker& locker)
 {
     if (!m_data)
         return;
@@ -43,10 +43,10 @@ void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const Co
 }
 
 LazyOperandValueProfile* CompressedLazyOperandValueProfileHolder::add(
-    const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key)
+    const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key)
 {
     if (!m_data)
-        m_data = adoptPtr(new LazyOperandValueProfile::List());
+        m_data = std::make_unique<LazyOperandValueProfile::List>();
     else {
         for (unsigned i = 0; i < m_data->size(); ++i) {
             if (m_data->at(i).key() == key)
@@ -62,7 +62,7 @@ LazyOperandValueProfileParser::LazyOperandValueProfileParser() { }
 LazyOperandValueProfileParser::~LazyOperandValueProfileParser() { }
 
 void LazyOperandValueProfileParser::initialize(
-    const ConcurrentJITLocker&, CompressedLazyOperandValueProfileHolder& holder)
+    const ConcurrentJSLocker&, CompressedLazyOperandValueProfileHolder& holder)
 {
     ASSERT(m_map.isEmpty());
     
@@ -87,7 +87,7 @@ LazyOperandValueProfile* LazyOperandValueProfileParser::getIfPresent(
 }
 
 SpeculatedType LazyOperandValueProfileParser::prediction(
-    const ConcurrentJITLocker& locker, const LazyOperandValueProfileKey& key) const
+    const ConcurrentJSLocker& locker, const LazyOperandValueProfileKey& key) const
 {
     LazyOperandValueProfile* profile = getIfPresent(key);
     if (!profile)
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
index 95ef941cd..9c3b06842 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
@@ -23,15 +23,13 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef LazyOperandValueProfile_h
-#define LazyOperandValueProfile_h
+#pragma once
 
-#include "ConcurrentJITLock.h"
+#include "ConcurrentJSLock.h"
 #include "ValueProfile.h"
 #include "VirtualRegister.h"
 #include <wtf/HashMap.h>
 #include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
 #include <wtf/SegmentedVector.h>
 
 namespace JSC {
@@ -154,14 +152,14 @@ public:
     CompressedLazyOperandValueProfileHolder();
     ~CompressedLazyOperandValueProfileHolder();
     
-    void computeUpdatedPredictions(const ConcurrentJITLocker&);
+    void computeUpdatedPredictions(const ConcurrentJSLocker&);
     
     LazyOperandValueProfile* add(
-        const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key);
+        const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key);
     
 private:
     friend class LazyOperandValueProfileParser;
-    OwnPtr<LazyOperandValueProfile::List> m_data;
+    std::unique_ptr<LazyOperandValueProfile::List> m_data;
 };
 
 class LazyOperandValueProfileParser {
@@ -171,19 +169,15 @@ public:
     ~LazyOperandValueProfileParser();
     
     void initialize(
-        const ConcurrentJITLocker&, CompressedLazyOperandValueProfileHolder& holder);
+        const ConcurrentJSLocker&, CompressedLazyOperandValueProfileHolder& holder);
     
     LazyOperandValueProfile* getIfPresent(
         const LazyOperandValueProfileKey& key) const;
     
     SpeculatedType prediction(
-        const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key) const;
+        const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key) const;
 private:
     HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*> m_map;
 };
 
 } // namespace JSC
-
-#endif // LazyOperandValueProfile_h
-
-
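
The holder's list is allocated only when the first lazy-operand profile is requested, and both add() and the parser take a ConcurrentJSLocker so the mutator and the concurrent compiler agree on the list's contents. A sketch of that lock-scoped lazy allocation, using std::mutex for the lock and std::deque as a stand-in for the address-stable SegmentedVector behind LazyOperandValueProfile::List:

    #include <deque>
    #include <memory>
    #include <mutex>

    struct Profile { int key; int prediction = 0; };

    struct LazyHolder {
        // Passing the guard in, as JSC passes ConcurrentJSLocker, makes
        // "caller must hold the lock" part of the signature.
        Profile* add(const std::lock_guard<std::mutex>&, int key)
        {
            if (!m_data)
                m_data = std::make_unique<std::deque<Profile>>(); // first use
            for (auto& profile : *m_data) {
                if (profile.key == key)
                    return &profile;
            }
            // std::deque, like WTF::SegmentedVector, keeps existing elements'
            // addresses stable as it grows, so returned pointers stay valid.
            m_data->push_back({ key, 0 });
            return &m_data->back();
        }
        std::unique_ptr<std::deque<Profile>> m_data; // null until first use
    };

    int main()
    {
        LazyHolder holder;
        std::mutex lock;
        std::lock_guard<std::mutex> guard(lock);
        holder.add(guard, 7)->prediction = 42;
        return holder.add(guard, 7)->prediction == 42 ? 0 : 1;
    }
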
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
index 1ac5bb5a0..f479e5f85 100644
--- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,7 +28,10 @@
 
 #if ENABLE(DFG_JIT)
 
+#include "ArithProfile.h"
+#include "CCallHelpers.h"
 #include "CodeBlock.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
@@ -43,28 +46,32 @@ MethodOfGettingAValueProfile MethodOfGettingAValueProfile::fromLazyOperand(
     return result;
 }
 
-EncodedJSValue* MethodOfGettingAValueProfile::getSpecFailBucket(unsigned index) const
+void MethodOfGettingAValueProfile::emitReportValue(CCallHelpers& jit, JSValueRegs regs) const
 {
     switch (m_kind) {
     case None:
-        return 0;
+        return;
         
     case Ready:
-        return u.profile->specFailBucket(index);
+        jit.storeValue(regs, u.profile->specFailBucket(0));
+        return;
         
     case LazyOperand: {
         LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));
         
-        ConcurrentJITLocker locker(u.lazyOperand.codeBlock->m_lock);
+        ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
         LazyOperandValueProfile* profile =
             u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(locker, key);
-        return profile->specFailBucket(index);
+        jit.storeValue(regs, profile->specFailBucket(0));
+        return;
     }
         
-    default:
-        RELEASE_ASSERT_NOT_REACHED();
-        return 0;
-    }
+    case ArithProfileReady: {
+        u.arithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
+        return;
+    } }
+    
+    RELEASE_ASSERT_NOT_REACHED();
 }
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
index c6fe6c5f0..98e39db1d 100644
--- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,10 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef MethodOfGettingAValueProfile_h
-#define MethodOfGettingAValueProfile_h
-
-#include <wtf/Platform.h>
+#pragma once
 
 // This is guarded by ENABLE_DFG_JIT only because it uses some value profiles
 // that are currently only used if the DFG is enabled (i.e. they are not
@@ -34,12 +31,15 @@
 // these #if's will disappear...
 #if ENABLE(DFG_JIT)
 
+#include "GPRInfo.h"
 #include "JSCJSValue.h"
 
 namespace JSC {
 
+class CCallHelpers;
 class CodeBlock;
 class LazyOperandValueProfileKey;
+struct ArithProfile;
 struct ValueProfile;
 
 class MethodOfGettingAValueProfile {
@@ -49,7 +49,7 @@ public:
     {
     }
     
-    explicit MethodOfGettingAValueProfile(ValueProfile* profile)
+    MethodOfGettingAValueProfile(ValueProfile* profile)
     {
         if (profile) {
             m_kind = Ready;
@@ -58,31 +58,34 @@ public:
             m_kind = None;
     }
     
+    MethodOfGettingAValueProfile(ArithProfile* profile)
+    {
+        if (profile) {
+            m_kind = ArithProfileReady;
+            u.arithProfile = profile;
+        } else
+            m_kind = None;
+    }
+    
     static MethodOfGettingAValueProfile fromLazyOperand(
         CodeBlock*, const LazyOperandValueProfileKey&);
     
-    bool operator!() const { return m_kind == None; }
-    
-    // This logically has a pointer to a "There exists X such that
-    // ValueProfileBase". But since C++ does not have existential
-    // templates, I cannot return it. So instead, for any methods that
-    // users of this class would like to call, we'll just have to provide
-    // a method here that does it through an indirection. Or we could
-    // possibly just make ValueProfile less template-based. But last I
-    // tried that, it felt more yucky than this class.
+    explicit operator bool() const { return m_kind != None; }
     
-    EncodedJSValue* getSpecFailBucket(unsigned index) const;
+    void emitReportValue(CCallHelpers&, JSValueRegs) const;
     
 private:
     enum Kind {
         None,
         Ready,
+        ArithProfileReady,
         LazyOperand
     };
     
     Kind m_kind;
     union {
         ValueProfile* profile;
+        ArithProfile* arithProfile;
         struct {
             CodeBlock* codeBlock;
             unsigned bytecodeOffset;
@@ -94,6 +97,3 @@ private:
 } // namespace JSC
 
 #endif // ENABLE(DFG_JIT)
-
-#endif // MethodOfGettingAValueProfile_h
-
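
MethodOfGettingAValueProfile is a hand-rolled tagged union: Kind discriminates which union member is live, and emitReportValue() switches on it to emit the right profiling store. A sketch of the same shape using std::variant, where the names are illustrative and the puts() calls stand in for emitted JIT code:

    #include <cstdio>
    #include <variant>

    struct ValueProfile { };
    struct ArithProfile { };

    // std::monostate plays the role of Kind::None; the variant index is the
    // discriminant that m_kind encodes by hand in the patch.
    using MethodOfGettingAProfile =
        std::variant<std::monostate, ValueProfile*, ArithProfile*>;

    void emitReportValue(const MethodOfGettingAProfile& method)
    {
        if (std::get_if<ValueProfile*>(&method))
            std::puts("emit: store value into the profile's spec-fail bucket");
        else if (std::get_if<ArithProfile*>(&method))
            std::puts("emit: ArithProfile observe-result sequence");
        // None: emit nothing, matching the early return in the patch
    }

    int main()
    {
        ArithProfile arith;
        emitReportValue(MethodOfGettingAProfile { &arith });
        emitReportValue(MethodOfGettingAProfile { }); // None: silent
        return 0;
    }
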
diff --git a/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.cpp b/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.cpp
new file mode 100644
index 000000000..3c168c6c9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ModuleNamespaceAccessCase.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "HeapInlines.h"
+#include "JSModuleEnvironment.h"
+#include "JSModuleNamespaceObject.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+ModuleNamespaceAccessCase::ModuleNamespaceAccessCase(VM& vm, JSCell* owner, JSModuleNamespaceObject* moduleNamespaceObject, JSModuleEnvironment* moduleEnvironment, ScopeOffset scopeOffset)
+    : Base(vm, owner, ModuleNamespaceLoad, invalidOffset, nullptr, ObjectPropertyConditionSet())
+    , m_scopeOffset(scopeOffset)
+{
+    m_moduleNamespaceObject.set(vm, owner, moduleNamespaceObject);
+    m_moduleEnvironment.set(vm, owner, moduleEnvironment);
+}
+
+std::unique_ptr<AccessCase> ModuleNamespaceAccessCase::create(VM& vm, JSCell* owner, JSModuleNamespaceObject* moduleNamespaceObject, JSModuleEnvironment* moduleEnvironment, ScopeOffset scopeOffset)
+{
+    return std::unique_ptr<AccessCase>(new ModuleNamespaceAccessCase(vm, owner, moduleNamespaceObject, moduleEnvironment, scopeOffset));
+}
+
+ModuleNamespaceAccessCase::~ModuleNamespaceAccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> ModuleNamespaceAccessCase::clone() const
+{
+    std::unique_ptr<ModuleNamespaceAccessCase> result(new ModuleNamespaceAccessCase(*this));
+    result->resetState();
+    return WTFMove(result);
+}
+
+void ModuleNamespaceAccessCase::emit(AccessGenerationState& state, MacroAssembler::JumpList& fallThrough)
+{
+    CCallHelpers& jit = *state.jit;
+    JSValueRegs valueRegs = state.valueRegs;
+    GPRReg baseGPR = state.baseGPR;
+
+    fallThrough.append(
+        jit.branchPtr(
+            CCallHelpers::NotEqual,
+            baseGPR,
+            CCallHelpers::TrustedImmPtr(m_moduleNamespaceObject.get())));
+
+    jit.loadValue(&m_moduleEnvironment->variableAt(m_scopeOffset), valueRegs);
+    state.failAndIgnore.append(jit.branchIfEmpty(valueRegs));
+    state.succeed();
+}
+
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
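
emit() compiles the whole fast path for a module-namespace load: guard that the base register holds exactly the cached namespace object, load the binding straight from the module environment's slot, and bail out if the slot is still empty (an uninitialized binding). A sketch of the equivalent check in plain C++, with illustrative types, where returning nullopt stands for jumping to fallThrough or failAndIgnore:

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Illustrative stand-ins for JSValue, JSModuleEnvironment and
    // JSModuleNamespaceObject; only what the guard sequence needs.
    struct Value {
        bool empty = true; // an uninitialized binding
        bool isEmpty() const { return empty; }
    };
    struct ModuleEnvironment { std::vector<Value> slots; };
    struct NamespaceObject { ModuleEnvironment* environment = nullptr; };

    std::optional<Value> tryFastPath(const void* base, const NamespaceObject* cached, std::size_t scopeOffset)
    {
        if (base != cached) // branchPtr(NotEqual, baseGPR, cached object)
            return std::nullopt;
        Value value = cached->environment->slots[scopeOffset]; // loadValue(slot)
        if (value.isEmpty()) // branchIfEmpty(valueRegs)
            return std::nullopt;
        return value; // state.succeed()
    }

    int main()
    {
        ModuleEnvironment environment { { Value { false } } };
        NamespaceObject ns { &environment };
        return tryFastPath(&ns, &ns, 0).has_value() ? 0 : 1;
    }
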
diff --git a/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.h b/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.h
new file mode 100644
index 000000000..333075f2e
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "AccessCase.h"
+
+namespace JSC {
+
+class JSModuleEnvironment;
+class JSModuleNamespaceObject;
+
+class ModuleNamespaceAccessCase : public AccessCase {
+public:
+    using Base = AccessCase;
+    friend class AccessCase;
+
+    JSModuleNamespaceObject* moduleNamespaceObject() const { return m_moduleNamespaceObject.get(); }
+    JSModuleEnvironment* moduleEnvironment() const { return m_moduleEnvironment.get(); }
+    ScopeOffset scopeOffset() const { return m_scopeOffset; }
+
+    static std::unique_ptr<AccessCase> create(VM&, JSCell* owner, JSModuleNamespaceObject*, JSModuleEnvironment*, ScopeOffset);
+
+    std::unique_ptr<AccessCase> clone() const override;
+
+    void emit(AccessGenerationState&, MacroAssembler::JumpList& fallThrough);
+
+    ~ModuleNamespaceAccessCase();
+
+private:
+    ModuleNamespaceAccessCase(VM&, JSCell* owner, JSModuleNamespaceObject*, JSModuleEnvironment*, ScopeOffset);
+
+    WriteBarrier<JSModuleNamespaceObject> m_moduleNamespaceObject;
+    WriteBarrier<JSModuleEnvironment> m_moduleEnvironment;
+    ScopeOffset m_scopeOffset;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.cpp b/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.cpp
new file mode 100644
index 000000000..3d54c3ac8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ModuleProgramCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo ModuleProgramCodeBlock::s_info = {
+    "ModuleProgramCodeBlock", &Base::s_info, 0,
+    CREATE_METHOD_TABLE(ModuleProgramCodeBlock)
+};
+
+void ModuleProgramCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<ModuleProgramCodeBlock*>(cell)->~ModuleProgramCodeBlock();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.h b/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.h
new file mode 100644
index 000000000..62674ea92
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+
+namespace JSC {
+
+class ModuleProgramCodeBlock : public GlobalCodeBlock {
+public:
+    typedef GlobalCodeBlock Base;
+    DECLARE_INFO;
+
+    static ModuleProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ModuleProgramCodeBlock& other)
+    {
+        ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap))
+            ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), CopyParsedBlock, other);
+        instance->finishCreation(*vm, CopyParsedBlock, other);
+        return instance;
+    }
+
+    static ModuleProgramCodeBlock* create(VM* vm, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned firstLineColumnOffset)
+    {
+        ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap))
+            ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), firstLineColumnOffset);
+        instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+        return instance;
+    }
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+private:
+    ModuleProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ModuleProgramCodeBlock& other)
+        : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+
+    ModuleProgramCodeBlock(VM* vm, Structure* structure, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned firstLineColumnOffset)
+        : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), 0, firstLineColumnOffset)
+    {
+    }
+
+    static void destroy(JSCell*);
+};
+
+} // namespace JSC
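
Both create() overloads follow JSC's usual two-phase cell construction: placement-new the cell into GC-allocated storage, then call finishCreation() for initialization that needs the fully constructed object. A minimal sketch of that pattern, assuming a toy heap in place of JSC's:

    #include <cstdlib>
    #include <new>

    struct Heap {
        void* allocate(std::size_t size) { return std::malloc(size); }
    };

    struct Cell {
        int linked = 0;

        // Phase 2: runs on a fully constructed cell, so it can safely hand
        // out 'this' or allocate further.
        void finishCreation() { linked = 1; }

        // Phase 1 + 2, mirroring the create()/finishCreation() split above.
        static Cell* create(Heap& heap)
        {
            Cell* cell = new (heap.allocate(sizeof(Cell))) Cell;
            cell->finishCreation();
            return cell;
        }
    };

    int main()
    {
        Heap heap;
        Cell* cell = Cell::create(heap);
        int ok = cell->linked == 1 ? 0 : 1;
        std::free(cell); // toy heap; JSC's GC would own this storage
        return ok;
    }
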
diff --git a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
index 9a9db0bc7..301a3580c 100644
--- a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
+++ b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ObjectAllocationProfile_h
-#define ObjectAllocationProfile_h
+#pragma once
 
 #include "VM.h"
 #include "JSGlobalObject.h"
@@ -39,18 +38,21 @@ class ObjectAllocationProfile {
 public:
     static ptrdiff_t offsetOfAllocator() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_allocator); }
     static ptrdiff_t offsetOfStructure() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_structure); }
+    static ptrdiff_t offsetOfInlineCapacity() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_inlineCapacity); }
 
     ObjectAllocationProfile()
         : m_allocator(0)
+        , m_inlineCapacity(0)
     {
     }
 
-    bool isNull() { return !m_allocator; }
+    bool isNull() { return !m_structure; }
 
-    void initialize(VM& vm, JSCell* owner, JSObject* prototype, unsigned inferredInlineCapacity)
+    void initialize(VM& vm, JSGlobalObject* globalObject, JSCell* owner, JSObject* prototype, unsigned inferredInlineCapacity)
     {
         ASSERT(!m_allocator);
         ASSERT(!m_structure);
+        ASSERT(!m_inlineCapacity);
 
         unsigned inlineCapacity = 0;
         if (inferredInlineCapacity < JSFinalObject::defaultInlineCapacity()) {
@@ -80,33 +82,46 @@ public:
         ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
 
         size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
-        MarkedAllocator* allocator = &vm.heap.allocatorForObjectWithoutDestructor(allocationSize);
-        ASSERT(allocator->cellSize());
-
+        MarkedAllocator* allocator = vm.cellSpace.allocatorFor(allocationSize);
+        
         // Take advantage of extra inline capacity available in the size class.
-        size_t slop = (allocator->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>);
-        inlineCapacity += slop;
-        if (inlineCapacity > JSFinalObject::maxInlineCapacity())
-            inlineCapacity = JSFinalObject::maxInlineCapacity();
+        if (allocator) {
+            size_t slop = (allocator->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>);
+            inlineCapacity += slop;
+            if (inlineCapacity > JSFinalObject::maxInlineCapacity())
+                inlineCapacity = JSFinalObject::maxInlineCapacity();
+        }
+
+        Structure* structure = vm.prototypeMap.emptyObjectStructureForPrototype(globalObject, prototype, inlineCapacity);
+
+        // Ensure that if another thread sees the structure, it will see it properly created
+        WTF::storeStoreFence();
 
         m_allocator = allocator;
-        m_structure.set(vm, owner,
-            vm.prototypeMap.emptyObjectStructureForPrototype(prototype, inlineCapacity));
+        m_structure.set(vm, owner, structure);
+        m_inlineCapacity = inlineCapacity;
     }
 
-    Structure* structure() { return m_structure.get(); }
-    unsigned inlineCapacity() { return m_structure->inlineCapacity(); }
+    Structure* structure()
+    {
+        Structure* structure = m_structure.get();
+        // Ensure that if we see the structure, it has been properly created
+        WTF::loadLoadFence();
+        return structure;
+    }
+    unsigned inlineCapacity() { return m_inlineCapacity; }
 
     void clear()
     {
         m_allocator = 0;
         m_structure.clear();
+        m_inlineCapacity = 0;
         ASSERT(isNull());
     }
 
     void visitAggregate(SlotVisitor& visitor)
     {
-        visitor.append(&m_structure);
+        visitor.append(m_structure);
     }
 
 private:
@@ -117,14 +132,14 @@ private:
             return 0;
 
         size_t count = 0;
-        PropertyNameArray propertyNameArray(&vm);
-        prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, ExcludeDontEnumProperties);
+        PropertyNameArray propertyNameArray(&vm, PropertyNameMode::StringsAndSymbols);
+        prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, EnumerationMode());
         PropertyNameArrayData::PropertyNameVector& propertyNameVector = propertyNameArray.data()->propertyNameVector();
         for (size_t i = 0; i < propertyNameVector.size(); ++i) {
             JSValue value = prototype->getDirect(vm, propertyNameVector[i]);
 
             // Functions are common, and are usually class-level objects that are not overridden.
-            if (jsDynamicCast<JSFunction*>(value))
+            if (jsDynamicCast<JSFunction*>(vm, value))
                 continue;
 
             ++count;
@@ -135,8 +150,7 @@ private:
 
     MarkedAllocator* m_allocator; // Precomputed to make things easier for generated code.
     WriteBarrier<Structure> m_structure;
+    unsigned m_inlineCapacity;
 };
 
 } // namespace JSC
-
-#endif // ObjectAllocationProfile_h
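
The initialize()/structure() pairing above is a classic fence-based publication: finish building the Structure, execute a store-store fence, then publish the pointer; readers load the pointer and fence before trusting the published object's fields. A minimal standalone sketch of that pattern, using std::atomic fences as stand-ins for WTF::storeStoreFence()/WTF::loadLoadFence() (names here are illustrative, not JSC API):

#include <atomic>

struct Structure { unsigned inlineCapacity = 0; };

struct Profile {
    std::atomic<Structure*> structure { nullptr };

    void initialize(Structure* s, unsigned capacity)
    {
        s->inlineCapacity = capacity;                        // fully construct first
        std::atomic_thread_fence(std::memory_order_release); // storeStoreFence() analogue
        structure.store(s, std::memory_order_relaxed);       // then publish the pointer
    }

    Structure* get()
    {
        Structure* s = structure.load(std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_acquire); // loadLoadFence() analogue
        return s; // if non-null, the construction above is guaranteed visible
    }
};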
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp
new file mode 100644
index 000000000..3aad09409
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ObjectPropertyCondition.h"
+
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
+
+namespace JSC {
+
+void ObjectPropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    if (!*this) {
+        out.print("<invalid>");
+        return;
+    }
+    
+    out.print("<", inContext(JSValue(m_object), context), ": ", inContext(m_condition, context), ">");
+}
+
+void ObjectPropertyCondition::dump(PrintStream& out) const
+{
+    dumpInContext(out, nullptr);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint(
+    Structure* structure) const
+{
+    return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint() const
+{
+    if (!*this)
+        return false;
+    
+    return structureEnsuresValidityAssumingImpurePropertyWatchpoint(m_object->structure());
+}
+
+bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const
+{
+    return m_condition.validityRequiresImpurePropertyWatchpoint(structure);
+}
+
+bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint() const
+{
+    if (!*this)
+        return false;
+    
+    return validityRequiresImpurePropertyWatchpoint(m_object->structure());
+}
+
+bool ObjectPropertyCondition::isStillValidAssumingImpurePropertyWatchpoint(Structure* structure) const
+{
+    return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure, m_object);
+}
+
+bool ObjectPropertyCondition::isStillValidAssumingImpurePropertyWatchpoint() const
+{
+    if (!*this)
+        return false;
+
+    return isStillValidAssumingImpurePropertyWatchpoint(m_object->structure());
+}
+
+
+bool ObjectPropertyCondition::isStillValid(Structure* structure) const
+{
+    return m_condition.isStillValid(structure, m_object);
+}
+
+bool ObjectPropertyCondition::isStillValid() const
+{
+    if (!*this)
+        return false;
+    
+    return isStillValid(m_object->structure());
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidity(Structure* structure) const
+{
+    return m_condition.isStillValid(structure);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidity() const
+{
+    if (!*this)
+        return false;
+    
+    return structureEnsuresValidity(m_object->structure());
+}
+
+bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+    Structure* structure, PropertyCondition::WatchabilityEffort effort) const
+{
+    return m_condition.isWatchableAssumingImpurePropertyWatchpoint(structure, m_object, effort);
+}
+
+bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+    PropertyCondition::WatchabilityEffort effort) const
+{
+    if (!*this)
+        return false;
+    
+    return isWatchableAssumingImpurePropertyWatchpoint(m_object->structure(), effort);
+}
+
+bool ObjectPropertyCondition::isWatchable(
+    Structure* structure, PropertyCondition::WatchabilityEffort effort) const
+{
+    return m_condition.isWatchable(structure, m_object, effort);
+}
+
+bool ObjectPropertyCondition::isWatchable(PropertyCondition::WatchabilityEffort effort) const
+{
+    if (!*this)
+        return false;
+    
+    return isWatchable(m_object->structure(), effort);
+}
+
+bool ObjectPropertyCondition::isStillLive() const
+{
+    if (!*this)
+        return false;
+    
+    if (!Heap::isMarked(m_object))
+        return false;
+    
+    return m_condition.isStillLive();
+}
+
+void ObjectPropertyCondition::validateReferences(const TrackedReferences& tracked) const
+{
+    if (!*this)
+        return;
+    
+    tracked.check(m_object);
+    m_condition.validateReferences(tracked);
+}
+
+ObjectPropertyCondition ObjectPropertyCondition::attemptToMakeEquivalenceWithoutBarrier(VM& vm) const
+{
+    PropertyCondition result = condition().attemptToMakeEquivalenceWithoutBarrier(vm, object());
+    if (!result)
+        return ObjectPropertyCondition();
+    return ObjectPropertyCondition(object(), result);
+}
+
+} // namespace JSC
+
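
Nearly every predicate above comes in a pair: a Structure*-taking form and a parameterless form that loads m_object->structure(). The header explains why: a concurrent compiler must validate against the exact structure it already loaded, or a check/use (TOCTOU) race sneaks in. A rough sketch of the pattern under stand-in types (not the JSC classes):

struct Structure { bool valid; };
struct Object { Structure* structure; };

struct Condition {
    Object* m_object;

    // Concurrent callers pass the snapshot they compiled against.
    bool isStillValid(Structure* structure) const { return structure->valid; }

    // Main-thread convenience: re-read the current structure and forward.
    bool isStillValid() const
    {
        if (!m_object)
            return false;
        return isStillValid(m_object->structure);
    }
};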
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h
new file mode 100644
index 000000000..377e07ba2
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSObject.h"
+#include "PropertyCondition.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+class ObjectPropertyCondition {
+public:
+    ObjectPropertyCondition()
+        : m_object(nullptr)
+    {
+    }
+    
+    ObjectPropertyCondition(WTF::HashTableDeletedValueType token)
+        : m_object(nullptr)
+        , m_condition(token)
+    {
+    }
+    
+    ObjectPropertyCondition(JSObject* object, const PropertyCondition& condition)
+        : m_object(object)
+        , m_condition(condition)
+    {
+    }
+    
+    static ObjectPropertyCondition presenceWithoutBarrier(
+        JSObject* object, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+    {
+        ObjectPropertyCondition result;
+        result.m_object = object;
+        result.m_condition = PropertyCondition::presenceWithoutBarrier(uid, offset, attributes); 
+        return result;
+    }
+    
+    static ObjectPropertyCondition presence(
+        VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyOffset offset,
+        unsigned attributes)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return presenceWithoutBarrier(object, uid, offset, attributes);
+    }
+
+    // NOTE: The prototype is the storedPrototype, not the prototypeForLookup.
+    static ObjectPropertyCondition absenceWithoutBarrier(
+        JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        ObjectPropertyCondition result;
+        result.m_object = object;
+        result.m_condition = PropertyCondition::absenceWithoutBarrier(uid, prototype);
+        return result;
+    }
+    
+    static ObjectPropertyCondition absence(
+        VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return absenceWithoutBarrier(object, uid, prototype);
+    }
+    
+    static ObjectPropertyCondition absenceOfSetterWithoutBarrier(
+        JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        ObjectPropertyCondition result;
+        result.m_object = object;
+        result.m_condition = PropertyCondition::absenceOfSetterWithoutBarrier(uid, prototype);
+        return result;
+    }
+    
+    static ObjectPropertyCondition absenceOfSetter(
+        VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return absenceOfSetterWithoutBarrier(object, uid, prototype);
+    }
+    
+    static ObjectPropertyCondition equivalenceWithoutBarrier(
+        JSObject* object, UniquedStringImpl* uid, JSValue value)
+    {
+        ObjectPropertyCondition result;
+        result.m_object = object;
+        result.m_condition = PropertyCondition::equivalenceWithoutBarrier(uid, value);
+        return result;
+    }
+    
+    static ObjectPropertyCondition equivalence(
+        VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSValue value)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return equivalenceWithoutBarrier(object, uid, value);
+    }
+
+    explicit operator bool() const { return !!m_condition; }
+    
+    JSObject* object() const { return m_object; }
+    PropertyCondition condition() const { return m_condition; }
+    
+    PropertyCondition::Kind kind() const { return condition().kind(); }
+    UniquedStringImpl* uid() const { return condition().uid(); }
+    bool hasOffset() const { return condition().hasOffset(); }
+    PropertyOffset offset() const { return condition().offset(); }
+    unsigned hasAttributes() const { return condition().hasAttributes(); }
+    unsigned attributes() const { return condition().attributes(); }
+    bool hasPrototype() const { return condition().hasPrototype(); }
+    JSObject* prototype() const { return condition().prototype(); }
+    bool hasRequiredValue() const { return condition().hasRequiredValue(); }
+    JSValue requiredValue() const { return condition().requiredValue(); }
+    
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    void dump(PrintStream&) const;
+    
+    unsigned hash() const
+    {
+        return WTF::PtrHash<JSObject*>::hash(m_object) ^ m_condition.hash();
+    }
+    
+    bool operator==(const ObjectPropertyCondition& other) const
+    {
+        return m_object == other.m_object
+            && m_condition == other.m_condition;
+    }
+    
+    bool isHashTableDeletedValue() const
+    {
+        return !m_object && m_condition.isHashTableDeletedValue();
+    }
+    
+    // Two conditions are compatible if they are identical or if they speak of different uids or
+    // different objects. If false is returned, you have to decide how to resolve the conflict -
+    // for example if there is a Presence and an Equivalence then in some cases you'll want the
+    // more general of the two while in other cases you'll want the more specific of the two. This
+    // will also return false for contradictions, like Presence and Absence on the same
+    // object/uid. By convention, invalid conditions aren't compatible with anything.
+    bool isCompatibleWith(const ObjectPropertyCondition& other) const
+    {
+        if (!*this || !other)
+            return false;
+        return *this == other || uid() != other.uid() || object() != other.object();
+    }
+    
+    // These validity-checking methods can optionally take a Structure* instead of loading the
+    // Structure* from the object. If you're in the concurrent JIT, then you must use the forms
+    // that take an explicit Structure* because you want the compiler to optimize for the same
+    // structure that you validated (i.e. avoid a TOCTOU race).
+    
+    // Checks if the object's structure claims that the property won't be intercepted. Validity
+    // does not require watchpoints on the object.
+    bool structureEnsuresValidityAssumingImpurePropertyWatchpoint(Structure*) const;
+    bool structureEnsuresValidityAssumingImpurePropertyWatchpoint() const;
+    
+    // Returns true if we need an impure property watchpoint to ensure validity even if
+    // isStillValidAccordingToStructure() returned true.
+    bool validityRequiresImpurePropertyWatchpoint(Structure*) const;
+    bool validityRequiresImpurePropertyWatchpoint() const;
+
+    // Checks if the condition still holds setting aside the need for an impure property watchpoint.
+    // Validity might still require watchpoints on the object.
+    bool isStillValidAssumingImpurePropertyWatchpoint(Structure*) const;
+    bool isStillValidAssumingImpurePropertyWatchpoint() const;
+
+    // Checks if the condition still holds. May conservatively return false, if the object and
+    // structure alone don't guarantee the condition. Note that this may return true if the
+    // condition still requires some watchpoints on the object in addition to checking the
+    // structure. If you want to check if the condition holds by using the structure alone,
+    // use structureEnsuresValidity().
+    bool isStillValid(Structure*) const;
+    bool isStillValid() const;
+    
+    // Shorthand for condition().isStillValid(structure).
+    bool structureEnsuresValidity(Structure*) const;
+    bool structureEnsuresValidity() const;
+    
+    // This means that it's still valid and we could enforce validity by setting a transition
+    // watchpoint on the structure and possibly an impure property watchpoint.
+    bool isWatchableAssumingImpurePropertyWatchpoint(
+        Structure*,
+        PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+    bool isWatchableAssumingImpurePropertyWatchpoint(
+        PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+
+    // This means that it's still valid and we could enforce validity by setting a transition
+    // watchpoint on the structure.
+    bool isWatchable(
+        Structure*,
+        PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+    bool isWatchable(
+        PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+    
+    bool watchingRequiresStructureTransitionWatchpoint() const
+    {
+        return condition().watchingRequiresStructureTransitionWatchpoint();
+    }
+    bool watchingRequiresReplacementWatchpoint() const
+    {
+        return condition().watchingRequiresReplacementWatchpoint();
+    }
+    
+    // This means that the objects involved in this are still live.
+    bool isStillLive() const;
+    
+    void validateReferences(const TrackedReferences&) const;
+
+    bool isValidValueForPresence(VM& vm, JSValue value) const
+    {
+        return condition().isValidValueForPresence(vm, value);
+    }
+
+    ObjectPropertyCondition attemptToMakeEquivalenceWithoutBarrier(VM&) const;
+
+private:
+    JSObject* m_object;
+    PropertyCondition m_condition;
+};
+
+struct ObjectPropertyConditionHash {
+    static unsigned hash(const ObjectPropertyCondition& key) { return key.hash(); }
+    static bool equal(
+        const ObjectPropertyCondition& a, const ObjectPropertyCondition& b)
+    {
+        return a == b;
+    }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::ObjectPropertyCondition> {
+    typedef JSC::ObjectPropertyConditionHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::ObjectPropertyCondition> : SimpleClassHashTraits<JSC::ObjectPropertyCondition> { };
+
+} // namespace WTF
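
The isCompatibleWith() rule above is easy to get backwards, so here is a toy model of it under stand-in types (not the JSC classes): two conditions conflict only when they name the same (object, uid) pair yet are not identical.

#include <cassert>
#include <string>

struct Cond {
    const void* object;
    std::string uid;
    std::string kind; // "Presence", "Absence", "Equivalence", ...

    bool operator==(const Cond& other) const
    {
        return object == other.object && uid == other.uid && kind == other.kind;
    }
    bool isCompatibleWith(const Cond& other) const
    {
        return *this == other || uid != other.uid || object != other.object;
    }
};

int main()
{
    int a, b;
    Cond presence { &a, "x", "Presence" };
    Cond absence { &a, "x", "Absence" };
    Cond elsewhere { &b, "x", "Presence" };
    assert(!presence.isCompatibleWith(absence));  // same object/uid: contradiction
    assert(presence.isCompatibleWith(elsewhere)); // different object: compatible
    assert(presence.isCompatibleWith(presence));  // identical: compatible
}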
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp
new file mode 100644
index 000000000..e2e4a8fbb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ObjectPropertyConditionSet.h"
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+ObjectPropertyCondition ObjectPropertyConditionSet::forObject(JSObject* object) const
+{
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.object() == object)
+            return condition;
+    }
+    return ObjectPropertyCondition();
+}
+
+ObjectPropertyCondition ObjectPropertyConditionSet::forConditionKind(
+    PropertyCondition::Kind kind) const
+{
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.kind() == kind)
+            return condition;
+    }
+    return ObjectPropertyCondition();
+}
+
+unsigned ObjectPropertyConditionSet::numberOfConditionsWithKind(PropertyCondition::Kind kind) const
+{
+    unsigned result = 0;
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.kind() == kind)
+            result++;
+    }
+    return result;
+}
+
+bool ObjectPropertyConditionSet::hasOneSlotBaseCondition() const
+{
+    return (numberOfConditionsWithKind(PropertyCondition::Presence) == 1) != (numberOfConditionsWithKind(PropertyCondition::Equivalence) == 1);
+}
+
+ObjectPropertyCondition ObjectPropertyConditionSet::slotBaseCondition() const
+{
+    ObjectPropertyCondition result;
+    unsigned numFound = 0;
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.kind() == PropertyCondition::Presence
+            || condition.kind() == PropertyCondition::Equivalence) {
+            result = condition;
+            numFound++;
+        }
+    }
+    RELEASE_ASSERT(numFound == 1);
+    return result;
+}
+
+ObjectPropertyConditionSet ObjectPropertyConditionSet::mergedWith(
+    const ObjectPropertyConditionSet& other) const
+{
+    if (!isValid() || !other.isValid())
+        return invalid();
+
+    Vector<ObjectPropertyCondition> result;
+    
+    if (!isEmpty())
+        result.appendVector(m_data->vector);
+    
+    for (const ObjectPropertyCondition& newCondition : other) {
+        bool foundMatch = false;
+        for (const ObjectPropertyCondition& existingCondition : *this) {
+            if (newCondition == existingCondition) {
+                foundMatch = true;
+                continue;
+            }
+            if (!newCondition.isCompatibleWith(existingCondition))
+                return invalid();
+        }
+        if (!foundMatch)
+            result.append(newCondition);
+    }
+
+    return create(result);
+}
+
+bool ObjectPropertyConditionSet::structuresEnsureValidity() const
+{
+    if (!isValid())
+        return false;
+    
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (!condition.structureEnsuresValidity())
+            return false;
+    }
+    return true;
+}
+
+bool ObjectPropertyConditionSet::structuresEnsureValidityAssumingImpurePropertyWatchpoint() const
+{
+    if (!isValid())
+        return false;
+    
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint())
+            return false;
+    }
+    return true;
+}
+
+bool ObjectPropertyConditionSet::needImpurePropertyWatchpoint() const
+{
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.validityRequiresImpurePropertyWatchpoint())
+            return true;
+    }
+    return false;
+}
+
+bool ObjectPropertyConditionSet::areStillLive() const
+{
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (!condition.isStillLive())
+            return false;
+    }
+    return true;
+}
+
+void ObjectPropertyConditionSet::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    if (!isValid()) {
+        out.print("<invalid>");
+        return;
+    }
+    
+    out.print("[");
+    if (m_data)
+        out.print(listDumpInContext(m_data->vector, context));
+    out.print("]");
+}
+
+void ObjectPropertyConditionSet::dump(PrintStream& out) const
+{
+    dumpInContext(out, nullptr);
+}
+
+bool ObjectPropertyConditionSet::isValidAndWatchable() const
+{
+    if (!isValid())
+        return false;
+
+    for (ObjectPropertyCondition condition : m_data->vector) {
+        if (!condition.isWatchable())
+            return false;
+    }
+    return true;
+}
+
+namespace {
+
+bool verbose = false;
+
+ObjectPropertyCondition generateCondition(
+    VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyCondition::Kind conditionKind)
+{
+    Structure* structure = object->structure();
+    if (verbose)
+        dataLog("Creating condition ", conditionKind, " for ", pointerDump(structure), "\n");
+
+    ObjectPropertyCondition result;
+    switch (conditionKind) {
+    case PropertyCondition::Presence: {
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (offset == invalidOffset)
+            return ObjectPropertyCondition();
+        result = ObjectPropertyCondition::presence(vm, owner, object, uid, offset, attributes);
+        break;
+    }
+    case PropertyCondition::Absence: {
+        result = ObjectPropertyCondition::absence(
+            vm, owner, object, uid, object->structure()->storedPrototypeObject());
+        break;
+    }
+    case PropertyCondition::AbsenceOfSetter: {
+        result = ObjectPropertyCondition::absenceOfSetter(
+            vm, owner, object, uid, object->structure()->storedPrototypeObject());
+        break;
+    }
+    case PropertyCondition::Equivalence: {
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (offset == invalidOffset)
+            return ObjectPropertyCondition();
+        JSValue value = object->getDirect(offset);
+        result = ObjectPropertyCondition::equivalence(vm, owner, object, uid, value);
+        break;
+    }
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return ObjectPropertyCondition();
+    }
+
+    if (!result.isStillValidAssumingImpurePropertyWatchpoint()) {
+        if (verbose)
+            dataLog("Failed to create condition: ", result, "\n");
+        return ObjectPropertyCondition();
+    }
+
+    if (verbose)
+        dataLog("New condition: ", result, "\n");
+    return result;
+}
+
+enum Concurrency {
+    MainThread,
+    Concurrent
+};
+template<typename Functor>
+ObjectPropertyConditionSet generateConditions(
+    VM& vm, JSGlobalObject* globalObject, Structure* structure, JSObject* prototype, const Functor& functor,
+    Concurrency concurrency = MainThread)
+{
+    Vector<ObjectPropertyCondition> conditions;
+    
+    for (;;) {
+        if (verbose)
+            dataLog("Considering structure: ", pointerDump(structure), "\n");
+        
+        if (structure->isProxy()) {
+            if (verbose)
+                dataLog("It's a proxy, so invalid.\n");
+            return ObjectPropertyConditionSet::invalid();
+        }
+        
+        JSValue value = structure->prototypeForLookup(globalObject);
+        
+        if (value.isNull()) {
+            if (!prototype) {
+                if (verbose)
+                    dataLog("Reached end of prototype chain as expected, done.\n");
+                break;
+            }
+            if (verbose)
+                dataLog("Unexpectedly reached end of prototype chain, so invalid.\n");
+            return ObjectPropertyConditionSet::invalid();
+        }
+        
+        JSObject* object = jsCast<JSObject*>(value);
+        structure = object->structure(vm);
+        
+        if (structure->isDictionary()) {
+            if (concurrency == MainThread) {
+                if (structure->hasBeenFlattenedBefore()) {
+                    if (verbose)
+                        dataLog("Dictionary has been flattened before, so invalid.\n");
+                    return ObjectPropertyConditionSet::invalid();
+                }
+
+                if (verbose)
+                    dataLog("Flattening ", pointerDump(structure));
+                structure->flattenDictionaryStructure(vm, object);
+            } else {
+                if (verbose)
+                    dataLog("Cannot flatten dictionary when not on main thread, so invalid.\n");
+                return ObjectPropertyConditionSet::invalid();
+            }
+        }
+
+        if (!functor(conditions, object)) {
+            if (verbose)
+                dataLog("Functor failed, invalid.\n");
+            return ObjectPropertyConditionSet::invalid();
+        }
+        
+        if (object == prototype) {
+            if (verbose)
+                dataLog("Reached desired prototype, done.\n");
+            break;
+        }
+    }
+
+    if (verbose)
+        dataLog("Returning conditions: ", listDump(conditions), "\n");
+    return ObjectPropertyConditionSet::create(conditions);
+}
+
+} // anonymous namespace
+
+ObjectPropertyConditionSet generateConditionsForPropertyMiss(
+    VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, exec->lexicalGlobalObject(), headStructure, nullptr,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            ObjectPropertyCondition result =
+                generateCondition(vm, owner, object, uid, PropertyCondition::Absence);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        });
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMiss(
+    VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, exec->lexicalGlobalObject(), headStructure, nullptr,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            ObjectPropertyCondition result =
+                generateCondition(vm, owner, object, uid, PropertyCondition::AbsenceOfSetter);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit(
+    VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, exec->lexicalGlobalObject(), headStructure, prototype,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            PropertyCondition::Kind kind =
+                object == prototype ? PropertyCondition::Presence : PropertyCondition::Absence;
+            ObjectPropertyCondition result =
+                generateCondition(vm, owner, object, uid, kind);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom(
+    VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, exec->lexicalGlobalObject(), headStructure, prototype,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            if (object == prototype)
+                return true;
+            ObjectPropertyCondition result =
+                generateCondition(vm, owner, object, uid, PropertyCondition::Absence);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypeEquivalenceConcurrently(
+    VM& vm, JSGlobalObject* globalObject, Structure* headStructure, JSObject* prototype, UniquedStringImpl* uid)
+{
+    return generateConditions(vm, globalObject, headStructure, prototype,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            PropertyCondition::Kind kind =
+                object == prototype ? PropertyCondition::Equivalence : PropertyCondition::Absence;
+            ObjectPropertyCondition result = generateCondition(vm, nullptr, object, uid, kind);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        }, Concurrent);
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertyMissConcurrently(
+    VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, globalObject, headStructure, nullptr,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            ObjectPropertyCondition result = generateCondition(vm, nullptr, object, uid, PropertyCondition::Absence);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        }, Concurrent);
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently(
+    VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, globalObject, headStructure, nullptr,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            ObjectPropertyCondition result =
+                generateCondition(vm, nullptr, object, uid, PropertyCondition::AbsenceOfSetter);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        }, Concurrent);
+}
+
+ObjectPropertyCondition generateConditionForSelfEquivalence(
+    VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid)
+{
+    return generateCondition(vm, owner, object, uid, PropertyCondition::Equivalence);
+}
+
+} // namespace JSC
+
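
All of the generateConditionsFor*() entry points above funnel into the same chain walk: start at the head structure, visit each prototype in turn, let the functor append one condition per object, and stop either at the target prototype (a hit) or at null (a miss). A simplified sketch of that control flow under stand-in types, with proxies, dictionary flattening, and VM details omitted:

struct Obj { Obj* prototype = nullptr; };

template<typename Functor>
bool generateConditions(Obj* head, Obj* target, const Functor& functor)
{
    for (Obj* object = head->prototype; ; object = object->prototype) {
        if (!object)
            return !target;   // null is only expected when no target was given
        if (!functor(object)) // functor could not build a condition: invalid
            return false;
        if (object == target)
            return true;      // reached the prototype we were looking for
    }
}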
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h
new file mode 100644
index 000000000..2b15965f6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ObjectPropertyCondition.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/ThreadSafeRefCounted.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+// An object property condition set is used to represent the set of additional conditions
+// that need to be met for some heap access to be valid. The set can have the following
+// interesting states:
+//
+// Empty: There are no special conditions that need to be met.
+// Invalid: The heap access is never valid.
+// Non-empty: The heap access is valid if all the ObjectPropertyConditions in the set are valid.
+
+class ObjectPropertyConditionSet {
+public:
+    ObjectPropertyConditionSet() { }
+    
+    static ObjectPropertyConditionSet invalid()
+    {
+        ObjectPropertyConditionSet result;
+        result.m_data = adoptRef(new Data());
+        return result;
+    }
+    
+    static ObjectPropertyConditionSet create(const Vector<ObjectPropertyCondition>& vector)
+    {
+        if (vector.isEmpty())
+            return ObjectPropertyConditionSet();
+        
+        ObjectPropertyConditionSet result;
+        result.m_data = adoptRef(new Data());
+        result.m_data->vector = vector;
+        return result;
+    }
+    
+    bool isValid() const
+    {
+        return !m_data || !m_data->vector.isEmpty();
+    }
+
+    bool isValidAndWatchable() const;
+    
+    bool isEmpty() const
+    {
+        return !m_data;
+    }
+    
+    typedef const ObjectPropertyCondition* iterator;
+    
+    iterator begin() const
+    {
+        if (!m_data)
+            return nullptr;
+        return m_data->vector.begin();
+    }
+    iterator end() const
+    {
+        if (!m_data)
+            return nullptr;
+        return m_data->vector.end();
+    }
+    
+    ObjectPropertyCondition forObject(JSObject*) const;
+    ObjectPropertyCondition forConditionKind(PropertyCondition::Kind) const;
+
+    unsigned numberOfConditionsWithKind(PropertyCondition::Kind) const;
+
+    bool hasOneSlotBaseCondition() const;
+    
+    // If this is a condition set for a prototype hit, then this is guaranteed to return the
+    // condition on the prototype itself. This allows you to get the object, offset, and
+    // attributes for the prototype. This will RELEASE_ASSERT that there is exactly one Presence
+    // in the set, and it will return that presence.
+    ObjectPropertyCondition slotBaseCondition() const;
+    
+    // Attempt to create a new condition set by merging this one with the other one. This will
+    // fail if any of the conditions are incompatible with each other. When it fails, it returns
+    // invalid().
+    ObjectPropertyConditionSet mergedWith(const ObjectPropertyConditionSet& other) const;
+    
+    bool structuresEnsureValidity() const;
+    bool structuresEnsureValidityAssumingImpurePropertyWatchpoint() const;
+    
+    bool needImpurePropertyWatchpoint() const;
+    bool areStillLive() const;
+    
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    void dump(PrintStream&) const;
+    
+    // Helpers for using this in a union.
+    void* releaseRawPointer()
+    {
+        return static_cast<void*>(m_data.leakRef());
+    }
+    static ObjectPropertyConditionSet adoptRawPointer(void* rawPointer)
+    {
+        ObjectPropertyConditionSet result;
+        result.m_data = adoptRef(static_cast<Data*>(rawPointer));
+        return result;
+    }
+    static ObjectPropertyConditionSet fromRawPointer(void* rawPointer)
+    {
+        ObjectPropertyConditionSet result;
+        result.m_data = static_cast<Data*>(rawPointer);
+        return result;
+    }
+
+    // FIXME: Everything below here should be private, but cannot be because of a bug in VS.
+    
+    // Internally, this represents Invalid using a pointer to a Data that has an empty vector.
+    
+    // FIXME: This could be made more compact by having it internally use a vector that just has
+    // the non-uid portion of ObjectPropertyCondition, and then requiring that the callers of all
+    // of the APIs supply the uid.
+    
+    class Data : public ThreadSafeRefCounted<Data> {
+        WTF_MAKE_NONCOPYABLE(Data);
+        WTF_MAKE_FAST_ALLOCATED;
+        
+    public:
+        Data() { }
+        
+        Vector vector;
+    };
+    
+private:
+    RefPtr<Data> m_data;
+};
+
+ObjectPropertyCondition generateConditionForSelfEquivalence(
+    VM&, JSCell* owner, JSObject* object, UniquedStringImpl* uid);
+
+ObjectPropertyConditionSet generateConditionsForPropertyMiss(
+    VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertySetterMiss(
+    VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit(
+    VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom(
+    VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid);
+
+ObjectPropertyConditionSet generateConditionsForPrototypeEquivalenceConcurrently(
+    VM&, JSGlobalObject*, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertyMissConcurrently(
+    VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently(
+    VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid);
+
+} // namespace JSC
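
A point worth making explicit about the class above: "invalid" is not a flag. An empty set has a null m_data; an invalid set has a Data whose vector is empty; anything else is a set of conditions that must all hold. A compact sketch of that encoding, with std::shared_ptr standing in for RefPtr<Data> and int standing in for a condition:

#include <memory>
#include <vector>

struct ConditionSet {
    struct Data { std::vector<int> vector; };
    std::shared_ptr<Data> m_data;

    static ConditionSet invalid()
    {
        ConditionSet result;
        result.m_data = std::make_shared<Data>(); // present but empty => invalid
        return result;
    }
    bool isEmpty() const { return !m_data; } // no conditions needed at all
    bool isValid() const { return !m_data || !m_data->vector.empty(); }
};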
diff --git a/Source/JavaScriptCore/bytecode/Opcode.cpp b/Source/JavaScriptCore/bytecode/Opcode.cpp
index 26f53511a..0d16dfc2f 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.cpp
+++ b/Source/JavaScriptCore/bytecode/Opcode.cpp
@@ -11,7 +11,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -30,6 +30,8 @@
 #include "config.h"
 #include "Opcode.h"
 
+#include <wtf/PrintStream.h>
+
 #if ENABLE(OPCODE_STATS)
 #include <array>
 #include <wtf/DataLog.h>
@@ -185,3 +187,14 @@ void OpcodeStats::resetLastInstruction()
 #endif
 
 } // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, OpcodeID opcode)
+{
+    out.print(opcodeNames[opcode]);
+}
+
+} // namespace WTF
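
The printInternal() overload added above is how WTF's printing machinery is extended: out.print(value) dispatches to printInternal(PrintStream&, T), so providing an overload for OpcodeID makes opcodes print by name in dataLog() output. A self-contained sketch of the mechanism with simplified stand-ins (not the real WTF classes):

#include <cstdio>

struct PrintStream;
enum OpcodeID : unsigned { op_enter, op_jmp, op_ret, op_end };
void printInternal(PrintStream&, OpcodeID);

struct PrintStream {
    void write(const char* s) { std::fputs(s, stdout); }
    template<typename T> void print(const T& value) { printInternal(*this, value); }
};

static const char* const opcodeNames[] = { "op_enter", "op_jmp", "op_ret", "op_end" };

void printInternal(PrintStream& out, OpcodeID opcode)
{
    out.write(opcodeNames[opcode]);
}

int main()
{
    PrintStream out;
    out.print(op_jmp); // prints "op_jmp"
}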
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index e8636e785..41c8509a2 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -11,7 +11,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -27,9 +27,9 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef Opcode_h
-#define Opcode_h
+#pragma once
 
+#include "Bytecodes.h"
 #include "LLIntOpcode.h"
 
 #include <algorithm>
@@ -40,158 +40,8 @@
 namespace JSC {
 
 #define FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, extension__) \
-    macro(op_enter, 1) \
-    macro(op_create_activation, 2) \
-    macro(op_touch_entry, 1) \
-    macro(op_init_lazy_reg, 2) \
-    macro(op_create_arguments, 2) \
-    macro(op_create_this, 4) \
-    macro(op_get_callee, 3) \
-    macro(op_to_this, 3) \
-    \
-    macro(op_new_object, 4) \
-    macro(op_new_array, 5) \
-    macro(op_new_array_with_size, 4) \
-    macro(op_new_array_buffer, 5) \
-    macro(op_new_regexp, 3) \
-    macro(op_mov, 3) \
-    macro(op_captured_mov, 4) \
-    \
-    macro(op_not, 3) \
-    macro(op_eq, 4) \
-    macro(op_eq_null, 3) \
-    macro(op_neq, 4) \
-    macro(op_neq_null, 3) \
-    macro(op_stricteq, 4) \
-    macro(op_nstricteq, 4) \
-    macro(op_less, 4) \
-    macro(op_lesseq, 4) \
-    macro(op_greater, 4) \
-    macro(op_greatereq, 4) \
-    \
-    macro(op_inc, 2) \
-    macro(op_dec, 2) \
-    macro(op_to_number, 3) \
-    macro(op_negate, 3) \
-    macro(op_add, 5) \
-    macro(op_mul, 5) \
-    macro(op_div, 5) \
-    macro(op_mod, 4) \
-    macro(op_sub, 5) \
-    \
-    macro(op_lshift, 4) \
-    macro(op_rshift, 4) \
-    macro(op_urshift, 4) \
-    macro(op_unsigned, 3) \
-    macro(op_bitand, 5) \
-    macro(op_bitxor, 5) \
-    macro(op_bitor, 5) \
-    \
-    macro(op_check_has_instance, 5) \
-    macro(op_instanceof, 4) \
-    macro(op_typeof, 3) \
-    macro(op_is_undefined, 3) \
-    macro(op_is_boolean, 3) \
-    macro(op_is_number, 3) \
-    macro(op_is_string, 3) \
-    macro(op_is_object, 3) \
-    macro(op_is_function, 3) \
-    macro(op_in, 4) \
-    \
-    macro(op_init_global_const_nop, 5) \
-    macro(op_init_global_const, 5) \
-    macro(op_get_by_id, 9) /* has value profiling */ \
-    macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \
-    macro(op_get_by_id_self, 9) /* has value profiling */ \
-    macro(op_get_by_id_proto, 9) /* has value profiling */ \
-    macro(op_get_by_id_chain, 9) /* has value profiling */ \
-    macro(op_get_by_id_getter_self, 9) /* has value profiling */ \
-    macro(op_get_by_id_getter_proto, 9) /* has value profiling */ \
-    macro(op_get_by_id_getter_chain, 9) /* has value profiling */ \
-    macro(op_get_by_id_custom_self, 9) /* has value profiling */ \
-    macro(op_get_by_id_custom_proto, 9) /* has value profiling */ \
-    macro(op_get_by_id_custom_chain, 9) /* has value profiling */ \
-    macro(op_get_by_id_generic, 9) /* has value profiling */ \
-    macro(op_get_array_length, 9) /* has value profiling */ \
-    macro(op_get_string_length, 9) /* has value profiling */ \
-    macro(op_get_arguments_length, 4) \
-    macro(op_put_by_id, 9) \
-    macro(op_put_by_id_out_of_line, 9) \
-    macro(op_put_by_id_transition, 9) \
-    macro(op_put_by_id_transition_direct, 9) \
-    macro(op_put_by_id_transition_direct_out_of_line, 9) \
-    macro(op_put_by_id_transition_normal, 9) \
-    macro(op_put_by_id_transition_normal_out_of_line, 9) \
-    macro(op_put_by_id_replace, 9) \
-    macro(op_put_by_id_generic, 9) \
-    macro(op_del_by_id, 4) \
-    macro(op_get_by_val, 6) /* has value profiling */ \
-    macro(op_get_argument_by_val, 6) /* must be the same size as op_get_by_val */ \
-    macro(op_get_by_pname, 7) \
-    macro(op_put_by_val, 5) \
-    macro(op_put_by_val_direct, 5) \
-    macro(op_del_by_val, 4) \
-    macro(op_put_by_index, 4) \
-    macro(op_put_getter_setter, 5) \
-    \
-    macro(op_jmp, 2) \
-    macro(op_jtrue, 3) \
-    macro(op_jfalse, 3) \
-    macro(op_jeq_null, 3) \
-    macro(op_jneq_null, 3) \
-    macro(op_jneq_ptr, 4) \
-    macro(op_jless, 4) \
-    macro(op_jlesseq, 4) \
-    macro(op_jgreater, 4) \
-    macro(op_jgreatereq, 4) \
-    macro(op_jnless, 4) \
-    macro(op_jnlesseq, 4) \
-    macro(op_jngreater, 4) \
-    macro(op_jngreatereq, 4) \
-    \
-    macro(op_loop_hint, 1) \
-    \
-    macro(op_switch_imm, 4) \
-    macro(op_switch_char, 4) \
-    macro(op_switch_string, 4) \
-    \
-    macro(op_new_func, 4) \
-    macro(op_new_captured_func, 4) \
-    macro(op_new_func_exp, 3) \
-    macro(op_call, 8) /* has value profiling */ \
-    macro(op_call_eval, 8) /* has value profiling */ \
-    macro(op_call_varargs, 8) /* has value profiling */ \
-    macro(op_tear_off_activation, 2) \
-    macro(op_tear_off_arguments, 3) \
-    macro(op_ret, 2) \
-    macro(op_ret_object_or_this, 3) \
-    \
-    macro(op_construct, 8) \
-    macro(op_strcat, 4) \
-    macro(op_to_primitive, 3) \
-    \
-    macro(op_get_pnames, 6) \
-    macro(op_next_pname, 7) \
-    \
-    macro(op_resolve_scope, 6) \
-    macro(op_get_from_scope, 8) /* has value profiling */ \
-    macro(op_put_to_scope, 7) \
-    \
-    macro(op_push_with_scope, 2) \
-    macro(op_pop_scope, 1) \
-    macro(op_push_name_scope, 4) \
-    \
-    macro(op_catch, 2) \
-    macro(op_throw, 2) \
-    macro(op_throw_static_error, 3) \
-    \
-    macro(op_debug, 3) \
-    macro(op_profile_will_call, 2) \
-    macro(op_profile_did_call, 2) \
-    \
-    extension__ \
-    \
-    macro(op_end, 2) // end must be the last opcode in the list
+    FOR_EACH_BYTECODE_ID(macro) \
+    extension__
 
 #define FOR_EACH_CORE_OPCODE_ID(macro) \
     FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, /* No extension */ )
@@ -204,11 +54,15 @@ namespace JSC {
 
 
 #define OPCODE_ID_ENUM(opcode, length) opcode,
-    typedef enum { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) } OpcodeID;
+    enum OpcodeID : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) };
 #undef OPCODE_ID_ENUM
 
 const int maxOpcodeLength = 9;
-const int numOpcodeIDs = op_end + 1;
+#if !ENABLE(JIT)
+const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_CLOOP_BYTECODE_HELPER_IDS + NUMBER_OF_BYTECODE_HELPER_IDS;
+#else
+const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS;
+#endif
 
 #define OPCODE_ID_LENGTHS(id, length) const int id##_length = length;
     FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS);
@@ -220,10 +74,19 @@ const int numOpcodeIDs = op_end + 1;
     const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) };
 #undef OPCODE_ID_LENGTH_MAP
 
-#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= op_end, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
+#if COMPILER(GCC)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+#endif
+
+#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= numOpcodeIDs, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
     FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID);
 #undef VERIFY_OPCODE_ID
 
+#if COMPILER(GCC)
+#pragma GCC diagnostic pop
+#endif
+
 #if ENABLE(COMPUTED_GOTO_OPCODES)
 typedef void* Opcode;
 #else
@@ -271,6 +134,70 @@ inline size_t opcodeLength(OpcodeID opcode)
     return 0;
 }
 
+inline bool isBranch(OpcodeID opcodeID)
+{
+    switch (opcodeID) {
+    case op_jmp:
+    case op_jtrue:
+    case op_jfalse:
+    case op_jeq_null:
+    case op_jneq_null:
+    case op_jneq_ptr:
+    case op_jless:
+    case op_jlesseq:
+    case op_jgreater:
+    case op_jgreatereq:
+    case op_jnless:
+    case op_jnlesseq:
+    case op_jngreater:
+    case op_jngreatereq:
+    case op_switch_imm:
+    case op_switch_char:
+    case op_switch_string:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool isUnconditionalBranch(OpcodeID opcodeID)
+{
+    switch (opcodeID) {
+    case op_jmp:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool isTerminal(OpcodeID opcodeID)
+{
+    switch (opcodeID) {
+    case op_ret:
+    case op_end:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool isThrow(OpcodeID opcodeID)
+{
+    switch (opcodeID) {
+    case op_throw:
+    case op_throw_static_error:
+        return true;
+    default:
+        return false;
+    }
+}
+
 } // namespace JSC
 
-#endif // Opcode_h
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::OpcodeID);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/Operands.h b/Source/JavaScriptCore/bytecode/Operands.h
index f21e05f5f..102879814 100644
--- a/Source/JavaScriptCore/bytecode/Operands.h
+++ b/Source/JavaScriptCore/bytecode/Operands.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2015, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef Operands_h
-#define Operands_h
+#pragma once
 
 #include "CallFrame.h"
 #include "JSObject.h"
@@ -37,32 +36,37 @@ namespace JSC {
 
 template<typename T> struct OperandValueTraits;
 
-template<typename T>
-struct OperandValueTraits {
-    static T defaultValue() { return T(); }
-    static bool isEmptyForDump(const T& value) { return !value; }
-};
-
 enum OperandKind { ArgumentOperand, LocalOperand };
 
 enum OperandsLikeTag { OperandsLike };
 
-template<typename T, typename Traits = OperandValueTraits<T>>
+template<typename T>
 class Operands {
 public:
     Operands() { }
     
     explicit Operands(size_t numArguments, size_t numLocals)
     {
-        m_arguments.fill(Traits::defaultValue(), numArguments);
-        m_locals.fill(Traits::defaultValue(), numLocals);
+        if (WTF::VectorTraits<T>::needsInitialization) {
+            m_arguments.resize(numArguments);
+            m_locals.resize(numLocals);
+        } else {
+            m_arguments.fill(T(), numArguments);
+            m_locals.fill(T(), numLocals);
+        }
+    }
+
+    explicit Operands(size_t numArguments, size_t numLocals, const T& initialValue)
+    {
+        m_arguments.fill(initialValue, numArguments);
+        m_locals.fill(initialValue, numLocals);
     }
     
-    template<typename U, typename OtherTraits>
-    explicit Operands(OperandsLikeTag, const Operands<U, OtherTraits>& other)
+    template<typename U>
+    explicit Operands(OperandsLikeTag, const Operands<U>& other)
     {
-        m_arguments.fill(Traits::defaultValue(), other.numberOfArguments());
-        m_locals.fill(Traits::defaultValue(), other.numberOfLocals());
+        m_arguments.fill(T(), other.numberOfArguments());
+        m_locals.fill(T(), other.numberOfLocals());
     }
     
     size_t numberOfArguments() const { return m_arguments.size(); }
@@ -97,6 +101,19 @@ public:
     }
     
     void ensureLocals(size_t size)
+    {
+        if (size <= m_locals.size())
+            return;
+
+        size_t oldSize = m_locals.size();
+        m_locals.resize(size);
+        if (!WTF::VectorTraits<T>::needsInitialization) {
+            for (size_t i = oldSize; i < m_locals.size(); ++i)
+                m_locals[i] = T();
+        }
+    }
+
+    void ensureLocals(size_t size, const T& ensuredValue)
     {
         if (size <= m_locals.size())
             return;
@@ -104,7 +121,7 @@ public:
         size_t oldSize = m_locals.size();
         m_locals.resize(size);
         for (size_t i = oldSize; i < m_locals.size(); ++i)
-            m_locals[i] = Traits::defaultValue();
+            m_locals[i] = ensuredValue;
     }
     
     void setLocal(size_t idx, const T& value)
@@ -117,19 +134,19 @@ public:
     T getLocal(size_t idx)
     {
         if (idx >= m_locals.size())
-            return Traits::defaultValue();
+            return T();
         return m_locals[idx];
     }
     
     void setArgumentFirstTime(size_t idx, const T& value)
     {
-        ASSERT(m_arguments[idx] == Traits::defaultValue());
+        ASSERT(m_arguments[idx] == T());
         argument(idx) = value;
     }
     
     void setLocalFirstTime(size_t idx, const T& value)
     {
-        ASSERT(idx >= m_locals.size() || m_locals[idx] == Traits::defaultValue());
+        ASSERT(idx >= m_locals.size() || m_locals[idx] == T());
         setLocal(idx, value);
     }
     
@@ -149,6 +166,7 @@ public:
     }
 
     const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
+    const T& operand(VirtualRegister operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
     
     bool hasOperand(int operand) const
     {
@@ -209,6 +227,10 @@ public:
             return virtualRegisterForArgument(index).offset();
         return virtualRegisterForLocal(index - numberOfArguments()).offset();
     }
+    VirtualRegister virtualRegisterForIndex(size_t index) const
+    {
+        return VirtualRegister(operandForIndex(index));
+    }
     size_t indexForOperand(int operand) const
     {
         if (operandIsArgument(operand))
@@ -240,7 +262,7 @@ public:
     
     void clear()
     {
-        fill(Traits::defaultValue());
+        fill(T());
     }
     
     bool operator==(const Operands& other) const
@@ -252,11 +274,7 @@ public:
     }
     
     void dumpInContext(PrintStream& out, DumpContext* context) const;
-    
-    void dump(PrintStream& out) const
-    {
-        dumpInContext(out, 0);
-    }
+    void dump(PrintStream& out) const;
     
 private:
     Vector<T, 8> m_arguments;
@@ -264,6 +282,3 @@ private:
 };
 
 } // namespace JSC
-
-#endif // Operands_h
-
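
The ensureLocals() overloads above only ever grow the locals vector and then backfill the newly exposed slots. A sketch of that growth logic on std::vector (note: std::vector::resize() always value-initializes, whereas WTF::Vector may leave trivial types uninitialized, which is why the original dispatches on VectorTraits<T>::needsInitialization):

#include <cstddef>
#include <vector>

template<typename T>
void ensureLocals(std::vector<T>& locals, std::size_t size, const T& ensuredValue)
{
    if (size <= locals.size())
        return;                    // grow only, never shrink

    std::size_t oldSize = locals.size();
    locals.resize(size);
    for (std::size_t i = oldSize; i < locals.size(); ++i)
        locals[i] = ensuredValue;  // backfill the new slots
}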
diff --git a/Source/JavaScriptCore/bytecode/OperandsInlines.h b/Source/JavaScriptCore/bytecode/OperandsInlines.h
index 74ad60bc1..65fedda07 100644
--- a/Source/JavaScriptCore/bytecode/OperandsInlines.h
+++ b/Source/JavaScriptCore/bytecode/OperandsInlines.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,31 +23,43 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef OperandsInlines_h
-#define OperandsInlines_h
+#pragma once
 
 #include "Operands.h"
 #include <wtf/CommaPrinter.h>
 
 namespace JSC {
 
-template<typename T, typename Traits>
-void Operands<T, Traits>::dumpInContext(PrintStream& out, DumpContext* context) const
+template<typename T>
+void Operands<T>::dumpInContext(PrintStream& out, DumpContext* context) const
 {
     CommaPrinter comma(" ");
     for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) {
-        if (Traits::isEmptyForDump(argument(argumentIndex)))
+        if (!argument(argumentIndex))
             continue;
         out.print(comma, "arg", argumentIndex, ":", inContext(argument(argumentIndex), context));
     }
     for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) {
-        if (Traits::isEmptyForDump(local(localIndex)))
+        if (!local(localIndex))
             continue;
         out.print(comma, "loc", localIndex, ":", inContext(local(localIndex), context));
     }
 }
 
-} // namespace JSC
-
-#endif // OperandsInlines_h
+template<typename T>
+void Operands<T>::dump(PrintStream& out) const
+{
+    CommaPrinter comma(" ");
+    for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) {
+        if (!argument(argumentIndex))
+            continue;
+        out.print(comma, "arg", argumentIndex, ":", argument(argumentIndex));
+    }
+    for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) {
+        if (!local(localIndex))
+            continue;
+        out.print(comma, "loc", localIndex, ":", local(localIndex));
+    }
+}
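+
+// Illustrative output (hypothetical instantiation): for an Operands<int> where
+// arg1 == 5, loc0 == 7, and every other slot is zero (and hence skipped by the
+// !value checks above), dump() prints:
+//     arg1:5 loc0:7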
 
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
new file mode 100644
index 000000000..f062bc5c0
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
@@ -0,0 +1,677 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PolymorphicAccess.h"
+
+#if ENABLE(JIT)
+
+#include "BinarySwitch.h"
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+#include "Heap.h"
+#include "JITOperations.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "StructureStubClearingWatchpoint.h"
+#include "StructureStubInfo.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+void AccessGenerationResult::dump(PrintStream& out) const
+{
+    out.print(m_kind);
+    if (m_code)
+        out.print(":", m_code);
+}
+
+Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
+{
+    return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+        watchpoints, jit->codeBlock(), stubInfo, condition);
+}
+
+void AccessGenerationState::restoreScratch()
+{
+    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
+}
+
+void AccessGenerationState::succeed()
+{
+    restoreScratch();
+    success.append(jit->jump());
+}
+
+const RegisterSet& AccessGenerationState::liveRegistersForCall()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling)
+        calculateLiveRegistersForCallAndExceptionHandling();
+    return m_liveRegistersForCall;
+}
+
+const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling)
+        calculateLiveRegistersForCallAndExceptionHandling();
+    return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+}
+
+static RegisterSet calleeSaveRegisters()
+{
+    RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
+    result.filter(RegisterSet::registersToNotSaveForCCall());
+    return result;
+}
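+
+// In set terms: the result is the intersection (RegisterSet::filter()) of the
+// registers that a JS call and a C call each promise not to clobber, i.e. the
+// registers that are callee-saved under both calling conventions.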
+
+const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling) {
+        m_calculatedRegistersForCallAndExceptionHandling = true;
+
+        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
+        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
+        if (m_needsToRestoreRegistersIfException)
+            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
+
+        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
+        m_liveRegistersForCall.exclude(calleeSaveRegisters());
+    }
+    return m_liveRegistersForCall;
+}
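+
+// As a sketch of the formula computed above: liveRegistersForCall =
+//     (liveAtExceptionHandlingCallSite | allocator->usedRegisters()) - calleeSaveRegisters().
+// Callee saves survive the call anyway, so spilling them would be wasted work.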
+
+auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
+{
+    RegisterSet liveRegisters = liveRegistersForCall();
+    liveRegisters.merge(extra);
+    
+    unsigned extraStackPadding = 0;
+    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
+    return SpillState {
+        WTFMove(liveRegisters),
+        numberOfStackBytesUsedForRegisterPreservation
+    };
+}
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
+{
+    // Even if we're a getter, we don't want to ignore the result value like we normally do
+    // because the getter threw, and therefore, didn't return a value that means anything.
+    // Instead, we want to restore that register to what it was upon entering the getter
+    // inline cache. The subtlety here is if the base and the result are the same register,
+    // and the getter threw, we want OSR exit to see the original base value, not the result
+    // of the getter call.
+    RegisterSet dontRestore = spillState.spilledRegisters;
+    // As an optimization here, we only need to restore what is live for exception handling.
+    // We can construct the dontRestore set to accomplish this goal by having it contain only
+    // what is live for call but not live for exception handling. By ignoring things that are
+    // only live at the call but not the exception handler, we will only restore things live
+    // at the exception handler.
+    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
+    restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+}
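+
+// Equivalently, in set algebra (illustrative): dontRestore = spilled - liveAtHandler,
+// so the set actually restored is spilled & liveAtHandler. Everything the
+// exception handler may read comes back, while registers that were live only
+// across the call keep whatever state the throw left them in.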
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
+{
+    unsigned extraStackPadding = 0;
+    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
+}
+
+CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling)
+        calculateLiveRegistersForCallAndExceptionHandling();
+
+    if (!m_calculatedCallSiteIndex) {
+        m_calculatedCallSiteIndex = true;
+
+        if (m_needsToRestoreRegistersIfException)
+            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
+        else
+            m_callSiteIndex = originalCallSiteIndex();
+    }
+
+    return m_callSiteIndex;
+}
+
+const HandlerInfo& AccessGenerationState::originalExceptionHandler()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling)
+        calculateLiveRegistersForCallAndExceptionHandling();
+
+    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
+    RELEASE_ASSERT(exceptionHandler);
+    return *exceptionHandler;
+}
+
+CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
+
+void AccessGenerationState::emitExplicitExceptionHandler()
+{
+    restoreScratch();
+    jit->copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+    if (needsToRestoreRegistersIfException()) {
+        // The JIT that produced the original exception handling call site
+        // expects the OSR exit to be arrived at from genericUnwind. Therefore
+        // we must model what genericUnwind does here, i.e., set
+        // callFrameForCatch and copy the callee saves.
+
+        jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
+        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
+
+        // We don't need to insert a new exception handler in the table
+        // because we're doing a manual exception check here, i.e., we'll
+        // never arrive here from genericUnwind().
+        HandlerInfo originalHandler = originalExceptionHandler();
+        jit->addLinkTask(
+            [=] (LinkBuffer& linkBuffer) {
+                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
+            });
+    } else {
+        jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
+        CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
+        jit->addLinkTask(
+            [=] (LinkBuffer& linkBuffer) {
+                linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
+            });
+        jit->jumpToExceptionHandler();
+    }
+}
+
+
+PolymorphicAccess::PolymorphicAccess() { }
+PolymorphicAccess::~PolymorphicAccess() { }
+
+AccessGenerationResult PolymorphicAccess::addCases(
+    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+    Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
+{
+    SuperSamplerScope superSamplerScope(false);
+    
+    // This method will add the originalCasesToAdd to the list one at a time while preserving the
+    // invariants:
+    // - If a newly added case canReplace() any existing case, then the existing case is removed before
+    //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
+    //   can be removed via the canReplace() rule.
+    // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
+    //   cascade through the cases in reverse order, you will get the most recent cases first.
+    // - If this method fails (returns MadeNoChanges without adding the cases), then both the
+    //   previous case list and the previous stub are kept intact and the new cases are destroyed.
+    //   It's OK to attempt to add more things after failure.
+    
+    // First ensure that the originalCasesToAdd doesn't contain duplicates.
+    Vector<std::unique_ptr<AccessCase>> casesToAdd;
+    for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
+        std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
+
+        // Add it only if it is not replaced by the subsequent cases in the list.
+        bool found = false;
+        for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
+            if (originalCasesToAdd[j]->canReplace(*myCase)) {
+                found = true;
+                break;
+            }
+        }
+
+        if (found)
+            continue;
+        
+        casesToAdd.append(WTFMove(myCase));
+    }
+
+    if (verbose)
+        dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
+
+    // If there aren't any cases to add, then fail on the grounds that there's no point in generating a
+    // new stub that would be identical to the old one. Returning MadeNoChanges tells the caller to
+    // just keep doing what they were doing before.
+    if (casesToAdd.isEmpty())
+        return AccessGenerationResult::MadeNoChanges;
+
+    // Now add things to the new list. Note that at this point, we will still have old cases that
+    // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
+    for (auto& caseToAdd : casesToAdd) {
+        commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
+        m_list.append(WTFMove(caseToAdd));
+    }
+    
+    if (verbose)
+        dataLog("After addCases: m_list: ", listDump(m_list), "\n");
+
+    return AccessGenerationResult::Buffered;
+}
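+
+// Illustrative call-site pattern (surrounding names hypothetical): callers
+// typically keep buffering cases and only regenerate the stub on a later miss:
+//
+//     AccessGenerationResult result = access->addCases(vm, codeBlock, stubInfo, ident, WTFMove(cases));
+//     if (result.buffered())
+//         return; // wait for more cases before calling regenerate()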
+
+AccessGenerationResult PolymorphicAccess::addCase(
+    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+    std::unique_ptr<AccessCase> newAccess)
+{
+    Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
+    newAccesses.append(WTFMove(newAccess));
+    return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
+}
+
+bool PolymorphicAccess::visitWeak(VM& vm) const
+{
+    for (unsigned i = 0; i < size(); ++i) {
+        if (!at(i).visitWeak(vm))
+            return false;
+    }
+    if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
+        for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
+            if (!Heap::isMarked(weakReference.get()))
+                return false;
+        }
+    }
+    return true;
+}
+
+bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
+{
+    bool result = true;
+    for (unsigned i = 0; i < size(); ++i)
+        result &= at(i).propagateTransitions(visitor);
+    return result;
+}
+
+void PolymorphicAccess::dump(PrintStream& out) const
+{
+    out.print(RawPointer(this), ":[");
+    CommaPrinter comma;
+    for (auto& entry : m_list)
+        out.print(comma, *entry);
+    out.print("]");
+}
+
+void PolymorphicAccess::commit(
+    VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
+    StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
+{
+    // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
+    // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
+    // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
+    // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
+    // Those common kinds of JSC object accesses don't hit this case.
+    
+    for (WatchpointSet* set : accessCase.commit(vm, ident)) {
+        Watchpoint* watchpoint =
+            WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+                watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
+        
+        set->add(watchpoint);
+    }
+}
+
+AccessGenerationResult PolymorphicAccess::regenerate(
+    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
+{
+    SuperSamplerScope superSamplerScope(false);
+    
+    if (verbose)
+        dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
+    
+    AccessGenerationState state;
+
+    state.access = this;
+    state.stubInfo = &stubInfo;
+    state.ident = &ident;
+    
+    state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+    state.valueRegs = stubInfo.valueRegs();
+
+    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+    state.allocator = &allocator;
+    allocator.lock(state.baseGPR);
+    allocator.lock(state.valueRegs);
+#if USE(JSVALUE32_64)
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+
+    state.scratchGPR = allocator.allocateScratchGPR();
+    
+    CCallHelpers jit(&vm, codeBlock);
+    state.jit = &jit;
+
+    state.preservedReusedRegisterState =
+        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
+
+    // Regenerating is our opportunity to figure out what our list of cases should look like. We
+    // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
+    // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
+    // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
+    // from the code of the current stub (aka previous).
+    ListType cases;
+    unsigned srcIndex = 0;
+    unsigned dstIndex = 0;
+    while (srcIndex < m_list.size()) {
+        std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);
+        
+        // If the case had been generated, then we have to keep the original in m_list in case we
+        // fail to regenerate. That case may have data structures that are used by the code that it
+        // had generated. If the case had not been generated, then we want to remove it from m_list.
+        bool isGenerated = someCase->state() == AccessCase::Generated;
+        
+        [&] () {
+            if (!someCase->couldStillSucceed())
+                return;
+
+            // Figure out if this is replaced by any later case.
+            for (unsigned j = srcIndex; j < m_list.size(); ++j) {
+                if (m_list[j]->canReplace(*someCase))
+                    return;
+            }
+            
+            if (isGenerated)
+                cases.append(someCase->clone());
+            else
+                cases.append(WTFMove(someCase));
+        }();
+        
+        if (isGenerated)
+            m_list[dstIndex++] = WTFMove(someCase);
+    }
+    m_list.resize(dstIndex);
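+
+    // Worked example (illustrative): if m_list was [A (generated), B (primordial
+    // but unable to succeed), C (primordial)], then cases is now [A.clone(), C]
+    // and m_list has shrunk to [A]. B is dropped outright, C has moved into
+    // cases, and A stays behind because the currently installed stub may still
+    // reference data owned by the original A.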
+    
+    if (verbose)
+        dataLog("Optimized cases: ", listDump(cases), "\n");
+    
+    // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
+    // won't change that set anymore.
+    
+    bool allGuardedByStructureCheck = true;
+    bool hasJSGetterSetterCall = false;
+    for (auto& newCase : cases) {
+        commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
+        allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
+        if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
+            hasJSGetterSetterCall = true;
+    }
+
+    if (cases.isEmpty()) {
+        // This is super unlikely, but we make it legal anyway.
+        state.failAndRepatch.append(jit.jump());
+    } else if (!allGuardedByStructureCheck || cases.size() == 1) {
+        // If there are any proxies in the list, we cannot just use a binary switch over the structure.
+        // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
+        // one case.
+        CCallHelpers::JumpList fallThrough;
+
+        // Cascade through the list, preferring newer entries.
+        for (unsigned i = cases.size(); i--;) {
+            fallThrough.link(&jit);
+            fallThrough.clear();
+            cases[i]->generateWithGuard(state, fallThrough);
+        }
+        state.failAndRepatch.append(fallThrough);
+    } else {
+        jit.load32(
+            CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
+            state.scratchGPR);
+        
+        Vector<int64_t> caseValues(cases.size());
+        for (unsigned i = 0; i < cases.size(); ++i)
+            caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
+        
+        BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
+        while (binarySwitch.advance(jit))
+            cases[binarySwitch.caseIndex()]->generate(state);
+        state.failAndRepatch.append(binarySwitch.fallThrough());
+    }
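+
+    // Conceptually (illustrative), for structure IDs {3, 17, 42} the dispatch
+    // emitted above behaves like a balanced search rather than a linear cascade:
+    //     if (id < 17)       { if (id == 3)  case0; else fail; }
+    //     else if (id == 17) case1;
+    //     else if (id == 42) case2;
+    //     else               fail; // fail == failAndRepatch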
+
+    if (!state.failAndIgnore.empty()) {
+        state.failAndIgnore.link(&jit);
+        
+        // Make sure that the inline cache optimization code knows that we are taking slow path because
+        // of something that isn't patchable. The slow path will decrement "countdown" and will only
+        // patch things if the countdown reaches zero. We increment the slow path count here to ensure
+        // that the slow path does not try to patch.
+#if CPU(X86) || CPU(X86_64)
+        jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
+        jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
+#else
+        jit.load8(&stubInfo.countdown, state.scratchGPR);
+        jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
+        jit.store8(state.scratchGPR, &stubInfo.countdown);
+#endif
+    }
+
+    CCallHelpers::JumpList failure;
+    if (allocator.didReuseRegisters()) {
+        state.failAndRepatch.link(&jit);
+        state.restoreScratch();
+    } else
+        failure = state.failAndRepatch;
+    failure.append(jit.jump());
+
+    CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
+    CallSiteIndex callSiteIndexForExceptionHandling;
+    if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
+        // Emit the exception handler.
+        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
+        // Note also that it is not reachable from a custom getter/setter. Custom getters/setters have
+        // their own exception handling logic that doesn't go through genericUnwind.
+        MacroAssembler::Label makeshiftCatchHandler = jit.label();
+
+        int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
+        AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
+        ASSERT(!spillStateForJSGetterSetter.isEmpty());
+        stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
+        stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;
+
+        jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
+        jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+        state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
+        state.restoreScratch();
+        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
+
+        HandlerInfo oldHandler = state.originalExceptionHandler();
+        CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
+        jit.addLinkTask(
+            [=] (LinkBuffer& linkBuffer) {
+                linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
+
+                HandlerInfo handlerToRegister = oldHandler;
+                handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
+                handlerToRegister.start = newExceptionHandlingCallSite.bits();
+                handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
+                codeBlock->appendExceptionHandler(handlerToRegister);
+            });
+
+        // We set these to indicate to the stub to remove itself from the CodeBlock's
+        // exception handler table when it is deallocated.
+        codeBlockThatOwnsExceptionHandlers = codeBlock;
+        ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
+        callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
+    }
+
+    LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
+    if (linkBuffer.didFailToAllocate()) {
+        if (verbose)
+            dataLog("Did fail to allocate.\n");
+        return AccessGenerationResult::GaveUp;
+    }
+
+    CodeLocationLabel successLabel = stubInfo.doneLocation();
+        
+    linkBuffer.link(state.success, successLabel);
+
+    linkBuffer.link(failure, stubInfo.slowPathStartLocation());
+    
+    if (verbose)
+        dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
+
+    MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
+        codeBlock, linkBuffer,
+        ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
+
+    bool doesCalls = false;
+    Vector<JSCell*> cellsToMark;
+    for (auto& entry : cases)
+        doesCalls |= entry->doesCalls(&cellsToMark);
+    
+    m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
+    m_watchpoints = WTFMove(state.watchpoints);
+    if (!state.weakReferences.isEmpty())
+        m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
+    if (verbose)
+        dataLog("Returning: ", code.code(), "\n");
+    
+    m_list = WTFMove(cases);
+    
+    AccessGenerationResult::Kind resultKind;
+    if (m_list.size() >= Options::maxAccessVariantListSize())
+        resultKind = AccessGenerationResult::GeneratedFinalCode;
+    else
+        resultKind = AccessGenerationResult::GeneratedNewCode;
+    
+    return AccessGenerationResult(resultKind, code.code());
+}
+
+void PolymorphicAccess::aboutToDie()
+{
+    if (m_stubRoutine)
+        m_stubRoutine->aboutToDie();
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
+{
+    switch (kind) {
+    case AccessGenerationResult::MadeNoChanges:
+        out.print("MadeNoChanges");
+        return;
+    case AccessGenerationResult::GaveUp:
+        out.print("GaveUp");
+        return;
+    case AccessGenerationResult::Buffered:
+        out.print("Buffered");
+        return;
+    case AccessGenerationResult::GeneratedNewCode:
+        out.print("GeneratedNewCode");
+        return;
+    case AccessGenerationResult::GeneratedFinalCode:
+        out.print("GeneratedFinalCode");
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, AccessCase::AccessType type)
+{
+    switch (type) {
+    case AccessCase::Load:
+        out.print("Load");
+        return;
+    case AccessCase::Transition:
+        out.print("Transition");
+        return;
+    case AccessCase::Replace:
+        out.print("Replace");
+        return;
+    case AccessCase::Miss:
+        out.print("Miss");
+        return;
+    case AccessCase::GetGetter:
+        out.print("GetGetter");
+        return;
+    case AccessCase::Getter:
+        out.print("Getter");
+        return;
+    case AccessCase::Setter:
+        out.print("Setter");
+        return;
+    case AccessCase::CustomValueGetter:
+        out.print("CustomValueGetter");
+        return;
+    case AccessCase::CustomAccessorGetter:
+        out.print("CustomAccessorGetter");
+        return;
+    case AccessCase::CustomValueSetter:
+        out.print("CustomValueSetter");
+        return;
+    case AccessCase::CustomAccessorSetter:
+        out.print("CustomAccessorSetter");
+        return;
+    case AccessCase::IntrinsicGetter:
+        out.print("IntrinsicGetter");
+        return;
+    case AccessCase::InHit:
+        out.print("InHit");
+        return;
+    case AccessCase::InMiss:
+        out.print("InMiss");
+        return;
+    case AccessCase::ArrayLength:
+        out.print("ArrayLength");
+        return;
+    case AccessCase::StringLength:
+        out.print("StringLength");
+        return;
+    case AccessCase::DirectArgumentsLength:
+        out.print("DirectArgumentsLength");
+        return;
+    case AccessCase::ScopedArgumentsLength:
+        out.print("ScopedArgumentsLength");
+        return;
+    case AccessCase::ModuleNamespaceLoad:
+        out.print("ModuleNamespaceLoad");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, AccessCase::State state)
+{
+    switch (state) {
+    case AccessCase::Primordial:
+        out.print("Primordial");
+        return;
+    case AccessCase::Committed:
+        out.print("Committed");
+        return;
+    case AccessCase::Generated:
+        out.print("Generated");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
+
+
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.h b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h
new file mode 100644
index 000000000..d1852c7b5
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "AccessCase.h"
+#include "CodeOrigin.h"
+#include "JITStubRoutine.h"
+#include "JSFunctionInlines.h"
+#include "MacroAssembler.h"
+#include "ObjectPropertyConditionSet.h"
+#include "ScratchRegisterAllocator.h"
+#include "Structure.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+class CodeBlock;
+class PolymorphicAccess;
+class StructureStubInfo;
+class WatchpointsOnStructureStubInfo;
+class ScratchRegisterAllocator;
+
+class AccessGenerationResult {
+public:
+    enum Kind {
+        MadeNoChanges,
+        GaveUp,
+        Buffered,
+        GeneratedNewCode,
+        GeneratedFinalCode // Generated so much code that we never want to generate code again.
+    };
+    
+    AccessGenerationResult()
+    {
+    }
+    
+    AccessGenerationResult(Kind kind)
+        : m_kind(kind)
+    {
+        RELEASE_ASSERT(kind != GeneratedNewCode);
+        RELEASE_ASSERT(kind != GeneratedFinalCode);
+    }
+    
+    AccessGenerationResult(Kind kind, MacroAssemblerCodePtr code)
+        : m_kind(kind)
+        , m_code(code)
+    {
+        RELEASE_ASSERT(kind == GeneratedNewCode || kind == GeneratedFinalCode);
+        RELEASE_ASSERT(code);
+    }
+    
+    bool operator==(const AccessGenerationResult& other) const
+    {
+        return m_kind == other.m_kind && m_code == other.m_code;
+    }
+    
+    bool operator!=(const AccessGenerationResult& other) const
+    {
+        return !(*this == other);
+    }
+    
+    explicit operator bool() const
+    {
+        return *this != AccessGenerationResult();
+    }
+    
+    Kind kind() const { return m_kind; }
+    
+    const MacroAssemblerCodePtr& code() const { return m_code; }
+    
+    bool madeNoChanges() const { return m_kind == MadeNoChanges; }
+    bool gaveUp() const { return m_kind == GaveUp; }
+    bool buffered() const { return m_kind == Buffered; }
+    bool generatedNewCode() const { return m_kind == GeneratedNewCode; }
+    bool generatedFinalCode() const { return m_kind == GeneratedFinalCode; }
+    
+    // If we gave up on this attempt to generate code, or if we generated the "final" code, then we
+    // should give up after this.
+    bool shouldGiveUpNow() const { return gaveUp() || generatedFinalCode(); }
+    
+    bool generatedSomeCode() const { return generatedNewCode() || generatedFinalCode(); }
+    
+    void dump(PrintStream&) const;
+    
+private:
+    Kind m_kind;
+    MacroAssemblerCodePtr m_code;
+};
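+
+// Typical consumption pattern (illustrative; the call site is hypothetical):
+//
+//     AccessGenerationResult result = access->regenerate(vm, codeBlock, stubInfo, ident);
+//     if (result.shouldGiveUpNow())
+//         ...; // stop trying to repatch this inline cache
+//     else if (result.generatedSomeCode())
+//         ...; // repatch the fast path to jump to result.code()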
+
+class PolymorphicAccess {
+    WTF_MAKE_NONCOPYABLE(PolymorphicAccess);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    PolymorphicAccess();
+    ~PolymorphicAccess();
+
+    // When this fails (returns GaveUp), this will leave the old stub intact but you should not try
+    // to call this method again for that PolymorphicAccess instance.
+    AccessGenerationResult addCases(
+        VM&, CodeBlock*, StructureStubInfo&, const Identifier&, Vector<std::unique_ptr<AccessCase>, 2>);
+
+    AccessGenerationResult addCase(
+        VM&, CodeBlock*, StructureStubInfo&, const Identifier&, std::unique_ptr<AccessCase>);
+    
+    AccessGenerationResult regenerate(VM&, CodeBlock*, StructureStubInfo&, const Identifier&);
+    
+    bool isEmpty() const { return m_list.isEmpty(); }
+    unsigned size() const { return m_list.size(); }
+    const AccessCase& at(unsigned i) const { return *m_list[i]; }
+    const AccessCase& operator[](unsigned i) const { return *m_list[i]; }
+
+    // If this returns false then we are requesting a reset of the owning StructureStubInfo.
+    bool visitWeak(VM&) const;
+    
+    // This returns true if it has marked everything that it will ever mark. This can be used as an
+    // optimization to avoid calling this method again during the fixpoint.
+    bool propagateTransitions(SlotVisitor&) const;
+
+    void aboutToDie();
+
+    void dump(PrintStream& out) const;
+    bool containsPC(void* pc) const
+    { 
+        if (!m_stubRoutine)
+            return false;
+
+        uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
+        return m_stubRoutine->startAddress() <= pcAsInt && pcAsInt <= m_stubRoutine->endAddress();
+    }
+
+private:
+    friend class AccessCase;
+    friend class CodeBlock;
+    friend struct AccessGenerationState;
+    
+    typedef Vector<std::unique_ptr<AccessCase>, 2> ListType;
+    
+    void commit(
+        VM&, std::unique_ptr<WatchpointsOnStructureStubInfo>&, CodeBlock*, StructureStubInfo&,
+        const Identifier&, AccessCase&);
+
+    MacroAssemblerCodePtr regenerate(
+        VM&, CodeBlock*, StructureStubInfo&, const Identifier&, ListType& cases);
+
+    ListType m_list;
+    RefPtr<JITStubRoutine> m_stubRoutine;
+    std::unique_ptr<WatchpointsOnStructureStubInfo> m_watchpoints;
+    std::unique_ptr<Vector<WriteBarrier<JSCell>>> m_weakReferences;
+};
+
+struct AccessGenerationState {
+    AccessGenerationState()
+        : m_calculatedRegistersForCallAndExceptionHandling(false)
+        , m_needsToRestoreRegistersIfException(false)
+        , m_calculatedCallSiteIndex(false)
+    {
+    }
+    CCallHelpers* jit { nullptr };
+    ScratchRegisterAllocator* allocator;
+    ScratchRegisterAllocator::PreservedState preservedReusedRegisterState;
+    PolymorphicAccess* access { nullptr };
+    StructureStubInfo* stubInfo { nullptr };
+    MacroAssembler::JumpList success;
+    MacroAssembler::JumpList failAndRepatch;
+    MacroAssembler::JumpList failAndIgnore;
+    GPRReg baseGPR { InvalidGPRReg };
+    JSValueRegs valueRegs;
+    GPRReg scratchGPR { InvalidGPRReg };
+    const Identifier* ident;
+    std::unique_ptr<WatchpointsOnStructureStubInfo> watchpoints;
+    Vector<WriteBarrier<JSCell>> weakReferences;
+
+    Watchpoint* addWatchpoint(const ObjectPropertyCondition& = ObjectPropertyCondition());
+
+    void restoreScratch();
+    void succeed();
+
+    struct SpillState {
+        SpillState() = default;
+        SpillState(RegisterSet&& regs, unsigned usedStackBytes)
+            : spilledRegisters(WTFMove(regs))
+            , numberOfStackBytesUsedForRegisterPreservation(usedStackBytes)
+        {
+        }
+
+        RegisterSet spilledRegisters { };
+        unsigned numberOfStackBytesUsedForRegisterPreservation { std::numeric_limits<unsigned>::max() };
+
+        bool isEmpty() const { return numberOfStackBytesUsedForRegisterPreservation == std::numeric_limits<unsigned>::max(); }
+    };
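+
+    // Note that numberOfStackBytesUsedForRegisterPreservation doubles as the
+    // "empty" sentinel: a default-constructed SpillState holds unsigned max, so
+    // isEmpty() stays true until preserveLiveRegistersToStackForCall() records
+    // a real spill.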
+
+    const RegisterSet& calculateLiveRegistersForCallAndExceptionHandling();
+
+    SpillState preserveLiveRegistersToStackForCall(const RegisterSet& extra = RegisterSet());
+
+    void restoreLiveRegistersFromStackForCallWithThrownException(const SpillState&);
+    void restoreLiveRegistersFromStackForCall(const SpillState&, const RegisterSet& dontRestore = RegisterSet());
+
+    const RegisterSet& liveRegistersForCall();
+
+    CallSiteIndex callSiteIndexForExceptionHandlingOrOriginal();
+    CallSiteIndex callSiteIndexForExceptionHandling()
+    {
+        RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+        RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+        RELEASE_ASSERT(m_calculatedCallSiteIndex);
+        return m_callSiteIndex;
+    }
+
+    const HandlerInfo& originalExceptionHandler();
+
+    bool needsToRestoreRegistersIfException() const { return m_needsToRestoreRegistersIfException; }
+    CallSiteIndex originalCallSiteIndex() const;
+    
+    void emitExplicitExceptionHandler();
+
+    void setSpillStateForJSGetterSetter(SpillState& spillState)
+    {
+        if (!m_spillStateForJSGetterSetter.isEmpty()) {
+            ASSERT(m_spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation == spillState.numberOfStackBytesUsedForRegisterPreservation);
+            ASSERT(m_spillStateForJSGetterSetter.spilledRegisters == spillState.spilledRegisters);
+        }
+        m_spillStateForJSGetterSetter = spillState;
+    }
+    SpillState spillStateForJSGetterSetter() const { return m_spillStateForJSGetterSetter; }
+    
+private:
+    const RegisterSet& liveRegistersToPreserveAtExceptionHandlingCallSite();
+    
+    RegisterSet m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+    RegisterSet m_liveRegistersForCall;
+    CallSiteIndex m_callSiteIndex { CallSiteIndex(std::numeric_limits<unsigned>::max()) };
+    SpillState m_spillStateForJSGetterSetter;
+    bool m_calculatedRegistersForCallAndExceptionHandling : 1;
+    bool m_needsToRestoreRegistersIfException : 1;
+    bool m_calculatedCallSiteIndex : 1;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::AccessGenerationResult::Kind);
+void printInternal(PrintStream&, JSC::AccessCase::AccessType);
+void printInternal(PrintStream&, JSC::AccessCase::State);
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h b/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
deleted file mode 100644
index 61d97354f..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef PolymorphicAccessStructureList_h
-#define PolymorphicAccessStructureList_h
-
-#include "JITStubRoutine.h"
-#include "Structure.h"
-#include "StructureChain.h"
-#include <wtf/Platform.h>
-
-#define POLYMORPHIC_LIST_CACHE_SIZE 8
-
-namespace JSC {
-
-// *Sigh*, if the JIT is enabled we need to track the stubRoutine (of type CodeLocationLabel);
-// if the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't
-// currently actually use PolymorphicAccessStructureLists, which we should).  Anyway, this seems like the best
-// solution for now - will need to do something smarter if/when we actually want mixed-mode operation.
-
-#if ENABLE(JIT)
-// Structure used by the op_get_by_id_self_list and op_get_by_id_proto_list instructions to hold data off the main opcode stream.
-struct PolymorphicAccessStructureList {
-    WTF_MAKE_FAST_ALLOCATED;
-public:
-    struct PolymorphicStubInfo {
-        bool isChain;
-        bool isDirect;
-        RefPtr<JITStubRoutine> stubRoutine;
-        WriteBarrier<Structure> base;
-        union {
-            WriteBarrierBase<Structure> proto;
-            WriteBarrierBase<StructureChain> chain;
-        } u;
-
-        PolymorphicStubInfo()
-        {
-            u.proto.clear();
-        }
-
-        void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, bool _isDirect)
-        {
-            stubRoutine = _stubRoutine;
-            base.set(vm, owner, _base);
-            u.proto.clear();
-            isChain = false;
-            isDirect = _isDirect;
-        }
-            
-        void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, Structure* _proto, bool _isDirect)
-        {
-            stubRoutine = _stubRoutine;
-            base.set(vm, owner, _base);
-            u.proto.set(vm, owner, _proto);
-            isChain = false;
-            isDirect = _isDirect;
-        }
-            
-        void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, StructureChain* _chain, bool _isDirect)
-        {
-            stubRoutine = _stubRoutine;
-            base.set(vm, owner, _base);
-            u.chain.set(vm, owner, _chain);
-            isChain = true;
-            isDirect = _isDirect;
-        }
-    } list[POLYMORPHIC_LIST_CACHE_SIZE];
-        
-    PolymorphicAccessStructureList()
-    {
-    }
-        
-    PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, bool isDirect)
-    {
-        list[0].set(vm, owner, stubRoutine, firstBase, isDirect);
-    }
-
-    PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect)
-    {
-        list[0].set(vm, owner, stubRoutine, firstBase, firstProto, isDirect);
-    }
-
-    PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect)
-    {
-        list[0].set(vm, owner, stubRoutine, firstBase, firstChain, isDirect);
-    }
-
-    bool visitWeak(int count)
-    {
-        for (int i = 0; i < count; ++i) {
-            PolymorphicStubInfo& info = list[i];
-            if (!info.base) {
-                // We're being marked during initialisation of an entry
-                ASSERT(!info.u.proto);
-                continue;
-            }
-                
-            if (!Heap::isMarked(info.base.get()))
-                return false;
-            if (info.u.proto && !info.isChain
-                && !Heap::isMarked(info.u.proto.get()))
-                return false;
-            if (info.u.chain && info.isChain
-                && !Heap::isMarked(info.u.chain.get()))
-                return false;
-        }
-            
-        return true;
-    }
-};
-
-#endif // ENABLE(JIT)
-
-} // namespace JSC
-
-#endif // PolymorphicAccessStructureList_h
-
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
deleted file mode 100644
index 6a6ec8141..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "PolymorphicPutByIdList.h"
-
-#if ENABLE(JIT)
-
-#include "StructureStubInfo.h"
-
-namespace JSC {
-
-PutByIdAccess PutByIdAccess::fromStructureStubInfo(
-    StructureStubInfo& stubInfo,
-    MacroAssemblerCodePtr initialSlowPath)
-{
-    PutByIdAccess result;
-    
-    switch (stubInfo.accessType) {
-    case access_put_by_id_replace:
-        result.m_type = Replace;
-        result.m_oldStructure.copyFrom(stubInfo.u.putByIdReplace.baseObjectStructure);
-        result.m_stubRoutine = JITStubRoutine::createSelfManagedRoutine(initialSlowPath);
-        break;
-        
-    case access_put_by_id_transition_direct:
-    case access_put_by_id_transition_normal:
-        result.m_type = Transition;
-        result.m_oldStructure.copyFrom(stubInfo.u.putByIdTransition.previousStructure);
-        result.m_newStructure.copyFrom(stubInfo.u.putByIdTransition.structure);
-        result.m_chain.copyFrom(stubInfo.u.putByIdTransition.chain);
-        result.m_stubRoutine = stubInfo.stubRoutine;
-        break;
-        
-    default:
-        RELEASE_ASSERT_NOT_REACHED();
-    }
-    
-    return result;
-}
-
-bool PutByIdAccess::visitWeak() const
-{
-    switch (m_type) {
-    case Replace:
-        if (!Heap::isMarked(m_oldStructure.get()))
-            return false;
-        break;
-    case Transition:
-        if (!Heap::isMarked(m_oldStructure.get()))
-            return false;
-        if (!Heap::isMarked(m_newStructure.get()))
-            return false;
-        if (!Heap::isMarked(m_chain.get()))
-            return false;
-        break;
-    default:
-        RELEASE_ASSERT_NOT_REACHED();
-        return false;
-    }
-    return true;
-}
-
-PolymorphicPutByIdList::PolymorphicPutByIdList(
-    PutKind putKind,
-    StructureStubInfo& stubInfo,
-    MacroAssemblerCodePtr initialSlowPath)
-    : m_kind(putKind)
-{
-    m_list.append(PutByIdAccess::fromStructureStubInfo(stubInfo, initialSlowPath));
-}
-
-PolymorphicPutByIdList* PolymorphicPutByIdList::from(
-    PutKind putKind,
-    StructureStubInfo& stubInfo,
-    MacroAssemblerCodePtr initialSlowPath)
-{
-    if (stubInfo.accessType == access_put_by_id_list)
-        return stubInfo.u.putByIdList.list;
-    
-    ASSERT(stubInfo.accessType == access_put_by_id_replace
-           || stubInfo.accessType == access_put_by_id_transition_normal
-           || stubInfo.accessType == access_put_by_id_transition_direct);
-    
-    PolymorphicPutByIdList* result =
-        new PolymorphicPutByIdList(putKind, stubInfo, initialSlowPath);
-    
-    stubInfo.initPutByIdList(result);
-    
-    return result;
-}
-
-PolymorphicPutByIdList::~PolymorphicPutByIdList() { }
-
-bool PolymorphicPutByIdList::isFull() const
-{
-    ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE);
-    return size() == POLYMORPHIC_LIST_CACHE_SIZE;
-}
-
-bool PolymorphicPutByIdList::isAlmostFull() const
-{
-    ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE);
-    return size() >= POLYMORPHIC_LIST_CACHE_SIZE - 1;
-}
-
-void PolymorphicPutByIdList::addAccess(const PutByIdAccess& putByIdAccess)
-{
-    ASSERT(!isFull());
-    // Make sure that the resizing optimizes for space, not time.
-    m_list.resize(m_list.size() + 1);
-    m_list.last() = putByIdAccess;
-}
-
-bool PolymorphicPutByIdList::visitWeak() const
-{
-    for (unsigned i = 0; i < size(); ++i) {
-        if (!at(i).visitWeak())
-            return false;
-    }
-    return true;
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
deleted file mode 100644
index d9fe2e7cf..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef PolymorphicPutByIdList_h
-#define PolymorphicPutByIdList_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(JIT)
-
-#include "CodeOrigin.h"
-#include "MacroAssembler.h"
-#include "Opcode.h"
-#include "PutKind.h"
-#include "Structure.h"
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class CodeBlock;
-struct StructureStubInfo;
-
-class PutByIdAccess {
-public:
-    enum AccessType {
-        Invalid,
-        Transition,
-        Replace
-    };
-    
-    PutByIdAccess()
-        : m_type(Invalid)
-    {
-    }
-    
-    static PutByIdAccess transition(
-        VM& vm,
-        JSCell* owner,
-        Structure* oldStructure,
-        Structure* newStructure,
-        StructureChain* chain,
-        PassRefPtr<JITStubRoutine> stubRoutine)
-    {
-        PutByIdAccess result;
-        result.m_type = Transition;
-        result.m_oldStructure.set(vm, owner, oldStructure);
-        result.m_newStructure.set(vm, owner, newStructure);
-        result.m_chain.set(vm, owner, chain);
-        result.m_stubRoutine = stubRoutine;
-        return result;
-    }
-    
-    static PutByIdAccess replace(
-        VM& vm,
-        JSCell* owner,
-        Structure* structure,
-        PassRefPtr<JITStubRoutine> stubRoutine)
-    {
-        PutByIdAccess result;
-        result.m_type = Replace;
-        result.m_oldStructure.set(vm, owner, structure);
-        result.m_stubRoutine = stubRoutine;
-        return result;
-    }
-    
-    static PutByIdAccess fromStructureStubInfo(
-        StructureStubInfo&,
-        MacroAssemblerCodePtr initialSlowPath);
-    
-    bool isSet() const { return m_type != Invalid; }
-    bool operator!() const { return !isSet(); }
-    
-    AccessType type() const { return m_type; }
-    
-    bool isTransition() const { return m_type == Transition; }
-    bool isReplace() const { return m_type == Replace; }
-    
-    Structure* oldStructure() const
-    {
-        // Using this instead of isSet() to make this assertion robust against the possibility
-        // of additional access types being added.
-        ASSERT(isTransition() || isReplace());
-        
-        return m_oldStructure.get();
-    }
-    
-    Structure* structure() const
-    {
-        ASSERT(isReplace());
-        return m_oldStructure.get();
-    }
-    
-    Structure* newStructure() const
-    {
-        ASSERT(isTransition());
-        return m_newStructure.get();
-    }
-    
-    StructureChain* chain() const
-    {
-        ASSERT(isTransition());
-        return m_chain.get();
-    }
-    
-    PassRefPtr<JITStubRoutine> stubRoutine() const
-    {
-        ASSERT(isTransition() || isReplace());
-        return m_stubRoutine;
-    }
-    
-    bool visitWeak() const;
-    
-private:
-    friend class CodeBlock;
-    
-    AccessType m_type;
-    WriteBarrier<Structure> m_oldStructure;
-    WriteBarrier<Structure> m_newStructure;
-    WriteBarrier<StructureChain> m_chain;
-    RefPtr<JITStubRoutine> m_stubRoutine;
-};
-
-class PolymorphicPutByIdList {
-    WTF_MAKE_FAST_ALLOCATED;
-public:
-    // Initialize from a stub info; this will place one element in the list and it will
-    // be created by converting the stub info's put by id access information into our
-    // PutByIdAccess.
-    PolymorphicPutByIdList(
-        PutKind,
-        StructureStubInfo&,
-        MacroAssemblerCodePtr initialSlowPath);
-
-    // Either creates a new polymorphic put list, or returns the one that is already
-    // in place.
-    static PolymorphicPutByIdList* from(
-        PutKind,
-        StructureStubInfo&,
-        MacroAssemblerCodePtr initialSlowPath);
-    
-    ~PolymorphicPutByIdList();
-    
-    MacroAssemblerCodePtr currentSlowPathTarget() const
-    {
-        return m_list.last().stubRoutine()->code().code();
-    }
-    
-    void addAccess(const PutByIdAccess&);
-    
-    bool isEmpty() const { return m_list.isEmpty(); }
-    unsigned size() const { return m_list.size(); }
-    bool isFull() const;
-    bool isAlmostFull() const; // True if adding an element would make isFull() true.
-    const PutByIdAccess& at(unsigned i) const { return m_list[i]; }
-    const PutByIdAccess& operator[](unsigned i) const { return m_list[i]; }
-    
-    PutKind kind() const { return m_kind; }
-    
-    bool visitWeak() const;
-    
-private:
-    friend class CodeBlock;
-    
-    Vector<PutByIdAccess, 2> m_list;
-    PutKind m_kind;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // PolymorphicPutByIdList_h
-
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
index ede8a3643..9c06e7ec2 100644
--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
@@ -26,85 +26,48 @@
 #include "config.h"
 #include "PreciseJumpTargets.h"
 
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
+#include "PreciseJumpTargetsInlines.h"
+
 namespace JSC {
 
-template <size_t vectorSize>
-static void getJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out)
+template <typename Block, size_t vectorSize>
+static void getJumpTargetsForBytecodeOffset(Block* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out)
 {
-    OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
-    Instruction* current = instructionsBegin + bytecodeOffset;
-    switch (opcodeID) {
-    case op_jmp:
-        out.append(bytecodeOffset + current[1].u.operand);
-        break;
-    case op_jtrue:
-    case op_jfalse:
-    case op_jeq_null:
-    case op_jneq_null:
-        out.append(bytecodeOffset + current[2].u.operand);
-        break;
-    case op_jneq_ptr:
-    case op_jless:
-    case op_jlesseq:
-    case op_jgreater:
-    case op_jgreatereq:
-    case op_jnless:
-    case op_jnlesseq:
-    case op_jngreater:
-    case op_jngreatereq:
-        out.append(bytecodeOffset + current[3].u.operand);
-        break;
-    case op_switch_imm:
-    case op_switch_char: {
-        SimpleJumpTable& table = codeBlock->switchJumpTable(current[1].u.operand);
-        for (unsigned i = table.branchOffsets.size(); i--;)
-            out.append(bytecodeOffset + table.branchOffsets[i]);
-        out.append(bytecodeOffset + current[2].u.operand);
-        break;
-    }
-    case op_switch_string: {
-        StringJumpTable& table = codeBlock->stringSwitchJumpTable(current[1].u.operand);
-        StringJumpTable::StringOffsetTable::iterator iter = table.offsetTable.begin();
-        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
-        for (; iter != end; ++iter)
-            out.append(bytecodeOffset + iter->value.branchOffset);
-        out.append(bytecodeOffset + current[2].u.operand);
-        break;
-    }
-    case op_get_pnames:
-        out.append(bytecodeOffset + current[5].u.operand);
-        break;
-    case op_next_pname:
-        out.append(bytecodeOffset + current[6].u.operand);
-        break;
-    case op_check_has_instance:
-        out.append(bytecodeOffset + current[4].u.operand);
-        break;
-    case op_loop_hint:
+    OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+    extractStoredJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) {
+        out.append(bytecodeOffset + relativeOffset);
+    });
+    // op_loop_hint does not have a jump target stored in its bytecode instruction.
+    if (opcodeID == op_loop_hint)
         out.append(bytecodeOffset);
-        break;
-    default:
-        break;
-    }
 }
 
-void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
+enum class ComputePreciseJumpTargetsMode {
+    FollowCodeBlockClaim,
+    ForceCompute,
+};
+
+template<ComputePreciseJumpTargetsMode Mode, typename Block, typename Instruction, size_t vectorSize>
+void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, vectorSize>& out)
 {
     ASSERT(out.isEmpty());
     
     // We will derive a superset of the jump targets that the code block thinks it has.
     // So, if the code block claims there are none, then we are done.
-    if (!codeBlock->numberOfJumpTargets())
+    if (Mode == ComputePreciseJumpTargetsMode::FollowCodeBlockClaim && !codeBlock->numberOfJumpTargets())
         return;
     
-    for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;)
+    for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;) {
         out.append(codeBlock->exceptionHandler(i).target);
-    
+        out.append(codeBlock->exceptionHandler(i).start);
+        out.append(codeBlock->exceptionHandler(i).end);
+    }
+
     Interpreter* interpreter = codeBlock->vm()->interpreter;
-    Instruction* instructionsBegin = codeBlock->instructions().begin();
-    unsigned instructionCount = codeBlock->instructions().size();
     for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
-        OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
+        OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
         getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out);
         bytecodeOffset += opcodeLengths[opcodeID];
     }
@@ -123,13 +86,37 @@ void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
         lastValue = value;
     }
     out.resize(toIndex);
+    out.shrinkToFit();
 }
 
-void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, Vector<unsigned, 1>& out)
+void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
 {
-    Interpreter* interpreter = codeBlock->vm()->interpreter;
-    Instruction* instructionsBegin = codeBlock->instructions().begin();
-    getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out);
+    computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, codeBlock->instructions().begin(), codeBlock->instructions().size(), out);
+}
+
+void computePreciseJumpTargets(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out)
+{
+    computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out)
+{
+    computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out)
+{
+    computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::ForceCompute>(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out)
+{
+    getJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, bytecodeOffset, out);
+}
+
+void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out)
+{
+    getJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, bytecodeOffset, out);
 }
 
 } // namespace JSC
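
Editorial note on the hunk above: after collecting handler bounds and per-opcode targets, the internal helper sorts the vector and removes duplicates in place (the lastValue/toIndex loop whose tail is retained by the second hunk), then shrinks the storage. A minimal standalone sketch of that pass, using std::vector instead of WTF::Vector purely for illustration:

    #include <algorithm>
    #include <vector>

    // Equivalent of the sort-and-deduplicate pass; the patched code ends with
    // WTF::Vector::shrinkToFit(), mirrored here by shrink_to_fit().
    void sortAndDeduplicate(std::vector<unsigned>& out)
    {
        std::sort(out.begin(), out.end());
        out.erase(std::unique(out.begin(), out.end()), out.end());
        out.shrink_to_fit();
    }
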
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
index fb60f9b9b..bcc9346cd 100644
--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
@@ -23,17 +23,23 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef PreciseJumpTargets_h
-#define PreciseJumpTargets_h
+#pragma once
 
 #include "CodeBlock.h"
 
 namespace JSC {
 
+class UnlinkedCodeBlock;
+struct UnlinkedInstruction;
+
+// Returns a sorted list of the bytecode indices that are the destinations of jumps.
 void computePreciseJumpTargets(CodeBlock*, Vector<unsigned, 32>& out);
-void findJumpTargetsForBytecodeOffset(CodeBlock*, unsigned bytecodeOffset, Vector<unsigned, 1>& out);
+void computePreciseJumpTargets(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out);
+void computePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out);
 
-} // namespace JSC
+void recomputePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out);
 
-#endif // PreciseJumpTargets_h
+void findJumpTargetsForBytecodeOffset(CodeBlock*, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out);
+void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out);
 
+} // namespace JSC
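
Editorial note: the header now exposes linked and unlinked variants side by side. A hedged usage sketch for the simplest overload (assumes a valid, fully linked CodeBlock* named codeBlock):

    // Illustrative caller; dataLog is JSC's logging helper.
    Vector<unsigned, 32> jumpTargets;
    computePreciseJumpTargets(codeBlock, jumpTargets);
    for (unsigned target : jumpTargets)
        dataLog("jump target at bytecode offset ", target, "\n");
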
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h
new file mode 100644
index 000000000..19fdcdceb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "InterpreterInlines.h"
+#include "Opcode.h"
+#include "PreciseJumpTargets.h"
+
+namespace JSC {
+
+template<typename Block, typename Instruction, typename Function>
+inline void extractStoredJumpTargetsForBytecodeOffset(Block* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Function function)
+{
+    OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+    Instruction* current = instructionsBegin + bytecodeOffset;
+    switch (opcodeID) {
+    case op_jmp:
+        function(current[1].u.operand);
+        break;
+    case op_jtrue:
+    case op_jfalse:
+    case op_jeq_null:
+    case op_jneq_null:
+        function(current[2].u.operand);
+        break;
+    case op_jneq_ptr:
+    case op_jless:
+    case op_jlesseq:
+    case op_jgreater:
+    case op_jgreatereq:
+    case op_jnless:
+    case op_jnlesseq:
+    case op_jngreater:
+    case op_jngreatereq:
+        function(current[3].u.operand);
+        break;
+    case op_switch_imm:
+    case op_switch_char: {
+        auto& table = codeBlock->switchJumpTable(current[1].u.operand);
+        for (unsigned i = table.branchOffsets.size(); i--;)
+            function(table.branchOffsets[i]);
+        function(current[2].u.operand);
+        break;
+    }
+    case op_switch_string: {
+        auto& table = codeBlock->stringSwitchJumpTable(current[1].u.operand);
+        auto iter = table.offsetTable.begin();
+        auto end = table.offsetTable.end();
+        for (; iter != end; ++iter)
+            function(iter->value.branchOffset);
+        function(current[2].u.operand);
+        break;
+    }
+    default:
+        break;
+    }
+}
+
+} // namespace JSC
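
Editorial note: extractStoredJumpTargetsForBytecodeOffset hands each stored jump operand to the functor by non-const reference, so callers can rewrite targets as well as read them (the PreciseJumpTargets.cpp call site above only reads). A hedged sketch of an in-place adjustment; insertedInstructionCount is a hypothetical name:

    extractStoredJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset,
        [&](int32_t& relativeOffset) {
            // Shift the stored target to account for newly inserted instructions.
            relativeOffset += insertedInstructionCount;
        });
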
diff --git a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp b/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp
deleted file mode 100644
index edf8e228d..000000000
--- a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "ProfiledCodeBlockJettisoningWatchpoint.h"
-
-#include "CodeBlock.h"
-#include "DFGCommon.h"
-#include "DFGExitProfile.h"
-
-namespace JSC {
-
-void ProfiledCodeBlockJettisoningWatchpoint::fireInternal()
-{
-    if (DFG::shouldShowDisassembly()) {
-        dataLog(
-            "Firing profiled watchpoint ", RawPointer(this), " on ", *m_codeBlock, " due to ",
-            m_exitKind, " at ", m_codeOrigin, "\n");
-    }
-    
-    // FIXME: Maybe this should call alternative().
-    // https://bugs.webkit.org/show_bug.cgi?id=123677
-    CodeBlock* machineBaselineCodeBlock = m_codeBlock->baselineAlternative();
-    CodeBlock* sourceBaselineCodeBlock =
-        baselineCodeBlockForOriginAndBaselineCodeBlock(
-            m_codeOrigin, machineBaselineCodeBlock);
-    
-    if (sourceBaselineCodeBlock) {
-        sourceBaselineCodeBlock->addFrequentExitSite(
-            DFG::FrequentExitSite(m_codeOrigin.bytecodeIndex, m_exitKind));
-    }
-    
-    m_codeBlock->jettison(CountReoptimization);
-    
-    if (isOnList())
-        remove();
-}
-
-} // namespace JSC
-
diff --git a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h b/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h
deleted file mode 100644
index 108e23a37..000000000
--- a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef ProfiledCodeBlockJettisoningWatchpoint_h
-#define ProfiledCodeBlockJettisoningWatchpoint_h
-
-#include "CodeOrigin.h"
-#include "ExitKind.h"
-#include "Watchpoint.h"
-
-namespace JSC {
-
-class CodeBlock;
-
-class ProfiledCodeBlockJettisoningWatchpoint : public Watchpoint {
-public:
-    ProfiledCodeBlockJettisoningWatchpoint()
-        : m_exitKind(ExitKindUnset)
-        , m_codeBlock(0)
-    {
-    }
-    
-    ProfiledCodeBlockJettisoningWatchpoint(
-        CodeOrigin codeOrigin, ExitKind exitKind, CodeBlock* codeBlock)
-        : m_codeOrigin(codeOrigin)
-        , m_exitKind(exitKind)
-        , m_codeBlock(codeBlock)
-    {
-    }
-    
-protected:
-    virtual void fireInternal() override;
-
-private:
-    CodeOrigin m_codeOrigin;
-    ExitKind m_exitKind;
-    CodeBlock* m_codeBlock;
-};
-
-} // namespace JSC
-
-#endif // ProfiledCodeBlockJettisoningWatchpoint_h
-
diff --git a/Source/JavaScriptCore/bytecode/ProgramCodeBlock.cpp b/Source/JavaScriptCore/bytecode/ProgramCodeBlock.cpp
new file mode 100644
index 000000000..b4fac570f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ProgramCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ProgramCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo ProgramCodeBlock::s_info = {
+    "ProgramCodeBlock", &Base::s_info, 0,
+    CREATE_METHOD_TABLE(ProgramCodeBlock)
+};
+
+void ProgramCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<ProgramCodeBlock*>(cell)->~ProgramCodeBlock();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ProgramCodeBlock.h b/Source/JavaScriptCore/bytecode/ProgramCodeBlock.h
new file mode 100644
index 000000000..8504ac4a0
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ProgramCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+#include "UnlinkedProgramCodeBlock.h"
+
+namespace JSC {
+
+class ProgramCodeBlock : public GlobalCodeBlock {
+public:
+    typedef GlobalCodeBlock Base;
+    DECLARE_INFO;
+
+    static ProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ProgramCodeBlock& other)
+    {
+        ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap))
+            ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), CopyParsedBlock, other);
+        instance->finishCreation(*vm, CopyParsedBlock, other);
+        return instance;
+    }
+
+    static ProgramCodeBlock* create(VM* vm, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned firstLineColumnOffset)
+    {
+        ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap))
+            ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), firstLineColumnOffset);
+        instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+        return instance;
+    }
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+private:
+    ProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ProgramCodeBlock& other)
+        : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+
+    ProgramCodeBlock(VM* vm, Structure* structure, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned firstLineColumnOffset)
+        : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), 0, firstLineColumnOffset)
+    {
+    }
+
+    static void destroy(JSCell*);
+};
+
+} // namespace JSC
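
Editorial note: both create() overloads follow JSC's usual two-phase cell construction: allocate raw GC storage, placement-new the object, then run finishCreation() for any initialization that must happen once the cell is visible to the collector. A generic sketch of the pattern, with MyCell as a hypothetical JSCell subclass:

    static MyCell* create(VM& vm, Structure* structure)
    {
        // Phase 1: allocate GC storage and construct the cell.
        MyCell* cell = new (NotNull, allocateCell<MyCell>(vm.heap)) MyCell(vm, structure);
        // Phase 2: GC-visible initialization (write-barriered fields, etc.).
        cell->finishCreation(vm);
        return cell;
    }
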
diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.cpp b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp
new file mode 100644
index 000000000..a8388df39
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PropertyCondition.h"
+
+#include "GetterSetter.h"
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
+
+namespace JSC {
+
+static bool verbose = false;
+
+void PropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    if (!*this) {
+        out.print("");
+        return;
+    }
+    
+    out.print(m_kind, " of ", m_uid);
+    switch (m_kind) {
+    case Presence:
+        out.print(" at ", offset(), " with attributes ", attributes());
+        return;
+    case Absence:
+    case AbsenceOfSetter:
+        out.print(" with prototype ", inContext(JSValue(prototype()), context));
+        return;
+    case Equivalence:
+        out.print(" with ", inContext(requiredValue(), context));
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void PropertyCondition::dump(PrintStream& out) const
+{
+    dumpInContext(out, nullptr);
+}
+
+bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint(
+    Structure* structure, JSObject* base) const
+{
+    if (verbose) {
+        dataLog(
+            "Determining validity of ", *this, " with structure ", pointerDump(structure), " and base ",
+            JSValue(base), " assuming impure property watchpoints are set.\n");
+    }
+    
+    if (!*this) {
+        if (verbose)
+            dataLog("Invalid because unset.\n");
+        return false;
+    }
+    
+    if (!structure->propertyAccessesAreCacheable()) {
+        if (verbose)
+            dataLog("Invalid because accesses are not cacheable.\n");
+        return false;
+    }
+    
+    switch (m_kind) {
+    case Presence: {
+        unsigned currentAttributes;
+        PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
+        if (currentOffset != offset() || currentAttributes != attributes()) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because we need offset, attributes to be ", offset(), ", ", attributes(),
+                    " but they are ", currentOffset, ", ", currentAttributes, "\n");
+            }
+            return false;
+        }
+        return true;
+    }
+        
+    case Absence: {
+        if (structure->isDictionary()) {
+            if (verbose)
+                dataLog("Invalid because it's a dictionary.\n");
+            return false;
+        }
+
+        PropertyOffset currentOffset = structure->getConcurrently(uid());
+        if (currentOffset != invalidOffset) {
+            if (verbose)
+                dataLog("Invalid because the property exists at offset: ", currentOffset, "\n");
+            return false;
+        }
+        
+        if (structure->storedPrototypeObject() != prototype()) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because the prototype is ", structure->storedPrototype(), " even though "
+                    "it should have been ", JSValue(prototype()), "\n");
+            }
+            return false;
+        }
+        
+        return true;
+    }
+    
+    case AbsenceOfSetter: {
+        if (structure->isDictionary()) {
+            if (verbose)
+                dataLog("Invalid because it's a dictionary.\n");
+            return false;
+        }
+        
+        unsigned currentAttributes;
+        PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
+        if (currentOffset != invalidOffset) {
+            if (currentAttributes & (Accessor | CustomAccessor)) {
+                if (verbose) {
+                    dataLog(
+                        "Invalid because we expected not to have a setter, but we have one at offset ",
+                        currentOffset, " with attributes ", currentAttributes, "\n");
+                }
+                return false;
+            }
+        }
+        
+        if (structure->storedPrototypeObject() != prototype()) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because the prototype is ", structure->storedPrototype(), " even though "
+                    "it should have been ", JSValue(prototype()), "\n");
+            }
+            return false;
+        }
+        
+        return true;
+    }
+        
+    case Equivalence: {
+        if (!base || base->structure() != structure) {
+            // Conservatively return false, since we cannot verify this one without having the
+            // object.
+            if (verbose) {
+                dataLog(
+                    "Invalid because we don't have a base or the base has the wrong structure: ",
+                    RawPointer(base), "\n");
+            }
+            return false;
+        }
+        
+        // FIXME: This is somewhat racy, and maybe more risky than we want.
+        // https://bugs.webkit.org/show_bug.cgi?id=134641
+        
+        PropertyOffset currentOffset = structure->getConcurrently(uid());
+        if (currentOffset == invalidOffset) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because the base no long appears to have ", uid(), " on its structure: ",
+                        RawPointer(base), "\n");
+            }
+            return false;
+        }
+
+        JSValue currentValue = base->getDirect(currentOffset);
+        if (currentValue != requiredValue()) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because the value is ", currentValue, " but we require ", requiredValue(),
+                    "\n");
+            }
+            return false;
+        }
+        
+        return true;
+    } }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+    return false;
+}
+
+bool PropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const
+{
+    if (!*this)
+        return false;
+    
+    switch (m_kind) {
+    case Presence:
+    case Absence:
+    case Equivalence:
+        return structure->needImpurePropertyWatchpoint();
+    default:
+        return false;
+    }
+}
+
+bool PropertyCondition::isStillValid(Structure* structure, JSObject* base) const
+{
+    if (!isStillValidAssumingImpurePropertyWatchpoint(structure, base))
+        return false;
+
+    // Currently we assume that an impure property can cause a property to appear, and can also
+    // "shadow" an existing JS property on the same object. Hence it affects both presence and
+    // absence. It doesn't affect AbsenceOfSetter because impure properties aren't ever setters.
+    switch (m_kind) {
+    case Absence:
+        if (structure->typeInfo().getOwnPropertySlotIsImpure() || structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
+            return false;
+        break;
+    case Presence:
+    case Equivalence:
+        if (structure->typeInfo().getOwnPropertySlotIsImpure())
+            return false;
+        break;
+    default:
+        break;
+    }
+    
+    return true;
+}
+
+bool PropertyCondition::isWatchableWhenValid(
+    Structure* structure, WatchabilityEffort effort) const
+{
+    if (structure->transitionWatchpointSetHasBeenInvalidated())
+        return false;
+    
+    switch (m_kind) {
+    case Equivalence: {
+        PropertyOffset offset = structure->getConcurrently(uid());
+        
+        // This method should only be called when some variant of isValid returned true, which
+        // implies that we already confirmed that the structure knows of the property. We should
+        // also have verified that the Structure is a cacheable dictionary, which means we
+        // shouldn't have a TOCTOU race either.
+        RELEASE_ASSERT(offset != invalidOffset);
+        
+        WatchpointSet* set = nullptr;
+        switch (effort) {
+        case MakeNoChanges:
+            set = structure->propertyReplacementWatchpointSet(offset);
+            break;
+        case EnsureWatchability:
+            set = structure->ensurePropertyReplacementWatchpointSet(
+                *Heap::heap(structure)->vm(), offset);
+            break;
+        }
+        
+        if (!set || !set->isStillValid())
+            return false;
+        
+        break;
+    }
+        
+    default:
+        break;
+    }
+    
+    return true;
+}
+
+bool PropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+    Structure* structure, JSObject* base, WatchabilityEffort effort) const
+{
+    return isStillValidAssumingImpurePropertyWatchpoint(structure, base)
+        && isWatchableWhenValid(structure, effort);
+}
+
+bool PropertyCondition::isWatchable(
+    Structure* structure, JSObject* base, WatchabilityEffort effort) const
+{
+    return isStillValid(structure, base)
+        && isWatchableWhenValid(structure, effort);
+}
+
+bool PropertyCondition::isStillLive() const
+{
+    if (hasPrototype() && prototype() && !Heap::isMarked(prototype()))
+        return false;
+    
+    if (hasRequiredValue()
+        && requiredValue()
+        && requiredValue().isCell()
+        && !Heap::isMarked(requiredValue().asCell()))
+        return false;
+    
+    return true;
+}
+
+void PropertyCondition::validateReferences(const TrackedReferences& tracked) const
+{
+    if (hasPrototype())
+        tracked.check(prototype());
+    
+    if (hasRequiredValue())
+        tracked.check(requiredValue());
+}
+
+bool PropertyCondition::isValidValueForAttributes(VM& vm, JSValue value, unsigned attributes)
+{
+    bool attributesClaimAccessor = !!(attributes & Accessor);
+    bool valueClaimsAccessor = !!jsDynamicCast<GetterSetter*>(vm, value);
+    return attributesClaimAccessor == valueClaimsAccessor;
+}
+
+bool PropertyCondition::isValidValueForPresence(VM& vm, JSValue value) const
+{
+    return isValidValueForAttributes(vm, value, attributes());
+}
+
+PropertyCondition PropertyCondition::attemptToMakeEquivalenceWithoutBarrier(VM& vm, JSObject* base) const
+{
+    Structure* structure = base->structure();
+    if (!structure->isValidOffset(offset()))
+        return PropertyCondition();
+    JSValue value = base->getDirect(offset());
+    if (!isValidValueForPresence(vm, value))
+        return PropertyCondition();
+    return equivalenceWithoutBarrier(uid(), value);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::PropertyCondition::Kind condition)
+{
+    switch (condition) {
+    case JSC::PropertyCondition::Presence:
+        out.print("Presence");
+        return;
+    case JSC::PropertyCondition::Absence:
+        out.print("Absence");
+        return;
+    case JSC::PropertyCondition::AbsenceOfSetter:
+        out.print("Absence");
+        return;
+    case JSC::PropertyCondition::Equivalence:
+        out.print("Equivalence");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
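
Editorial note: isWatchable() layers the watchpoint-state check (isWatchableWhenValid) on top of the plain validity check, so a typical client guards watchpoint installation on it. A hedged sketch; installWatchpoints is a hypothetical helper:

    if (condition.isWatchable(structure, base, PropertyCondition::EnsureWatchability)) {
        // Safe to adapt: set a transition watchpoint on 'structure', plus a
        // property-replacement watchpoint when the condition is an Equivalence
        // (watchingRequiresReplacementWatchpoint() returns true).
        installWatchpoints(condition, structure);
    }

EnsureWatchability may enable a replacement watchpoint set as a side effect, which is why the header below restricts it to the main thread.
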
diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.h b/Source/JavaScriptCore/bytecode/PropertyCondition.h
new file mode 100644
index 000000000..163e8f3fb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PropertyCondition.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSObject.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+class PropertyCondition {
+public:
+    enum Kind {
+        Presence,
+        Absence,
+        AbsenceOfSetter,
+        Equivalence // An adaptive watchpoint on this will be a pair of watchpoints, and when the structure transitions, we will set the replacement watchpoint on the new structure.
+    };
+    
+    PropertyCondition()
+        : m_uid(nullptr)
+        , m_kind(Presence)
+    {
+        memset(&u, 0, sizeof(u));
+    }
+    
+    PropertyCondition(WTF::HashTableDeletedValueType)
+        : m_uid(nullptr)
+        , m_kind(Absence)
+    {
+        memset(&u, 0, sizeof(u));
+    }
+    
+    static PropertyCondition presenceWithoutBarrier(UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+    {
+        PropertyCondition result;
+        result.m_uid = uid;
+        result.m_kind = Presence;
+        result.u.presence.offset = offset;
+        result.u.presence.attributes = attributes;
+        return result;
+    }
+    
+    static PropertyCondition presence(
+        VM&, JSCell*, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+    {
+        return presenceWithoutBarrier(uid, offset, attributes);
+    }
+
+    // NOTE: The prototype is the storedPrototype not the prototypeForLookup.
+    static PropertyCondition absenceWithoutBarrier(UniquedStringImpl* uid, JSObject* prototype)
+    {
+        PropertyCondition result;
+        result.m_uid = uid;
+        result.m_kind = Absence;
+        result.u.absence.prototype = prototype;
+        return result;
+    }
+    
+    static PropertyCondition absence(
+        VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return absenceWithoutBarrier(uid, prototype);
+    }
+    
+    static PropertyCondition absenceOfSetterWithoutBarrier(
+        UniquedStringImpl* uid, JSObject* prototype)
+    {
+        PropertyCondition result;
+        result.m_uid = uid;
+        result.m_kind = AbsenceOfSetter;
+        result.u.absence.prototype = prototype;
+        return result;
+    }
+    
+    static PropertyCondition absenceOfSetter(
+        VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return absenceOfSetterWithoutBarrier(uid, prototype);
+    }
+    
+    static PropertyCondition equivalenceWithoutBarrier(
+        UniquedStringImpl* uid, JSValue value)
+    {
+        PropertyCondition result;
+        result.m_uid = uid;
+        result.m_kind = Equivalence;
+        result.u.equivalence.value = JSValue::encode(value);
+        return result;
+    }
+        
+    static PropertyCondition equivalence(
+        VM& vm, JSCell* owner, UniquedStringImpl* uid, JSValue value)
+    {
+        if (value.isCell() && owner)
+            vm.heap.writeBarrier(owner);
+        return equivalenceWithoutBarrier(uid, value);
+    }
+    
+    explicit operator bool() const { return m_uid || m_kind != Presence; }
+    
+    Kind kind() const { return m_kind; }
+    UniquedStringImpl* uid() const { return m_uid; }
+    
+    bool hasOffset() const { return !!*this && m_kind == Presence; }
+    PropertyOffset offset() const
+    {
+        ASSERT(hasOffset());
+        return u.presence.offset;
+    }
+    bool hasAttributes() const { return !!*this && m_kind == Presence; }
+    unsigned attributes() const
+    {
+        ASSERT(hasAttributes());
+        return u.presence.attributes;
+    }
+    
+    bool hasPrototype() const { return !!*this && (m_kind == Absence || m_kind == AbsenceOfSetter); }
+    JSObject* prototype() const
+    {
+        ASSERT(hasPrototype());
+        return u.absence.prototype;
+    }
+    
+    bool hasRequiredValue() const { return !!*this && m_kind == Equivalence; }
+    JSValue requiredValue() const
+    {
+        ASSERT(hasRequiredValue());
+        return JSValue::decode(u.equivalence.value);
+    }
+    
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    void dump(PrintStream&) const;
+    
+    unsigned hash() const
+    {
+        unsigned result = WTF::PtrHash<UniquedStringImpl*>::hash(m_uid) + static_cast<unsigned>(m_kind);
+        switch (m_kind) {
+        case Presence:
+            result ^= u.presence.offset;
+            result ^= u.presence.attributes;
+            break;
+        case Absence:
+        case AbsenceOfSetter:
+            result ^= WTF::PtrHash<JSObject*>::hash(u.absence.prototype);
+            break;
+        case Equivalence:
+            result ^= EncodedJSValueHash::hash(u.equivalence.value);
+            break;
+        }
+        return result;
+    }
+    
+    bool operator==(const PropertyCondition& other) const
+    {
+        if (m_uid != other.m_uid)
+            return false;
+        if (m_kind != other.m_kind)
+            return false;
+        switch (m_kind) {
+        case Presence:
+            return u.presence.offset == other.u.presence.offset
+                && u.presence.attributes == other.u.presence.attributes;
+        case Absence:
+        case AbsenceOfSetter:
+            return u.absence.prototype == other.u.absence.prototype;
+        case Equivalence:
+            return u.equivalence.value == other.u.equivalence.value;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return false;
+    }
+    
+    bool isHashTableDeletedValue() const
+    {
+        return !m_uid && m_kind == Absence;
+    }
+    
+    // Two conditions are compatible if they are identical or if they speak of different uids. If
+    // false is returned, you have to decide how to resolve the conflict - for example if there is
+    // a Presence and an Equivalence then in some cases you'll want the more general of the two
+    // while in other cases you'll want the more specific of the two. This will also return false
+    // for contradictions, like Presence and Absence on the same uid. By convention, invalid
+    // conditions aren't compatible with anything.
+    bool isCompatibleWith(const PropertyCondition& other) const
+    {
+        if (!*this || !other)
+            return false;
+        return *this == other || uid() != other.uid();
+    }
+    
+    // Checks if the object's structure claims that the property won't be intercepted.
+    bool isStillValidAssumingImpurePropertyWatchpoint(Structure*, JSObject* base = nullptr) const;
+    
+    // Returns true if we need an impure property watchpoint to ensure validity even if
+    // isStillValidAssumingImpurePropertyWatchpoint() returned true.
+    bool validityRequiresImpurePropertyWatchpoint(Structure*) const;
+    
+    // Checks if the condition is still valid right now for the given object and structure.
+    // May conservatively return false, if the object and structure alone don't guarantee the
+    // condition. This happens for an Absence condition on an object that may have impure
+    // properties. If the object is not supplied, then a "true" return indicates that checking if
+    // an object has the given structure guarantees the condition still holds. If an object is
+    // supplied, then you may need to use some other watchpoints on the object to guarantee the
+    // condition in addition to the structure check.
+    bool isStillValid(Structure*, JSObject* base = nullptr) const;
+    
+    // In some cases, the condition is not watchable, but could be made watchable by enabling the
+    // appropriate watchpoint. For example, replacement watchpoints are enabled only when some
+    // access is cached on the property in some structure. This is mainly to save space for
+    // dictionary properties or properties that never get very hot. But, it's always safe to
+    // enable watching, provided that this is called from the main thread.
+    enum WatchabilityEffort {
+        // This is the default. It means that we don't change the state of any Structure or
+        // object, and implies that if the property happens not to be watchable then we don't make
+        // it watchable. This is mandatory if calling from a JIT thread. This is also somewhat
+        // preferable when first deciding whether to watch a condition for the first time (i.e.
+        // not from a watchpoint fire that causes us to see if we should adapt), since a
+        // watchpoint not being initialized for watching implies that maybe we don't know enough
+        // yet to make it profitable to watch -- as in, the thing being watched may not have
+        // stabilized yet. We prefer to only assume that a condition will hold if it has been
+        // known to hold for a while already.
+        MakeNoChanges,
+        
+        // Do what it takes to ensure that the property can be watched, if doing so has no
+        // user-observable effect. For now this just means that we will ensure that a property
+        // replacement watchpoint is enabled if it hadn't been enabled already. Do not use this
+        // from JIT threads, since the act of enabling watchpoints is not thread-safe.
+        EnsureWatchability
+    };
+    
+    // This means that it's still valid and we could enforce validity by setting a transition
+    // watchpoint on the structure and possibly an impure property watchpoint.
+    bool isWatchableAssumingImpurePropertyWatchpoint(
+        Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const;
+    
+    // This means that it's still valid and we could enforce validity by setting a transition
+    // watchpoint on the structure.
+    bool isWatchable(
+        Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const;
+    
+    bool watchingRequiresStructureTransitionWatchpoint() const
+    {
+        // Currently, this is required for all of our conditions.
+        return !!*this;
+    }
+    bool watchingRequiresReplacementWatchpoint() const
+    {
+        return !!*this && m_kind == Equivalence;
+    }
+    
+    // This means that the objects involved in this are still live.
+    bool isStillLive() const;
+    
+    void validateReferences(const TrackedReferences&) const;
+
+    static bool isValidValueForAttributes(VM&, JSValue, unsigned attributes);
+
+    bool isValidValueForPresence(VM&, JSValue) const;
+
+    PropertyCondition attemptToMakeEquivalenceWithoutBarrier(VM&, JSObject* base) const;
+
+private:
+    bool isWatchableWhenValid(Structure*, WatchabilityEffort) const;
+
+    UniquedStringImpl* m_uid;
+    Kind m_kind;
+    union {
+        struct {
+            PropertyOffset offset;
+            unsigned attributes;
+        } presence;
+        struct {
+            JSObject* prototype;
+        } absence;
+        struct {
+            EncodedJSValue value;
+        } equivalence;
+    } u;
+};
+
+struct PropertyConditionHash {
+    static unsigned hash(const PropertyCondition& key) { return key.hash(); }
+    static bool equal(
+        const PropertyCondition& a, const PropertyCondition& b)
+    {
+        return a == b;
+    }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::PropertyCondition::Kind);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::PropertyCondition> {
+    typedef JSC::PropertyConditionHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::PropertyCondition> : SimpleClassHashTraits<JSC::PropertyCondition> { };
+
+} // namespace WTF
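
Editorial note: the DefaultHash and HashTraits specializations above let PropertyCondition serve directly as a hash-table key. A hedged usage sketch (the uid 'name', the offset, and the attribute values are illustrative; assumes <wtf/HashSet.h>):

    // Presence and Absence of the same uid contradict each other,
    // so isCompatibleWith() rejects the pair.
    PropertyCondition presence = PropertyCondition::presenceWithoutBarrier(name, 4, 0);
    PropertyCondition absence = PropertyCondition::absenceWithoutBarrier(name, nullptr);
    ASSERT(!presence.isCompatibleWith(absence));

    HashSet<PropertyCondition> conditions;
    conditions.add(presence); // hashes via PropertyConditionHash
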
diff --git a/Source/JavaScriptCore/bytecode/ProxyableAccessCase.cpp b/Source/JavaScriptCore/bytecode/ProxyableAccessCase.cpp
new file mode 100644
index 000000000..63879c4de
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ProxyableAccessCase.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ProxyableAccessCase.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ProxyableAccessCase::ProxyableAccessCase(VM& vm, JSCell* owner, AccessType accessType, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
+    : Base(vm, owner, accessType, offset, structure, conditionSet)
+    , m_viaProxy(viaProxy)
+    , m_additionalSet(additionalSet)
+{
+}
+
+std::unique_ptr<AccessCase> ProxyableAccessCase::create(VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
+{
+    ASSERT(type == Load || type == Miss || type == GetGetter);
+    return std::unique_ptr<AccessCase>(new ProxyableAccessCase(vm, owner, type, offset, structure, conditionSet, viaProxy, additionalSet));
+}
+
+ProxyableAccessCase::~ProxyableAccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> ProxyableAccessCase::clone() const
+{
+    std::unique_ptr<ProxyableAccessCase> result(new ProxyableAccessCase(*this));
+    result->resetState();
+    return WTFMove(result);
+}
+
+void ProxyableAccessCase::dumpImpl(PrintStream& out, CommaPrinter& comma) const
+{
+    Base::dumpImpl(out, comma);
+    out.print(comma, "viaProxy = ", viaProxy());
+    out.print(comma, "additionalSet = ", RawPointer(additionalSet()));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/ProxyableAccessCase.h b/Source/JavaScriptCore/bytecode/ProxyableAccessCase.h
new file mode 100644
index 000000000..578be2228
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ProxyableAccessCase.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "AccessCase.h"
+
+namespace JSC {
+
+class ProxyableAccessCase : public AccessCase {
+public:
+    typedef AccessCase Base;
+
+    bool viaProxy() const override { return m_viaProxy; }
+    WatchpointSet* additionalSet() const override { return m_additionalSet.get(); }
+
+    static std::unique_ptr<AccessCase> create(VM&, JSCell*, AccessType, PropertyOffset, Structure*, const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+        bool viaProxy = false, WatchpointSet* additionalSet = nullptr);
+
+    void dumpImpl(PrintStream&, CommaPrinter&) const override;
+    std::unique_ptr<AccessCase> clone() const override;
+
+    ~ProxyableAccessCase();
+
+protected:
+    ProxyableAccessCase(VM&, JSCell*, AccessType, PropertyOffset, Structure*, const ObjectPropertyConditionSet&, bool viaProxy, WatchpointSet* additionalSet);
+
+private:
+    bool m_viaProxy;
+    RefPtr<WatchpointSet> m_additionalSet;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp b/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp
new file mode 100644
index 000000000..f28090049
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PutByIdFlags.h"
+
+#include "InferredType.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/PrintStream.h>
+#include <wtf/StringPrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, PutByIdFlags flags) {
+    CommaPrinter comma("|");
+    if (flags & PutByIdIsDirect)
+        out.print(comma, "IsDirect");
+
+    InferredType::Kind kind = InferredType::kindForFlags(flags);
+    out.print(comma, kind);
+    if (InferredType::hasStructure(kind))
+        out.print(":", bitwise_cast(decodeStructureID(flags)));
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdFlags.h b/Source/JavaScriptCore/bytecode/PutByIdFlags.h
new file mode 100644
index 000000000..7decfb2eb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdFlags.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "StructureIDTable.h"
+
+namespace JSC {
+
+enum PutByIdFlags : int32_t {
+    PutByIdNone = 0,
+
+    // This flag indicates that the put_by_id is direct. That means that we store the property without
+    // checking if the prototype chain has a setter.
+    PutByIdIsDirect = 0x1,
+    PutByIdPersistentFlagsMask = 0x1,
+
+    // NOTE: The values below must be in sync with what is in LowLevelInterpreter.asm.
+
+    // Determining the required inferred type involves first checking the primary type mask, and then
+    // using that to figure out the meaning of the secondary mask:
+    // switch (flags & PutByIdPrimaryTypeMask) {
+    // case PutByIdPrimaryTypeSecondary:
+    //     switch (flags & PutByIdSecondaryTypeMask) {
+    //     ...
+    //     }
+    //     break;
+    // case PutByIdPrimaryTypeObjectWithStructure:
+    // case PutByIdPrimaryTypeObjectWithStructureOrOther:
+    //     StructureID structureID = decodeStructureID(flags);
+    //     break;
+    // }
+    PutByIdPrimaryTypeMask = 0x6,
+    PutByIdPrimaryTypeSecondary = 0x0, // Need to check the secondary type mask for the type.
+    PutByIdPrimaryTypeObjectWithStructure = 0x2, // Secondary type has structure ID.
+    PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4, // Secondary type has structure ID.
+
+    PutByIdSecondaryTypeMask = -0x8,
+    PutByIdSecondaryTypeBottom = 0x0,
+    PutByIdSecondaryTypeBoolean = 0x8,
+    PutByIdSecondaryTypeOther = 0x10,
+    PutByIdSecondaryTypeInt32 = 0x18,
+    PutByIdSecondaryTypeNumber = 0x20,
+    PutByIdSecondaryTypeString = 0x28,
+    PutByIdSecondaryTypeSymbol = 0x30,
+    PutByIdSecondaryTypeObject = 0x38,
+    PutByIdSecondaryTypeObjectOrOther = 0x40,
+    PutByIdSecondaryTypeTop = 0x48
+};
+
+inline PutByIdFlags encodeStructureID(StructureID id)
+{
+#if USE(JSVALUE64)
+    return static_cast<PutByIdFlags>(static_cast<int32_t>(id) << 3);
+#else
+    PutByIdFlags result = bitwise_cast<PutByIdFlags>(id);
+    ASSERT(!(result & ~PutByIdSecondaryTypeMask));
+    return result;
+#endif
+}
+
+inline StructureID decodeStructureID(PutByIdFlags flags)
+{
+#if USE(JSVALUE64)
+    return static_cast<StructureID>(flags >> 3);
+#else
+    return bitwise_cast<StructureID>(flags & PutByIdSecondaryTypeMask);
+#endif
+}
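+
+// Illustrative round trip (for exposition only; assumes the ID fits above bit 3):
+//
+//     PutByIdFlags flags = static_cast<PutByIdFlags>(
+//         PutByIdIsDirect | PutByIdPrimaryTypeObjectWithStructure | encodeStructureID(id));
+//     ASSERT(decodeStructureID(flags) == id);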
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::PutByIdFlags);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
index 17cf70897..fdadf7022 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,206 +27,393 @@
 #include "PutByIdStatus.h"
 
 #include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "GetterSetterAccessCase.h"
 #include "LLIntData.h"
 #include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
 #include "Structure.h"
 #include "StructureChain.h"
+#include "StructureStubInfo.h"
+#include <wtf/ListDump.h>
 
 namespace JSC {
 
-PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
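+// Tries first to fold the new variant into one already recorded (e.g. by merging the
+// structure sets of two compatible variants); failing that, appends it, but only when
+// its old-structure set does not overlap any existing variant's. Returns false otherwise.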
+bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
+{
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].attemptToMerge(variant))
+            return true;
+    }
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].oldStructure().overlaps(variant.oldStructure()))
+            return false;
+    }
+    m_variants.append(variant);
+    return true;
+}
+
+#if ENABLE(DFG_JIT)
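+// A site that has already caused BadCache or BadConstantCache OSR exits is likely to
+// keep doing so; callers use this to fall back to the slow path instead of recaching.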
+bool PutByIdStatus::hasExitSite(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+    
+}
+#endif
+
+PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
     UNUSED_PARAM(uid);
-#if ENABLE(LLINT)
+
+    VM& vm = *profiledBlock->vm();
+    
     Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
 
-    Structure* structure = instruction[4].u.structure.get();
-    if (!structure)
-        return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
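+    // Operand layout used below: instruction[4] caches the base's old StructureID,
+    // instruction[6] holds the new StructureID (non-zero only for cached transitions),
+    // and instruction[8] carries the PutByIdFlags, including PutByIdIsDirect.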
+    StructureID structureID = instruction[4].u.structureID;
+    if (!structureID)
+        return PutByIdStatus(NoInformation);
     
-    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_out_of_line)) {
-        PropertyOffset offset = structure->getConcurrently(*profiledBlock->vm(), uid);
+    Structure* structure = vm.heap.structureIDTable().get(structureID);
+
+    StructureID newStructureID = instruction[6].u.structureID;
+    if (!newStructureID) {
+        PropertyOffset offset = structure->getConcurrently(uid);
         if (!isValidOffset(offset))
-            return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+            return PutByIdStatus(NoInformation);
         
-        return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
+        return PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
     }
+
+    Structure* newStructure = vm.heap.structureIDTable().get(newStructureID);
     
     ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());
     
-    ASSERT(instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct)
-           || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal)
-           || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line)
-           || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line));
-    
-    Structure* newStructure = instruction[6].u.structure.get();
-    StructureChain* chain = instruction[7].u.structureChain.get();
-    ASSERT(newStructure);
-    ASSERT(chain);
-    
-    PropertyOffset offset = newStructure->getConcurrently(*profiledBlock->vm(), uid);
+    PropertyOffset offset = newStructure->getConcurrently(uid);
     if (!isValidOffset(offset))
-        return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(NoInformation);
     
-    return PutByIdStatus(
-        SimpleTransition, structure, newStructure,
-        chain ? adoptRef(new IntendedStructureChain(profiledBlock, structure, chain)) : 0,
-        offset);
-#else
-    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif
+    ObjectPropertyConditionSet conditionSet;
+    if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) {
+        conditionSet =
+            generateConditionsForPropertySetterMissConcurrently(
+                *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid);
+        if (!conditionSet.isValid())
+            return PutByIdStatus(NoInformation);
+    }
+    
+    return PutByIdVariant::transition(
+        structure, newStructure, conditionSet, offset, newStructure->inferredTypeDescriptorFor(uid));
 }
 
-PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
-    ConcurrentJITLocker locker(profiledBlock->m_lock);
+    ConcurrentJSLocker locker(profiledBlock->m_lock);
     
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
     UNUSED_PARAM(uid);
-#if ENABLE(JIT)
-    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(DFG_JIT)
+    if (hasExitSite(locker, profiledBlock, bytecodeIndex))
+        return PutByIdStatus(TakesSlowPath);
     
     StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
-    if (!stubInfo || !stubInfo->seen)
+    PutByIdStatus result = computeForStubInfo(
+        locker, profiledBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+    if (!result)
         return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
     
-    if (stubInfo->resetByGC)
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+    return result;
+#else // ENABLE(DFG_JIT)
+    UNUSED_PARAM(map);
+    return PutByIdStatus(NoInformation);
+#endif // ENABLE(DFG_JIT)
+}
 
-    switch (stubInfo->accessType) {
-    case access_unset:
-        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(JIT)
+PutByIdStatus PutByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* baselineBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+    return computeForStubInfo(
+        locker, baselineBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, baselineBlock, codeOrigin.bytecodeIndex));
+}
+
+PutByIdStatus PutByIdStatus::computeForStubInfo(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
+    UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
+{
+    if (!stubInfo || !stubInfo->everConsidered)
+        return PutByIdStatus();
+    
+    if (stubInfo->tookSlowPath)
+        return PutByIdStatus(TakesSlowPath);
+    
+    switch (stubInfo->cacheType) {
+    case CacheType::Unset:
+        // This means that we attempted to cache but failed for some reason.
+        return PutByIdStatus(TakesSlowPath);
         
-    case access_put_by_id_replace: {
+    case CacheType::PutByIdReplace: {
         PropertyOffset offset =
-            stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(
-                *profiledBlock->vm(), uid);
+            stubInfo->u.byIdSelf.baseObjectStructure->getConcurrently(uid);
         if (isValidOffset(offset)) {
-            return PutByIdStatus(
-                SimpleReplace,
-                stubInfo->u.putByIdReplace.baseObjectStructure.get(),
-                0, 0,
-                offset);
+            return PutByIdVariant::replace(
+                stubInfo->u.byIdSelf.baseObjectStructure.get(), offset, InferredType::Top);
         }
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(TakesSlowPath);
     }
         
-    case access_put_by_id_transition_normal:
-    case access_put_by_id_transition_direct: {
-        ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
-        PropertyOffset offset = 
-            stubInfo->u.putByIdTransition.structure->getConcurrently(
-                *profiledBlock->vm(), uid);
-        if (isValidOffset(offset)) {
-            return PutByIdStatus(
-                SimpleTransition,
-                stubInfo->u.putByIdTransition.previousStructure.get(),
-                stubInfo->u.putByIdTransition.structure.get(),
-                stubInfo->u.putByIdTransition.chain ? adoptRef(new IntendedStructureChain(
-                    profiledBlock, stubInfo->u.putByIdTransition.previousStructure.get(),
-                    stubInfo->u.putByIdTransition.chain.get())) : 0,
-                offset);
+    case CacheType::Stub: {
+        PolymorphicAccess* list = stubInfo->u.stub;
+        
+        PutByIdStatus result;
+        result.m_state = Simple;
+        
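+        // First pass: if any access case can make calls, a later bail-out must report
+        // MakesCalls rather than TakesSlowPath, so settle the fallback state up front.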
+        State slowPathState = TakesSlowPath;
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.doesCalls())
+                slowPathState = MakesCalls;
         }
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+        
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.viaProxy())
+                return PutByIdStatus(slowPathState);
+            
+            PutByIdVariant variant;
+            
+            switch (access.type()) {
+            case AccessCase::Replace: {
+                Structure* structure = access.structure();
+                PropertyOffset offset = structure->getConcurrently(uid);
+                if (!isValidOffset(offset))
+                    return PutByIdStatus(slowPathState);
+                variant = PutByIdVariant::replace(
+                    structure, offset, structure->inferredTypeDescriptorFor(uid));
+                break;
+            }
+                
+            case AccessCase::Transition: {
+                PropertyOffset offset =
+                    access.newStructure()->getConcurrently(uid);
+                if (!isValidOffset(offset))
+                    return PutByIdStatus(slowPathState);
+                ObjectPropertyConditionSet conditionSet = access.conditionSet();
+                if (!conditionSet.structuresEnsureValidity())
+                    return PutByIdStatus(slowPathState);
+                variant = PutByIdVariant::transition(
+                    access.structure(), access.newStructure(), conditionSet, offset,
+                    access.newStructure()->inferredTypeDescriptorFor(uid));
+                break;
+            }
+                
+            case AccessCase::Setter: {
+                Structure* structure = access.structure();
+                
+                ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+                    structure, access.conditionSet(), uid);
+                
+                switch (complexGetStatus.kind()) {
+                case ComplexGetStatus::ShouldSkip:
+                    continue;
+                    
+                case ComplexGetStatus::TakesSlowPath:
+                    return PutByIdStatus(slowPathState);
+                    
+                case ComplexGetStatus::Inlineable: {
+                    std::unique_ptr<CallLinkStatus> callLinkStatus =
+                        std::make_unique<CallLinkStatus>();
+                    if (CallLinkInfo* callLinkInfo = access.as<GetterSetterAccessCase>().callLinkInfo()) {
+                        *callLinkStatus = CallLinkStatus::computeFor(
+                            locker, profiledBlock, *callLinkInfo, callExitSiteData);
+                    }
+                    
+                    variant = PutByIdVariant::setter(
+                        structure, complexGetStatus.offset(), complexGetStatus.conditionSet(),
+                        WTFMove(callLinkStatus));
+                } }
+                break;
+            }
+                
+            case AccessCase::CustomValueSetter:
+            case AccessCase::CustomAccessorSetter:
+                return PutByIdStatus(MakesCalls);
+
+            default:
+                return PutByIdStatus(slowPathState);
+            }
+            
+            if (!result.appendVariant(variant))
+                return PutByIdStatus(slowPathState);
+        }
+        
+        return result;
     }
         
     default:
-        // FIXME: We should handle polymorphic PutById. We probably have some interesting things
-        // we could do about it.
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(TakesSlowPath);
     }
-#else // ENABLE(JIT)
-    UNUSED_PARAM(map);
-    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif // ENABLE(JIT)
 }
+#endif
 
-PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, StringImpl* uid, bool isDirect)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
-        return PutByIdStatus(TakesSlowPath);
+#if ENABLE(DFG_JIT)
+    if (dfgBlock) {
+        CallLinkStatus::ExitSiteData exitSiteData;
+        {
+            ConcurrentJSLocker locker(baselineBlock->m_lock);
+            if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
+                return PutByIdStatus(TakesSlowPath);
+            exitSiteData = CallLinkStatus::computeExitSiteData(
+                locker, baselineBlock, codeOrigin.bytecodeIndex);
+        }
+            
+        PutByIdStatus result;
+        {
+            ConcurrentJSLocker locker(dfgBlock->m_lock);
+            result = computeForStubInfo(
+                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+        }
+        
+        // We use TakesSlowPath in some cases where the stub was unset. That's weird and
+        // it would be better not to do that. But it means that we have to defend
+        // ourselves here.
+        if (result.isSimple())
+            return result;
+    }
+#else
+    UNUSED_PARAM(dfgBlock);
+    UNUSED_PARAM(dfgMap);
+#endif
+
+    return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
 
-    if (!structure)
+PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
+{
+    if (parseIndex(*uid))
         return PutByIdStatus(TakesSlowPath);
+
+    if (set.isEmpty())
+        return PutByIdStatus();
     
-    if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
-        return PutByIdStatus(TakesSlowPath);
+    PutByIdStatus result;
+    result.m_state = Simple;
+    for (unsigned i = 0; i < set.size(); ++i) {
+        Structure* structure = set[i];
+        
+        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+            return PutByIdStatus(TakesSlowPath);
 
-    if (!structure->propertyAccessesAreCacheable())
-        return PutByIdStatus(TakesSlowPath);
+        if (!structure->propertyAccessesAreCacheable())
+            return PutByIdStatus(TakesSlowPath);
+    
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (isValidOffset(offset)) {
+            if (attributes & CustomAccessor)
+                return PutByIdStatus(MakesCalls);
+
+            if (attributes & (Accessor | ReadOnly))
+                return PutByIdStatus(TakesSlowPath);
+            
+            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
+            if (!replaceSet || replaceSet->isStillValid()) {
+                // When this executes, it'll create, and fire, this replacement watchpoint set.
+                // That means that  this has probably never executed or that something fishy is
+                // going on. Also, we cannot create or fire the watchpoint set from the concurrent
+                // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy.
+                // So, better leave this alone and take slow path.
+                return PutByIdStatus(TakesSlowPath);
+            }
+
+            PutByIdVariant variant =
+                PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
+            if (!result.appendVariant(variant))
+                return PutByIdStatus(TakesSlowPath);
+            continue;
+        }
+    
+        // Our hypothesis is that we're doing a transition. Before we prove that this is really
+        // true, we want to do some sanity checks.
     
-    unsigned attributes;
-    JSCell* specificValue;
-    PropertyOffset offset = structure->getConcurrently(vm, uid, attributes, specificValue);
-    if (isValidOffset(offset)) {
-        if (attributes & (Accessor | ReadOnly))
+        // Don't cache put transitions on dictionaries.
+        if (structure->isDictionary())
             return PutByIdStatus(TakesSlowPath);
-        if (specificValue) {
-            // We need the PutById slow path to verify that we're storing the right value into
-            // the specialized slot.
+
+        // If the structure corresponds to something that isn't an object, then give up, since
+        // we don't want to be adding properties to strings.
+        if (!structure->typeInfo().isObject())
             return PutByIdStatus(TakesSlowPath);
+    
+        ObjectPropertyConditionSet conditionSet;
+        if (!isDirect) {
+            conditionSet = generateConditionsForPropertySetterMissConcurrently(
+                globalObject->vm(), globalObject, structure, uid);
+            if (!conditionSet.isValid())
+                return PutByIdStatus(TakesSlowPath);
         }
-        return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
-    }
     
-    // Our hypothesis is that we're doing a transition. Before we prove that this is really
-    // true, we want to do some sanity checks.
+        // We only optimize if there is already a structure that the transition is cached to.
+        Structure* transition =
+            Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
+        if (!transition)
+            return PutByIdStatus(TakesSlowPath);
+        ASSERT(isValidOffset(offset));
     
-    // Don't cache put transitions on dictionaries.
-    if (structure->isDictionary())
-        return PutByIdStatus(TakesSlowPath);
+        bool didAppend = result.appendVariant(
+            PutByIdVariant::transition(
+                structure, transition, conditionSet, offset,
+                transition->inferredTypeDescriptorFor(uid)));
+        if (!didAppend)
+            return PutByIdStatus(TakesSlowPath);
+    }
+    
+    return result;
+}
 
-    // If the structure corresponds to something that isn't an object, then give up, since
-    // we don't want to be adding properties to strings.
-    if (structure->typeInfo().type() == StringType)
-        return PutByIdStatus(TakesSlowPath);
+bool PutByIdStatus::makesCalls() const
+{
+    if (m_state == MakesCalls)
+        return true;
+    
+    if (m_state != Simple)
+        return false;
+    
+    for (unsigned i = m_variants.size(); i--;) {
+        if (m_variants[i].makesCalls())
+            return true;
+    }
     
-    RefPtr<IntendedStructureChain> chain;
-    if (!isDirect) {
-        chain = adoptRef(new IntendedStructureChain(globalObject, structure));
+    return false;
+}
+
+void PutByIdStatus::dump(PrintStream& out) const
+{
+    switch (m_state) {
+    case NoInformation:
+        out.print("(NoInformation)");
+        return;
         
-        // If the prototype chain has setters or read-only properties, then give up.
-        if (chain->mayInterceptStoreTo(vm, uid))
-            return PutByIdStatus(TakesSlowPath);
+    case Simple:
+        out.print("(", listDump(m_variants), ")");
+        return;
         
-        // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
-        // then give up. The dictionary case would only happen if this structure has not been
-        // used in an optimized put_by_id transition. And really the only reason why we would
-        // bail here is that I don't really feel like having the optimizing JIT go and flatten
-        // dictionaries if we have evidence to suggest that those objects were never used as
-        // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
-        // the other checks below will fail.
-        if (!chain->isNormalized())
-            return PutByIdStatus(TakesSlowPath);
+    case TakesSlowPath:
+        out.print("(TakesSlowPath)");
+        return;
+    case MakesCalls:
+        out.print("(MakesCalls)");
+        return;
     }
     
-    // We only optimize if there is already a structure that the transition is cached to.
-    // Among other things, this allows us to guard against a transition with a specific
-    // value.
-    //
-    // - If we're storing a value that could be specific: this would only be a problem if
-    //   the existing transition did have a specific value already, since if it didn't,
-    //   then we would behave "as if" we were not storing a specific value. If it did
-    //   have a specific value, then we'll know - the fact that we pass 0 for
-    //   specificValue will tell us.
-    //
-    // - If we're not storing a value that could be specific: again, this would only be a
-    //   problem if the existing transition did have a specific value, which we check for
-    //   by passing 0 for the specificValue.
-    Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, 0, offset);
-    if (!transition)
-        return PutByIdStatus(TakesSlowPath); // This occurs in bizarre cases only. See above.
-    ASSERT(!transition->transitionDidInvolveSpecificValue());
-    ASSERT(isValidOffset(offset));
-    
-    return PutByIdStatus(SimpleTransition, structure, transition, chain.release(), offset);
+    RELEASE_ASSERT_NOT_REACHED();
 }
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.h b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
index c0a1bc35c..1dd95cde4 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,12 +23,11 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef PutByIdStatus_h
-#define PutByIdStatus_h
+#pragma once
 
-#include "IntendedStructureChain.h"
-#include "PropertyOffset.h"
-#include "StructureStubInfo.h"
+#include "CallLinkStatus.h"
+#include "ExitingJITType.h"
+#include "PutByIdVariant.h"
 #include <wtf/text/StringImpl.h>
 
 namespace JSC {
@@ -38,86 +37,79 @@ class VM;
 class JSGlobalObject;
 class Structure;
 class StructureChain;
+class StructureStubInfo;
+
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
 
 class PutByIdStatus {
 public:
     enum State {
         // It's uncached so we have no information.
         NoInformation,
-        // It's cached as a direct store into an object property for cases where the object
-        // already has the property.
-        SimpleReplace,
-        // It's cached as a transition from one structure that lacks the property to one that
-        // includes the property, and a direct store to this new property.
-        SimpleTransition,
+        // It's cached as a simple store of some kind.
+        Simple,
         // It's known to often take slow path.
-        TakesSlowPath
+        TakesSlowPath,
+        // It's known to take paths that make calls.
+        MakesCalls
     };
     
     PutByIdStatus()
         : m_state(NoInformation)
-        , m_oldStructure(0)
-        , m_newStructure(0)
-        , m_structureChain(0)
-        , m_offset(invalidOffset)
     {
     }
     
     explicit PutByIdStatus(State state)
         : m_state(state)
-        , m_oldStructure(0)
-        , m_newStructure(0)
-        , m_structureChain(0)
-        , m_offset(invalidOffset)
     {
-        ASSERT(m_state == NoInformation || m_state == TakesSlowPath);
+        ASSERT(m_state == NoInformation || m_state == TakesSlowPath || m_state == MakesCalls);
     }
     
-    PutByIdStatus(
-        State state,
-        Structure* oldStructure,
-        Structure* newStructure,
-        PassRefPtr structureChain,
-        PropertyOffset offset)
-        : m_state(state)
-        , m_oldStructure(oldStructure)
-        , m_newStructure(newStructure)
-        , m_structureChain(structureChain)
-        , m_offset(offset)
+    PutByIdStatus(const PutByIdVariant& variant)
+        : m_state(Simple)
     {
-        ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == !m_oldStructure);
-        ASSERT((m_state != SimpleTransition) == !m_newStructure);
-        ASSERT(!((m_state != SimpleTransition) && m_structureChain));
-        ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == (m_offset == invalidOffset));
+        m_variants.append(variant);
     }
     
-    static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, StringImpl* uid);
-    static PutByIdStatus computeFor(VM&, JSGlobalObject*, Structure*, StringImpl* uid, bool isDirect);
+    static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    static PutByIdStatus computeFor(JSGlobalObject*, const StructureSet&, UniquedStringImpl* uid, bool isDirect);
+    
+    static PutByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid);
+
+#if ENABLE(JIT)
+    static PutByIdStatus computeForStubInfo(const ConcurrentJSLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid);
+#endif
     
     State state() const { return m_state; }
     
     bool isSet() const { return m_state != NoInformation; }
     bool operator!() const { return m_state == NoInformation; }
-    bool isSimpleReplace() const { return m_state == SimpleReplace; }
-    bool isSimpleTransition() const { return m_state == SimpleTransition; }
-    bool takesSlowPath() const { return m_state == TakesSlowPath; }
+    bool isSimple() const { return m_state == Simple; }
+    bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; }
+    bool makesCalls() const;
     
-    Structure* oldStructure() const { return m_oldStructure; }
-    Structure* newStructure() const { return m_newStructure; }
-    IntendedStructureChain* structureChain() const { return m_structureChain.get(); }
-    PropertyOffset offset() const { return m_offset; }
+    size_t numVariants() const { return m_variants.size(); }
+    const Vector<PutByIdVariant, 1>& variants() const { return m_variants; }
+    const PutByIdVariant& at(size_t index) const { return m_variants[index]; }
+    const PutByIdVariant& operator[](size_t index) const { return at(index); }
+    
+    void dump(PrintStream&) const;
     
 private:
-    static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, StringImpl* uid);
+#if ENABLE(DFG_JIT)
+    static bool hasExitSite(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#endif
+#if ENABLE(JIT)
+    static PutByIdStatus computeForStubInfo(
+        const ConcurrentJSLocker&, CodeBlock*, StructureStubInfo*, UniquedStringImpl* uid,
+        CallLinkStatus::ExitSiteData);
+#endif
+    static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    
+    bool appendVariant(const PutByIdVariant&);
     
     State m_state;
-    Structure* m_oldStructure;
-    Structure* m_newStructure;
-    RefPtr m_structureChain;
-    PropertyOffset m_offset;
+    Vector<PutByIdVariant, 1> m_variants;
 };
 
 } // namespace JSC
-
-#endif // PutByIdStatus_h
-
diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp
new file mode 100644
index 000000000..9904c625b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PutByIdVariant.h"
+
+#include "CallLinkStatus.h"
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+PutByIdVariant::PutByIdVariant(const PutByIdVariant& other)
+    : PutByIdVariant()
+{
+    *this = other;
+}
+
+PutByIdVariant& PutByIdVariant::operator=(const PutByIdVariant& other)
+{
+    m_kind = other.m_kind;
+    m_oldStructure = other.m_oldStructure;
+    m_newStructure = other.m_newStructure;
+    m_conditionSet = other.m_conditionSet;
+    m_offset = other.m_offset;
+    m_requiredType = other.m_requiredType;
+    if (other.m_callLinkStatus)
+        m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus);
+    else
+        m_callLinkStatus = nullptr;
+    return *this;
+}
+
+PutByIdVariant PutByIdVariant::replace(
+    const StructureSet& structure, PropertyOffset offset, const InferredType::Descriptor& requiredType)
+{
+    PutByIdVariant result;
+    result.m_kind = Replace;
+    result.m_oldStructure = structure;
+    result.m_offset = offset;
+    result.m_requiredType = requiredType;
+    return result;
+}
+
+PutByIdVariant PutByIdVariant::transition(
+    const StructureSet& oldStructure, Structure* newStructure,
+    const ObjectPropertyConditionSet& conditionSet, PropertyOffset offset,
+    const InferredType::Descriptor& requiredType)
+{
+    PutByIdVariant result;
+    result.m_kind = Transition;
+    result.m_oldStructure = oldStructure;
+    result.m_newStructure = newStructure;
+    result.m_conditionSet = conditionSet;
+    result.m_offset = offset;
+    result.m_requiredType = requiredType;
+    return result;
+}
+
+PutByIdVariant PutByIdVariant::setter(
+    const StructureSet& structure, PropertyOffset offset,
+    const ObjectPropertyConditionSet& conditionSet,
+    std::unique_ptr<CallLinkStatus> callLinkStatus)
+{
+    PutByIdVariant result;
+    result.m_kind = Setter;
+    result.m_oldStructure = structure;
+    result.m_conditionSet = conditionSet;
+    result.m_offset = offset;
+    result.m_callLinkStatus = WTFMove(callLinkStatus);
+    result.m_requiredType = InferredType::Top;
+    return result;
+}
+
+Structure* PutByIdVariant::oldStructureForTransition() const
+{
+    ASSERT(kind() == Transition);
+    ASSERT(m_oldStructure.size() <= 2);
+    for (unsigned i = m_oldStructure.size(); i--;) {
+        Structure* structure = m_oldStructure[i];
+        if (structure != m_newStructure)
+            return structure;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+
+    return nullptr;
+}
+
+bool PutByIdVariant::writesStructures() const
+{
+    switch (kind()) {
+    case Transition:
+    case Setter:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool PutByIdVariant::reallocatesStorage() const
+{
+    switch (kind()) {
+    case Transition:
+        return oldStructureForTransition()->outOfLineCapacity() != newStructure()->outOfLineCapacity();
+    case Setter:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool PutByIdVariant::makesCalls() const
+{
+    return kind() == Setter;
+}
+
+bool PutByIdVariant::attemptToMerge(const PutByIdVariant& other)
+{
+    if (m_offset != other.m_offset)
+        return false;
+
+    if (m_requiredType != other.m_requiredType)
+        return false;
+    
+    switch (m_kind) {
+    case Replace: {
+        switch (other.m_kind) {
+        case Replace: {
+            ASSERT(m_conditionSet.isEmpty());
+            ASSERT(other.m_conditionSet.isEmpty());
+            
+            m_oldStructure.merge(other.m_oldStructure);
+            return true;
+        }
+            
+        case Transition: {
+            PutByIdVariant newVariant = other;
+            if (newVariant.attemptToMergeTransitionWithReplace(*this)) {
+                *this = newVariant;
+                return true;
+            }
+            return false;
+        }
+            
+        default:
+            return false;
+        }
+    }
+        
+    case Transition:
+        switch (other.m_kind) {
+        case Replace:
+            return attemptToMergeTransitionWithReplace(other);
+            
+        default:
+            return false;
+        }
+        
+    default:
+        return false;
+    }
+}
+
+bool PutByIdVariant::attemptToMergeTransitionWithReplace(const PutByIdVariant& replace)
+{
+    ASSERT(m_kind == Transition);
+    ASSERT(replace.m_kind == Replace);
+    ASSERT(m_offset == replace.m_offset);
+    ASSERT(!replace.writesStructures());
+    ASSERT(!replace.reallocatesStorage());
+    ASSERT(replace.conditionSet().isEmpty());
+    
+    // This sort of merging only works when we have one path along which we add a new field which
+    // transitions to structure S while the other path was already on structure S. This doesn't
+    // work if we need to reallocate anything or if the replace path is polymorphic.
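+    //
+    // For example, one path may add a property, transitioning S1 -> S2 at some offset,
+    // while another path replaces the same property on objects already at S2. The merge
+    // yields a transition whose old structure set is {S1, S2}, which subsumes the replace.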
+    
+    if (reallocatesStorage())
+        return false;
+    
+    if (replace.m_oldStructure.onlyStructure() != m_newStructure)
+        return false;
+    
+    m_oldStructure.merge(m_newStructure);
+    return true;
+}
+
+void PutByIdVariant::dump(PrintStream& out) const
+{
+    dumpInContext(out, 0);
+}
+
+void PutByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    switch (kind()) {
+    case NotSet:
+        out.print("");
+        return;
+        
+    case Replace:
+        out.print(
+            "<Replace: ", inContext(structure(), context), ", offset = ", offset(), ", ",
+            inContext(requiredType(), context), ">");
+        return;
+        
+    case Transition:
+        out.print(
+            " ",
+            pointerDumpInContext(newStructure(), context), ", [",
+            inContext(m_conditionSet, context), "], offset = ", offset(), ", ",
+            inContext(requiredType(), context), ">");
+        return;
+        
+    case Setter:
+        out.print(
+            "<Setter: ", inContext(structure(), context), ", [",
+            inContext(m_conditionSet, context), "], offset = ", offset(), ">");
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.h b/Source/JavaScriptCore/bytecode/PutByIdVariant.h
new file mode 100644
index 000000000..bda17bbf9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+#include "StructureSet.h"
+
+namespace JSC {
+
+class CallLinkStatus;
+
+class PutByIdVariant {
+public:
+    enum Kind {
+        NotSet,
+        Replace,
+        Transition,
+        Setter
+    };
+    
+    PutByIdVariant()
+        : m_kind(NotSet)
+        , m_newStructure(nullptr)
+        , m_offset(invalidOffset)
+    {
+    }
+    
+    PutByIdVariant(const PutByIdVariant&);
+    PutByIdVariant& operator=(const PutByIdVariant&);
+
+    static PutByIdVariant replace(const StructureSet&, PropertyOffset, const InferredType::Descriptor&);
+    
+    static PutByIdVariant transition(
+        const StructureSet& oldStructure, Structure* newStructure,
+        const ObjectPropertyConditionSet&, PropertyOffset, const InferredType::Descriptor&);
+    
+    static PutByIdVariant setter(
+        const StructureSet&, PropertyOffset, const ObjectPropertyConditionSet&,
+        std::unique_ptr<CallLinkStatus>);
+    
+    Kind kind() const { return m_kind; }
+    
+    bool isSet() const { return kind() != NotSet; }
+    bool operator!() const { return !isSet(); }
+    
+    const StructureSet& structure() const
+    {
+        ASSERT(kind() == Replace || kind() == Setter);
+        return m_oldStructure;
+    }
+    
+    const StructureSet& structureSet() const
+    {
+        return structure();
+    }
+    
+    const StructureSet& oldStructure() const
+    {
+        ASSERT(kind() == Transition || kind() == Replace || kind() == Setter);
+        return m_oldStructure;
+    }
+    
+    StructureSet& oldStructure()
+    {
+        ASSERT(kind() == Transition || kind() == Replace || kind() == Setter);
+        return m_oldStructure;
+    }
+    
+    Structure* oldStructureForTransition() const;
+    
+    Structure* newStructure() const
+    {
+        ASSERT(kind() == Transition);
+        return m_newStructure;
+    }
+
+    InferredType::Descriptor requiredType() const
+    {
+        return m_requiredType;
+    }
+
+    bool writesStructures() const;
+    bool reallocatesStorage() const;
+    bool makesCalls() const;
+    
+    const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+    
+    // We don't support intrinsics for Setters (it would be sweet if we did) but we need this for templated helpers.
+    Intrinsic intrinsic() const { return NoIntrinsic; }
+
+    // This is needed for templated helpers.
+    bool isPropertyUnset() const { return false; }
+
+    PropertyOffset offset() const
+    {
+        ASSERT(isSet());
+        return m_offset;
+    }
+    
+    CallLinkStatus* callLinkStatus() const
+    {
+        ASSERT(kind() == Setter);
+        return m_callLinkStatus.get();
+    }
+
+    bool attemptToMerge(const PutByIdVariant& other);
+    
+    void dump(PrintStream&) const;
+    void dumpInContext(PrintStream&, DumpContext*) const;
+
+private:
+    bool attemptToMergeTransitionWithReplace(const PutByIdVariant& replace);
+    
+    Kind m_kind;
+    StructureSet m_oldStructure;
+    Structure* m_newStructure;
+    ObjectPropertyConditionSet m_conditionSet;
+    PropertyOffset m_offset;
+    InferredType::Descriptor m_requiredType;
+    std::unique_ptr<CallLinkStatus> m_callLinkStatus;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PutKind.h b/Source/JavaScriptCore/bytecode/PutKind.h
index 7a1dd642e..611279f60 100644
--- a/Source/JavaScriptCore/bytecode/PutKind.h
+++ b/Source/JavaScriptCore/bytecode/PutKind.h
@@ -23,14 +23,10 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef PutKind_h
-#define PutKind_h
+#pragma once
 
 namespace JSC {
 
 enum PutKind { Direct, NotDirect };
 
 } // namespace JSC
-
-#endif // PutKind_h
-
diff --git a/Source/JavaScriptCore/bytecode/ReduceWhitespace.h b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h
index 121caf2c2..fcb86c0d0 100644
--- a/Source/JavaScriptCore/bytecode/ReduceWhitespace.h
+++ b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ReduceWhitespace_h
-#define ReduceWhitespace_h
+#pragma once
 
 #include <wtf/text/CString.h>
 
@@ -34,5 +33,3 @@ namespace JSC {
 CString reduceWhitespace(const CString&);
 
 } // namespace JSC
-
-#endif // ReduceWhitespace_h
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
deleted file mode 100644
index d18dbc1ff..000000000
--- a/Source/JavaScriptCore/bytecode/SamplingTool.cpp
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1.  Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 2.  Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- *     its contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "SamplingTool.h"
-
-#include "CodeBlock.h"
-#include "Interpreter.h"
-#include "Opcode.h"
-
-#if !OS(WINDOWS)
-#include <unistd.h>
-#endif
-
-namespace JSC {
-
-#if ENABLE(SAMPLING_FLAGS)
-
-void SamplingFlags::sample()
-{
-    uint32_t mask = static_cast<uint32_t>(1 << 31);
-    unsigned index;
-
-    for (index = 0; index < 32; ++index) {
-        if (mask & s_flags)
-            break;
-        mask >>= 1;
-    }
-
-    s_flagCounts[32 - index]++;
-}
-
-void SamplingFlags::start()
-{
-    for (unsigned i = 0; i <= 32; ++i)
-        s_flagCounts[i] = 0;
-}
-void SamplingFlags::stop()
-{
-    uint64_t total = 0;
-    for (unsigned i = 0; i <= 32; ++i)
-        total += s_flagCounts[i];
-
-    if (total) {
-        dataLogF("\nSamplingFlags: sample counts with flags set: (%lld total)\n", total);
-        for (unsigned i = 0; i <= 32; ++i) {
-            if (s_flagCounts[i])
-                dataLogF("  [ %02d ] : %lld\t\t(%03.2f%%)\n", i, s_flagCounts[i], (100.0 * s_flagCounts[i]) / total);
-        }
-        dataLogF("\n");
-    } else
-    dataLogF("\nSamplingFlags: no samples.\n\n");
-}
-uint64_t SamplingFlags::s_flagCounts[33];
-
-#else
-void SamplingFlags::start() {}
-void SamplingFlags::stop() {}
-#endif
-
-#if ENABLE(SAMPLING_REGIONS)
-volatile uintptr_t SamplingRegion::s_currentOrReserved;
-Spectrum* SamplingRegion::s_spectrum;
-unsigned long SamplingRegion::s_noneOfTheAbove;
-unsigned SamplingRegion::s_numberOfSamplesSinceDump;
-
-SamplingRegion::Locker::Locker()
-{
-    uintptr_t previous;
-    while (true) {
-        previous = s_currentOrReserved;
-        if (previous & 1) {
-#if OS(UNIX)
-            sched_yield();
-#endif
-            continue;
-        }
-        if (WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, previous | 1))
-            break;
-    }
-}
-
-SamplingRegion::Locker::~Locker()
-{
-    // We don't need the CAS, but we do it out of an
-    // abundance of caution (and because it gives us a memory fence, which is
-    // never bad).
-    uintptr_t previous;
-    do {
-        previous = s_currentOrReserved;
-    } while (!WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, previous & ~1));
-}
-
-void SamplingRegion::sample()
-{
-    // Make sure we lock s_current.
-    Locker locker;
-    
-    // Create a spectrum if we don't have one already.
-    if (!s_spectrum)
-        s_spectrum = new Spectrum<const char*>();
-    
-    ASSERT(s_currentOrReserved & 1);
-    
-    // Walk the region stack, and record each region we see.
-    SamplingRegion* region = bitwise_cast<SamplingRegion*>(s_currentOrReserved & ~1);
-    if (region) {
-        for (; region; region = region->m_previous)
-            s_spectrum->add(region->m_name);
-    } else
-        s_noneOfTheAbove++;
-    
-    if (s_numberOfSamplesSinceDump++ == SamplingThread::s_hertz) {
-        s_numberOfSamplesSinceDump = 0;
-        dumpInternal();
-    }
-}
-
-void SamplingRegion::dump()
-{
-    Locker locker;
-    
-    dumpInternal();
-}
-
-void SamplingRegion::dumpInternal()
-{
-    if (!s_spectrum) {
-        dataLogF("\nSamplingRegion: was never sampled.\n\n");
-        return;
-    }
-    
-    Vector<Spectrum<const char*>::KeyAndCount> list = s_spectrum->buildList();
-    
-    unsigned long total = s_noneOfTheAbove;
-    for (unsigned i = list.size(); i--;)
-        total += list[i].count;
-    
-    dataLogF("\nSamplingRegion: sample counts for regions: (%lu samples)\n", total);
-
-    for (unsigned i = list.size(); i--;)
-        dataLogF("    %3.2lf%%  %s\n", (100.0 * list[i].count) / total, list[i].key);
-}
-#else // ENABLE(SAMPLING_REGIONS)
-void SamplingRegion::dump() { }
-#endif // ENABLE(SAMPLING_REGIONS)
-
-/*
-  Start with flag 16 set.
-  By doing this the monitoring of lower valued flags will be masked out
-  until flag 16 is explicitly cleared.
-*/
-uint32_t SamplingFlags::s_flags = 1 << 15;
-
-
-#if OS(WINDOWS)
-
-static void sleepForMicroseconds(unsigned us)
-{
-    unsigned ms = us / 1000;
-    if (us && !ms)
-        ms = 1;
-    Sleep(ms);
-}
-
-#else 
-
-static void sleepForMicroseconds(unsigned us)
-{
-    usleep(us);
-}
-
-#endif
-
-static inline unsigned hertz2us(unsigned hertz)
-{
-    return 1000000 / hertz;
-}
-
-
-SamplingTool* SamplingTool::s_samplingTool = 0;
-
-
-bool SamplingThread::s_running = false;
-unsigned SamplingThread::s_hertz = 10000;
-ThreadIdentifier SamplingThread::s_samplingThread;
-
-void SamplingThread::threadStartFunc(void*)
-{
-    while (s_running) {
-        sleepForMicroseconds(hertz2us(s_hertz));
-
-#if ENABLE(SAMPLING_FLAGS)
-        SamplingFlags::sample();
-#endif
-#if ENABLE(SAMPLING_REGIONS)
-        SamplingRegion::sample();
-#endif
-#if ENABLE(OPCODE_SAMPLING)
-        SamplingTool::sample();
-#endif
-    }
-}
-
-
-void SamplingThread::start(unsigned hertz)
-{
-    ASSERT(!s_running);
-    s_running = true;
-    s_hertz = hertz;
-
-    s_samplingThread = createThread(threadStartFunc, 0, "JavaScriptCore::Sampler");
-}
-
-void SamplingThread::stop()
-{
-    ASSERT(s_running);
-    s_running = false;
-    waitForThreadCompletion(s_samplingThread);
-}
-
-
-void ScriptSampleRecord::sample(CodeBlock* codeBlock, Instruction* vPC)
-{
-    if (!m_samples) {
-        m_size = codeBlock->instructions().size();
-        m_samples = static_cast<int*>(calloc(m_size, sizeof(int)));
-        m_codeBlock = codeBlock;
-    }
-
-    ++m_sampleCount;
-
-    unsigned offset = vPC - codeBlock->instructions().begin();
-    // Since we don't read and write codeBlock and vPC atomically, this check
-    // can fail if we sample mid op_call / op_ret.
-    if (offset < m_size) {
-        m_samples[offset]++;
-        m_opcodeSampleCount++;
-    }
-}
-
-void SamplingTool::doRun()
-{
-    Sample sample(m_sample, m_codeBlock);
-    ++m_sampleCount;
-
-    if (sample.isNull())
-        return;
-
-    if (!sample.inHostFunction()) {
-        unsigned opcodeID = m_interpreter->getOpcodeID(sample.vPC()[0].u.opcode);
-
-        ++m_opcodeSampleCount;
-        ++m_opcodeSamples[opcodeID];
-
-        if (sample.inCTIFunction())
-            m_opcodeSamplesInCTIFunctions[opcodeID]++;
-    }
-
-#if ENABLE(CODEBLOCK_SAMPLING)
-    if (CodeBlock* codeBlock = sample.codeBlock()) {
-        MutexLocker locker(m_scriptSampleMapMutex);
-        ScriptSampleRecord* record = m_scopeSampleMap->get(codeBlock->ownerExecutable());
-        ASSERT(record);
-        record->sample(codeBlock, sample.vPC());
-    }
-#endif
-}
-
-void SamplingTool::sample()
-{
-    s_samplingTool->doRun();
-}
-
-void SamplingTool::notifyOfScope(VM& vm, ScriptExecutable* script)
-{
-#if ENABLE(CODEBLOCK_SAMPLING)
-    MutexLocker locker(m_scriptSampleMapMutex);
-    m_scopeSampleMap->set(script, adoptPtr(new ScriptSampleRecord(vm, script)));
-#else
-    UNUSED_PARAM(vm);
-    UNUSED_PARAM(script);
-#endif
-}
-
-void SamplingTool::setup()
-{
-    s_samplingTool = this;
-}
-
-#if ENABLE(OPCODE_SAMPLING)
-
-struct OpcodeSampleInfo {
-    OpcodeID opcode;
-    long long count;
-    long long countInCTIFunctions;
-};
-
-struct LineCountInfo {
-    unsigned line;
-    unsigned count;
-};
-
-static int compareOpcodeIndicesSampling(const void* left, const void* right)
-{
-    const OpcodeSampleInfo* leftSampleInfo = reinterpret_cast<const OpcodeSampleInfo*>(left);
-    const OpcodeSampleInfo* rightSampleInfo = reinterpret_cast<const OpcodeSampleInfo*>(right);
-
-    return (leftSampleInfo->count < rightSampleInfo->count) ? 1 : (leftSampleInfo->count > rightSampleInfo->count) ? -1 : 0;
-}
-
-#if ENABLE(CODEBLOCK_SAMPLING)
-static int compareLineCountInfoSampling(const void* left, const void* right)
-{
-    const LineCountInfo* leftLineCount = reinterpret_cast<const LineCountInfo*>(left);
-    const LineCountInfo* rightLineCount = reinterpret_cast<const LineCountInfo*>(right);
-
-    return (leftLineCount->line > rightLineCount->line) ? 1 : (leftLineCount->line < rightLineCount->line) ? -1 : 0;
-}
-
-static int compareScriptSampleRecords(const void* left, const void* right)
-{
-    const ScriptSampleRecord* const leftValue = *static_cast<const ScriptSampleRecord* const*>(left);
-    const ScriptSampleRecord* const rightValue = *static_cast<const ScriptSampleRecord* const*>(right);
-
-    return (leftValue->m_sampleCount < rightValue->m_sampleCount) ? 1 : (leftValue->m_sampleCount > rightValue->m_sampleCount) ? -1 : 0;
-}
-#endif
-
-void SamplingTool::dump(ExecState* exec)
-{
-    // Tidies up SunSpider output by removing short scripts - such a small number of samples would likely not be useful anyhow.
-    if (m_sampleCount < 10)
-        return;
-    
-    // (1) Build and sort 'opcodeSampleInfo' array.
-
-    OpcodeSampleInfo opcodeSampleInfo[numOpcodeIDs];
-    for (int i = 0; i < numOpcodeIDs; ++i) {
-        opcodeSampleInfo[i].opcode = static_cast(i);
-        opcodeSampleInfo[i].count = m_opcodeSamples[i];
-        opcodeSampleInfo[i].countInCTIFunctions = m_opcodeSamplesInCTIFunctions[i];
-    }
-
-    qsort(opcodeSampleInfo, numOpcodeIDs, sizeof(OpcodeSampleInfo), compareOpcodeIndicesSampling);
-
-    // (2) Print Opcode sampling results.
-
-    dataLogF("\nBytecode samples [*]\n");
-    dataLogF("                             sample   %% of       %% of     |   cti     cti %%\n");
-    dataLogF("opcode                       count     VM        total    |  count   of self\n");
-    dataLogF("-------------------------------------------------------   |  ----------------\n");
-
-    for (int i = 0; i < numOpcodeIDs; ++i) {
-        long long count = opcodeSampleInfo[i].count;
-        if (!count)
-            continue;
-
-        OpcodeID opcodeID = opcodeSampleInfo[i].opcode;
-        
-        const char* opcodeName = opcodeNames[opcodeID];
-        const char* opcodePadding = padOpcodeName(opcodeID, 28);
-        double percentOfVM = (static_cast(count) * 100) / m_opcodeSampleCount;
-        double percentOfTotal = (static_cast(count) * 100) / m_sampleCount;
-        long long countInCTIFunctions = opcodeSampleInfo[i].countInCTIFunctions;
-        double percentInCTIFunctions = (static_cast(countInCTIFunctions) * 100) / count;
-        dataLogF("%s:%s%-6lld %.3f%%\t%.3f%%\t  |   %-6lld %.3f%%\n", opcodeName, opcodePadding, count, percentOfVM, percentOfTotal, countInCTIFunctions, percentInCTIFunctions);
-    }
-    
-    dataLogF("\n[*] Samples inside host code are not charged to any Bytecode.\n\n");
-    dataLogF("\tSamples inside VM:\t\t%lld / %lld (%.3f%%)\n", m_opcodeSampleCount, m_sampleCount, (static_cast(m_opcodeSampleCount) * 100) / m_sampleCount);
-    dataLogF("\tSamples inside host code:\t%lld / %lld (%.3f%%)\n\n", m_sampleCount - m_opcodeSampleCount, m_sampleCount, (static_cast(m_sampleCount - m_opcodeSampleCount) * 100) / m_sampleCount);
-    dataLogF("\tsample count:\tsamples inside this opcode\n");
-    dataLogF("\t%% of VM:\tsample count / all opcode samples\n");
-    dataLogF("\t%% of total:\tsample count / all samples\n");
-    dataLogF("\t--------------\n");
-    dataLogF("\tcti count:\tsamples inside a CTI function called by this opcode\n");
-    dataLogF("\tcti %% of self:\tcti count / sample count\n");
-    
-#if ENABLE(CODEBLOCK_SAMPLING)
-
-    // (3) Build and sort 'codeBlockSamples' array.
-
-    int scopeCount = m_scopeSampleMap->size();
-    Vector<ScriptSampleRecord*> codeBlockSamples(scopeCount);
-    ScriptSampleRecordMap::iterator iter = m_scopeSampleMap->begin();
-    for (int i = 0; i < scopeCount; ++i, ++iter)
-        codeBlockSamples[i] = iter->value.get();
-
-    qsort(codeBlockSamples.begin(), scopeCount, sizeof(ScriptSampleRecord*), compareScriptSampleRecords);
-
-    // (4) Print data from 'codeBlockSamples' array.
-
-    dataLogF("\nCodeBlock samples\n\n"); 
-
-    for (int i = 0; i < scopeCount; ++i) {
-        ScriptSampleRecord* record = codeBlockSamples[i];
-        CodeBlock* codeBlock = record->m_codeBlock;
-
-        double blockPercent = (record->m_sampleCount * 100.0) / m_sampleCount;
-
-        if (blockPercent >= 1) {
-            //Instruction* code = codeBlock->instructions().begin();
-            dataLogF("#%d: %s:%d: %d / %lld (%.3f%%)\n", i + 1, record->m_executable->sourceURL().utf8().data(), codeBlock->lineNumberForBytecodeOffset(0), record->m_sampleCount, m_sampleCount, blockPercent);
-            if (i < 10) {
-                HashMap<unsigned, unsigned> lineCounts;
-                codeBlock->dump(exec);
-
-                dataLogF("    Opcode and line number samples [*]\n\n");
-                for (unsigned op = 0; op < record->m_size; ++op) {
-                    int count = record->m_samples[op];
-                    if (count) {
-                        dataLogF("    [% 4d] has sample count: % 4d\n", op, count);
-                        unsigned line = codeBlock->lineNumberForBytecodeOffset(op);
-                        lineCounts.set(line, (lineCounts.contains(line) ? lineCounts.get(line) : 0) + count);
-                    }
-                }
-                dataLogF("\n");
-
-                int linesCount = lineCounts.size();
-                Vector<LineCountInfo> lineCountInfo(linesCount);
-                int lineno = 0;
-                for (HashMap<unsigned, unsigned>::iterator iter = lineCounts.begin(); iter != lineCounts.end(); ++iter, ++lineno) {
-                    lineCountInfo[lineno].line = iter->key;
-                    lineCountInfo[lineno].count = iter->value;
-                }
-
-                qsort(lineCountInfo.begin(), linesCount, sizeof(LineCountInfo), compareLineCountInfoSampling);
-
-                for (lineno = 0; lineno < linesCount; ++lineno) {
-                    dataLogF("    Line #%d has sample count %d.\n", lineCountInfo[lineno].line, lineCountInfo[lineno].count);
-                }
-                dataLogF("\n");
-                dataLogF("    [*] Samples inside host code are charged to the calling Bytecode.\n");
-                dataLogF("        Samples on a call / return boundary are not charged to a specific opcode or line.\n\n");
-                    dataLogF("            Samples on a call / return boundary: %d / %d (%.3f%%)\n\n", record->m_sampleCount - record->m_opcodeSampleCount, record->m_sampleCount, (static_cast<double>(record->m_sampleCount - record->m_opcodeSampleCount) * 100) / record->m_sampleCount);
-            }
-        }
-    }
-#else
-    UNUSED_PARAM(exec);
-#endif
-}
-
-#else
-
-void SamplingTool::dump(ExecState*)
-{
-}
-
-#endif
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h
deleted file mode 100644
index 1dfb8ecca..000000000
--- a/Source/JavaScriptCore/bytecode/SamplingTool.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1.  Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 2.  Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- *     its contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef SamplingTool_h
-#define SamplingTool_h
-
-#include "Strong.h"
-#include "Opcode.h"
-#include "SamplingCounter.h"
-#include <wtf/Assertions.h>
-#include <wtf/Atomics.h>
-#include <wtf/HashMap.h>
-#include <wtf/MainThread.h>
-#include <wtf/Spectrum.h>
-#include <wtf/Threading.h>
-
-namespace JSC {
-
-    class ScriptExecutable;
-
-    class SamplingFlags {
-    public:
-        JS_EXPORT_PRIVATE static void start();
-        JS_EXPORT_PRIVATE static void stop();
-
-#if ENABLE(SAMPLING_FLAGS)
-        static void setFlag(unsigned flag)
-        {
-            ASSERT(flag >= 1);
-            ASSERT(flag <= 32);
-            s_flags |= 1u << (flag - 1);
-        }
-
-        static void clearFlag(unsigned flag)
-        {
-            ASSERT(flag >= 1);
-            ASSERT(flag <= 32);
-            s_flags &= ~(1u << (flag - 1));
-        }
-
-        static void sample();
-
-        class ScopedFlag {
-        public:
-            ScopedFlag(int flag)
-                : m_flag(flag)
-            {
-                setFlag(flag);
-            }
-
-            ~ScopedFlag()
-            {
-                clearFlag(m_flag);
-            }
-
-        private:
-            int m_flag;
-        };
-    
-        static const void* addressOfFlags()
-        {
-            return &s_flags;
-        }
-
-#endif
-    private:
-        JS_EXPORTDATA static uint32_t s_flags;
-#if ENABLE(SAMPLING_FLAGS)
-        static uint64_t s_flagCounts[33];
-#endif
-    };
-
-#if ENABLE(SAMPLING_REGIONS)
-    class SamplingRegion {
-    public:
-        // Create a scoped sampling region using a C string constant name that describes
-        // what you are doing. This must be a string constant that persists for the
-        // lifetime of the process and is immutable.
-        SamplingRegion(const char* name)
-        {
-            if (!isMainThread()) {
-                m_name = 0;
-                return;
-            }
-            
-            m_name = name;
-            exchangeCurrent(this, &m_previous);
-            ASSERT(!m_previous || m_previous > this);
-        }
-        
-        ~SamplingRegion()
-        {
-            if (!m_name)
-                return;
-            
-            ASSERT(bitwise_cast<SamplingRegion*>(s_currentOrReserved & ~1) == this);
-            exchangeCurrent(m_previous);
-        }
-        
-        static void sample();
-        
-        JS_EXPORT_PRIVATE static void dump();
-        
-    private:
-        const char* m_name;
-        SamplingRegion* m_previous;
-
-        static void exchangeCurrent(SamplingRegion* current, SamplingRegion** previousPtr = 0)
-        {
-            uintptr_t previous;
-            while (true) {
-                previous = s_currentOrReserved;
-                
-                // If it's reserved (i.e. sampling thread is reading it), loop around.
-                if (previous & 1) {
-#if OS(UNIX)
-                    sched_yield();
-#endif
-                    continue;
-                }
-                
-                // If we're going to CAS, then make sure previous is set.
-                if (previousPtr)
-                    *previousPtr = bitwise_cast<SamplingRegion*>(previous);
-                
-                if (WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, bitwise_cast<uintptr_t>(current)))
-                    break;
-            }
-        }
-        
-        static void dumpInternal();
-
-        class Locker {
-        public:
-            Locker();
-            ~Locker();
-        };
-
-        static volatile uintptr_t s_currentOrReserved;
-        
-        // rely on identity hashing of string constants
-        static Spectrum<const char*>* s_spectrum;
-        
-        static unsigned long s_noneOfTheAbove;
-        
-        static unsigned s_numberOfSamplesSinceDump;
-    };
-#else // ENABLE(SAMPLING_REGIONS)
-    class SamplingRegion {
-    public:
-        SamplingRegion(const char*) { }
-        JS_EXPORT_PRIVATE void dump();
-    };
-#endif // ENABLE(SAMPLING_REGIONS)
-
-    class CodeBlock;
-    class ExecState;
-    class Interpreter;
-    class ScopeNode;
-    struct Instruction;
-
-    struct ScriptSampleRecord {
-        ScriptSampleRecord(VM& vm, ScriptExecutable* executable)
-            : m_executable(vm, executable)
-            , m_codeBlock(0)
-            , m_sampleCount(0)
-            , m_opcodeSampleCount(0)
-            , m_samples(0)
-            , m_size(0)
-        {
-        }
-        
-        ~ScriptSampleRecord()
-        {
-            if (m_samples)
-                free(m_samples);
-        }
-        
-        void sample(CodeBlock*, Instruction*);
-
-        Strong<ScriptExecutable> m_executable;
-        CodeBlock* m_codeBlock;
-        int m_sampleCount;
-        int m_opcodeSampleCount;
-        int* m_samples;
-        unsigned m_size;
-    };
-
-    typedef HashMap<ScriptExecutable*, OwnPtr<ScriptSampleRecord>> ScriptSampleRecordMap;
-
-    class SamplingThread {
-    public:
-        // Sampling thread state.
-        static bool s_running;
-        static unsigned s_hertz;
-        static ThreadIdentifier s_samplingThread;
-
-        JS_EXPORT_PRIVATE static void start(unsigned hertz=10000);
-        JS_EXPORT_PRIVATE static void stop();
-
-        static void threadStartFunc(void*);
-    };
-
-    class SamplingTool {
-    public:
-        friend struct CallRecord;
-        
-#if ENABLE(OPCODE_SAMPLING)
-        class CallRecord {
-            WTF_MAKE_NONCOPYABLE(CallRecord);
-        public:
-            CallRecord(SamplingTool* samplingTool, bool isHostCall = false)
-                : m_samplingTool(samplingTool)
-                , m_savedSample(samplingTool->m_sample)
-                , m_savedCodeBlock(samplingTool->m_codeBlock)
-            {
-                if (isHostCall)
-                    samplingTool->m_sample |= 0x1;
-            }
-
-            ~CallRecord()
-            {
-                m_samplingTool->m_sample = m_savedSample;
-                m_samplingTool->m_codeBlock = m_savedCodeBlock;
-            }
-
-        private:
-            SamplingTool* m_samplingTool;
-            intptr_t m_savedSample;
-            CodeBlock* m_savedCodeBlock;
-        };
-#else
-        class CallRecord {
-            WTF_MAKE_NONCOPYABLE(CallRecord);
-        public:
-            CallRecord(SamplingTool*, bool = false)
-            {
-            }
-        };
-#endif
-
-        SamplingTool(Interpreter* interpreter)
-            : m_interpreter(interpreter)
-            , m_codeBlock(0)
-            , m_sample(0)
-            , m_sampleCount(0)
-            , m_opcodeSampleCount(0)
-#if ENABLE(CODEBLOCK_SAMPLING)
-            , m_scopeSampleMap(adoptPtr(new ScriptSampleRecordMap))
-#endif
-        {
-            memset(m_opcodeSamples, 0, sizeof(m_opcodeSamples));
-            memset(m_opcodeSamplesInCTIFunctions, 0, sizeof(m_opcodeSamplesInCTIFunctions));
-        }
-
-        JS_EXPORT_PRIVATE void setup();
-        void dump(ExecState*);
-
-        void notifyOfScope(VM&, ScriptExecutable* scope);
-
-        void sample(CodeBlock* codeBlock, Instruction* vPC)
-        {
-            ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
-            m_codeBlock = codeBlock;
-            m_sample = reinterpret_cast<intptr_t>(vPC);
-        }
-
-        CodeBlock** codeBlockSlot() { return &m_codeBlock; }
-        intptr_t* sampleSlot() { return &m_sample; }
-
-        void* encodeSample(Instruction* vPC, bool inCTIFunction = false, bool inHostFunction = false)
-        {
-            ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
-            return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(vPC) | (static_cast<intptr_t>(inCTIFunction) << 1) | static_cast<intptr_t>(inHostFunction));
-        }
-
-        static void sample();
-
-    private:
-        class Sample {
-        public:
-            Sample(volatile intptr_t sample, CodeBlock* volatile codeBlock)
-                : m_sample(sample)
-                , m_codeBlock(codeBlock)
-            {
-            }
-            
-            bool isNull() { return !m_sample; }
-            CodeBlock* codeBlock() { return m_codeBlock; }
-            Instruction* vPC() { return reinterpret_cast<Instruction*>(m_sample & ~0x3); }
-            bool inHostFunction() { return m_sample & 0x1; }
-            bool inCTIFunction() { return m_sample & 0x2; }
-
-        private:
-            intptr_t m_sample;
-            CodeBlock* m_codeBlock;
-        };
-
-        void doRun();
-        static SamplingTool* s_samplingTool;
-        
-        Interpreter* m_interpreter;
-        
-        // State tracked by the main thread, used by the sampling thread.
-        CodeBlock* m_codeBlock;
-        intptr_t m_sample;
-
-        // Gathered sample data.
-        long long m_sampleCount;
-        long long m_opcodeSampleCount;
-        unsigned m_opcodeSamples[numOpcodeIDs];
-        unsigned m_opcodeSamplesInCTIFunctions[numOpcodeIDs];
-        
-#if ENABLE(CODEBLOCK_SAMPLING)
-        Mutex m_scriptSampleMapMutex;
-        OwnPtr<ScriptSampleRecordMap> m_scopeSampleMap;
-#endif
-    };
-
-} // namespace JSC
-
-#endif // SamplingTool_h
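
The SamplingTool removed above packed each sample into a single word: Instruction* values are at least 4-byte aligned, so the low two bits are free to flag whether the sampled thread was inside a CTI thunk or a host function (see encodeSample() and the Sample class). A minimal standalone sketch of that tagged-pointer technique, with illustrative names rather than WebKit's:

    #include <cassert>
    #include <cstdint>

    struct Instruction; // opaque: only its address matters here

    // Pack a bytecode PC plus two flags into one word. The alignment of
    // Instruction guarantees that the two low bits start out clear.
    inline void* encodeSample(Instruction* vPC, bool inCTIFunction, bool inHostFunction)
    {
        uintptr_t bits = reinterpret_cast<uintptr_t>(vPC);
        assert(!(bits & 0x3));
        return reinterpret_cast<void*>(bits
            | (static_cast<uintptr_t>(inCTIFunction) << 1)
            | static_cast<uintptr_t>(inHostFunction));
    }

    inline Instruction* decodeVPC(void* sample)
    {
        return reinterpret_cast<Instruction*>(
            reinterpret_cast<uintptr_t>(sample) & ~static_cast<uintptr_t>(0x3));
    }

    inline bool decodeInHostFunction(void* sample)
    {
        return reinterpret_cast<uintptr_t>(sample) & 0x1;
    }

The same trick appears in SamplingRegion above, where bit 0 of s_currentOrReserved marks the word as reserved by the sampling thread.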
diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
index 7789653f0..dc5a363b6 100644
--- a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
@@ -28,6 +28,7 @@
 
 #include "CodeBlock.h"
 #include "JSGlobalObject.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.h b/Source/JavaScriptCore/bytecode/SpecialPointer.h
index c18a6e904..21329ec43 100644
--- a/Source/JavaScriptCore/bytecode/SpecialPointer.h
+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.h
@@ -23,8 +23,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef SpecialPointer_h
-#define SpecialPointer_h
+#pragma once
 
 namespace JSC {
 
@@ -41,6 +40,11 @@ enum Pointer {
 };
 } // namespace Special
 
+enum class LinkTimeConstant {
+    ThrowTypeErrorFunction,
+};
+const unsigned LinkTimeConstantCount = 1;
+
 inline bool pointerIsFunction(Special::Pointer pointer)
 {
     ASSERT_UNUSED(pointer, pointer < Special::TableSize);
@@ -57,6 +61,3 @@ void* actualPointerFor(JSGlobalObject*, Special::Pointer);
 void* actualPointerFor(CodeBlock*, Special::Pointer);
 
 } // namespace JSC
-
-#endif // SpecialPointer_h
-
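
The LinkTimeConstant addition in this header pairs a scoped enum with a hand-maintained LinkTimeConstantCount, the usual pattern for sizing lookup tables by an enum. A small sketch of a consumer of that pattern (the table type here is hypothetical, not part of this patch):

    #include <array>
    #include <cstddef>

    enum class LinkTimeConstant { ThrowTypeErrorFunction };
    const unsigned LinkTimeConstantCount = 1;

    // Hypothetical fixed-size table indexed by the enum; adding a constant
    // means extending the enum and bumping the count in one header.
    struct LinkTimeConstantTable {
        std::array<void*, LinkTimeConstantCount> entries {};
        void*& at(LinkTimeConstant c) { return entries[static_cast<std::size_t>(c)]; }
    };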
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
index 3917cca0f..45846e8c7 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2013, 2015-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -29,13 +29,17 @@
 #include "config.h"
 #include "SpeculatedType.h"
 
-#include "Arguments.h"
+#include "DirectArguments.h"
 #include "JSArray.h"
+#include "JSCInlines.h"
 #include "JSFunction.h"
-#include "Operations.h"
+#include "JSMap.h"
+#include "JSSet.h"
+#include "ProxyObject.h"
+#include "RegExpObject.h"
+#include "ScopedArguments.h"
 #include "StringObject.h"
 #include "ValueProfile.h"
-#include <wtf/BoundsCheckedPointer.h>
 #include <wtf/StringPrintStream.h>
 
 namespace JSC {
@@ -127,8 +131,13 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
             else
                 isTop = false;
     
-            if (value & SpecArguments)
-                myOut.print("Arguments");
+            if (value & SpecDirectArguments)
+                myOut.print("Directarguments");
+            else
+                isTop = false;
+    
+            if (value & SpecScopedArguments)
+                myOut.print("Scopedarguments");
             else
                 isTop = false;
     
@@ -136,6 +145,31 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
                 myOut.print("Stringobject");
             else
                 isTop = false;
+    
+            if (value & SpecRegExpObject)
+                myOut.print("Regexpobject");
+            else
+                isTop = false;
+
+            if (value & SpecMapObject)
+                myOut.print("Mapobject");
+            else
+                isTop = false;
+
+            if (value & SpecSetObject)
+                myOut.print("Setobject");
+            else
+                isTop = false;
+
+            if (value & SpecProxyObject)
+                myOut.print("Proxyobject");
+            else
+                isTop = false;
+
+            if (value & SpecDerivedArray)
+                myOut.print("Derivedarray");
+            else
+                isTop = false;
         }
 
         if ((value & SpecString) == SpecString)
@@ -151,21 +185,35 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
             else
                 isTop = false;
         }
+
+        if (value & SpecSymbol)
+            myOut.print("Symbol");
+        else
+            isTop = false;
     }
     
-    if (value & SpecInt32)
+    if (value == SpecInt32Only)
         myOut.print("Int32");
-    else
-        isTop = false;
+    else {
+        if (value & SpecBoolInt32)
+            myOut.print("Boolint32");
+        else
+            isTop = false;
+        
+        if (value & SpecNonBoolInt32)
+            myOut.print("Nonboolint32");
+        else
+            isTop = false;
+    }
     
-    if (value & SpecInt52)
+    if (value & SpecInt52Only)
         myOut.print("Int52");
         
-    if ((value & SpecDouble) == SpecDouble)
-        myOut.print("Double");
+    if ((value & SpecBytecodeDouble) == SpecBytecodeDouble)
+        myOut.print("Bytecodedouble");
     else {
-        if (value & SpecInt52AsDouble)
-            myOut.print("Int52asdouble");
+        if (value & SpecAnyIntAsDouble)
+            myOut.print("AnyIntAsDouble");
         else
             isTop = false;
         
@@ -174,12 +222,15 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
         else
             isTop = false;
         
-        if (value & SpecDoubleNaN)
-            myOut.print("Doublenan");
+        if (value & SpecDoublePureNaN)
+            myOut.print("Doublepurenan");
         else
             isTop = false;
     }
     
+    if (value & SpecDoubleImpureNaN)
+        myOut.print("Doubleimpurenan");
+    
     if (value & SpecBoolean)
         myOut.print("Bool");
     else
@@ -229,24 +280,30 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
         return "<Float32array>";
     if (isFloat64ArraySpeculation(prediction))
         return "<Float64array>";
-    if (isArgumentsSpeculation(prediction))
-        return "<Arguments>";
+    if (isDirectArgumentsSpeculation(prediction))
+        return "<DirectArguments>";
+    if (isScopedArgumentsSpeculation(prediction))
+        return "<ScopedArguments>";
     if (isStringObjectSpeculation(prediction))
         return "<StringObject>";
+    if (isRegExpObjectSpeculation(prediction))
+        return "<RegExpObject>";
     if (isStringOrStringObjectSpeculation(prediction))
         return "<StringOrStringObject>";
     if (isObjectSpeculation(prediction))
         return "<Object>";
     if (isCellSpeculation(prediction))
         return "<Cell>";
+    if (isBoolInt32Speculation(prediction))
+        return "<BoolInt32>";
     if (isInt32Speculation(prediction))
         return "<Int32>";
-    if (isInt52AsDoubleSpeculation(prediction))
-        return "<Int52AsDouble>";
+    if (isAnyIntAsDoubleSpeculation(prediction))
+        return "<AnyIntAsDouble>";
     if (isInt52Speculation(prediction))
         return "<Int52>";
-    if (isMachineIntSpeculation(prediction))
-        return "<MachineInt>";
+    if (isAnyIntSpeculation(prediction))
+        return "<AnyInt>";
     if (isDoubleSpeculation(prediction))
         return "<Double>";
     if (isFullNumberSpeculation(prediction))
@@ -255,6 +312,8 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
         return "<Boolean>";
     if (isOtherSpeculation(prediction))
         return "<Other>";
+    if (isMiscSpeculation(prediction))
+        return "<Misc>";
     return "";
 }
 
@@ -294,23 +353,47 @@ SpeculatedType speculationFromTypedArrayType(TypedArrayType type)
 
 SpeculatedType speculationFromClassInfo(const ClassInfo* classInfo)
 {
+    if (classInfo == JSString::info())
+        return SpecString;
+
+    if (classInfo == Symbol::info())
+        return SpecSymbol;
+
     if (classInfo == JSFinalObject::info())
         return SpecFinalObject;
     
     if (classInfo == JSArray::info())
         return SpecArray;
     
-    if (classInfo == Arguments::info())
-        return SpecArguments;
+    if (classInfo == DirectArguments::info())
+        return SpecDirectArguments;
+    
+    if (classInfo == ScopedArguments::info())
+        return SpecScopedArguments;
     
     if (classInfo == StringObject::info())
         return SpecStringObject;
+
+    if (classInfo == RegExpObject::info())
+        return SpecRegExpObject;
+
+    if (classInfo == JSMap::info())
+        return SpecMapObject;
+
+    if (classInfo == JSSet::info())
+        return SpecSetObject;
+
+    if (classInfo == ProxyObject::info())
+        return SpecProxyObject;
     
     if (classInfo->isSubClassOf(JSFunction::info()))
         return SpecFunction;
     
     if (isTypedView(classInfo->typedArrayStorageType))
         return speculationFromTypedArrayType(classInfo->typedArrayStorageType);
+
+    if (classInfo->isSubClassOf(JSArray::info()))
+        return SpecDerivedArray;
     
     if (classInfo->isSubClassOf(JSObject::info()))
         return SpecObjectOther;
@@ -322,14 +405,19 @@ SpeculatedType speculationFromStructure(Structure* structure)
 {
     if (structure->typeInfo().type() == StringType)
         return SpecString;
+    if (structure->typeInfo().type() == SymbolType)
+        return SpecSymbol;
+    if (structure->typeInfo().type() == DerivedArrayType)
+        return SpecDerivedArray;
     return speculationFromClassInfo(structure->classInfo());
 }
 
 SpeculatedType speculationFromCell(JSCell* cell)
 {
-    if (JSString* string = jsDynamicCast<JSString*>(cell)) {
+    if (cell->isString()) {
+        JSString* string = jsCast<JSString*>(cell);
         if (const StringImpl* impl = string->tryGetValueImpl()) {
-            if (impl->isIdentifier())
+            if (impl->isAtomic())
                 return SpecStringIdent;
         }
         return SpecStringVar;
@@ -341,14 +429,17 @@ SpeculatedType speculationFromValue(JSValue value)
 {
     if (value.isEmpty())
         return SpecEmpty;
-    if (value.isInt32())
-        return SpecInt32;
+    if (value.isInt32()) {
+        if (value.asInt32() & ~1)
+            return SpecNonBoolInt32;
+        return SpecBoolInt32;
+    }
     if (value.isDouble()) {
         double number = value.asNumber();
         if (number != number)
-            return SpecDoubleNaN;
-        if (value.isMachineInt())
-            return SpecInt52AsDouble;
+            return SpecDoublePureNaN;
+        if (value.isAnyInt())
+            return SpecAnyIntAsDouble;
         return SpecNonIntAsDouble;
     }
     if (value.isCell())
@@ -391,5 +482,163 @@ TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType type)
     return NotTypedArray;
 }
 
+SpeculatedType speculationFromJSType(JSType type)
+{
+    switch (type) {
+    case StringType:
+        return SpecString;
+    case SymbolType:
+        return SpecSymbol;
+    case ArrayType:
+        return SpecArray;
+    case DerivedArrayType:
+        return SpecDerivedArray;
+    case RegExpObjectType:
+        return SpecRegExpObject;
+    case ProxyObjectType:
+        return SpecProxyObject;
+    case JSMapType:
+        return SpecMapObject;
+    case JSSetType:
+        return SpecSetObject;
+    default:
+        ASSERT_NOT_REACHED();
+    }
+    return SpecNone;
+}
+
+SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType type)
+{
+    if (type & (SpecAnyInt | SpecAnyIntAsDouble))
+        type |= (SpecAnyInt | SpecAnyIntAsDouble);
+    if (type & SpecString)
+        type |= SpecString;
+    return type;
+}
+
+bool valuesCouldBeEqual(SpeculatedType a, SpeculatedType b)
+{
+    a = leastUpperBoundOfStrictlyEquivalentSpeculations(a);
+    b = leastUpperBoundOfStrictlyEquivalentSpeculations(b);
+    
+    // Anything could be equal to a string.
+    if (a & SpecString)
+        return true;
+    if (b & SpecString)
+        return true;
+    
+    // If both sides are definitely only objects, then equality is fairly sane.
+    if (isObjectSpeculation(a) && isObjectSpeculation(b))
+        return !!(a & b);
+    
+    // If either side could be an object or not, then we could call toString or
+    // valueOf, which could return anything.
+    if (a & SpecObject)
+        return true;
+    if (b & SpecObject)
+        return true;
+    
+    // Neither side is an object or string, so the world is relatively sane.
+    return !!(a & b);
+}
+
+SpeculatedType typeOfDoubleSum(SpeculatedType a, SpeculatedType b)
+{
+    SpeculatedType result = a | b;
+    // Impure NaN could become pure NaN during addition because addition may clear bits.
+    if (result & SpecDoubleImpureNaN)
+        result |= SpecDoublePureNaN;
+    // Values could overflow, or fractions could become integers.
+    if (result & SpecDoubleReal)
+        result |= SpecDoubleReal;
+    return result;
+}
+
+SpeculatedType typeOfDoubleDifference(SpeculatedType a, SpeculatedType b)
+{
+    return typeOfDoubleSum(a, b);
+}
+
+SpeculatedType typeOfDoubleProduct(SpeculatedType a, SpeculatedType b)
+{
+    return typeOfDoubleSum(a, b);
+}
+
+static SpeculatedType polluteDouble(SpeculatedType value)
+{
+    // Impure NaN could become pure NaN because the operation could clear some bits.
+    if (value & SpecDoubleImpureNaN)
+        value |= SpecDoubleNaN;
+    // Values could overflow, fractions could become integers, or an error could produce
+    // PureNaN.
+    if (value & SpecDoubleReal)
+        value |= SpecDoubleReal | SpecDoublePureNaN;
+    return value;
+}
+
+SpeculatedType typeOfDoubleQuotient(SpeculatedType a, SpeculatedType b)
+{
+    return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleMinMax(SpeculatedType a, SpeculatedType b)
+{
+    SpeculatedType result = a | b;
+    // Impure NaN could become pure NaN during addition because addition may clear bits.
+    if (result & SpecDoubleImpureNaN)
+        result |= SpecDoublePureNaN;
+    return result;
+}
+
+SpeculatedType typeOfDoubleNegation(SpeculatedType value)
+{
+    // Changing bits can make pure NaN impure and vice versa:
+    // 0xefff000000000000 (pure) - 0xffff000000000000 (impure)
+    if (value & SpecDoubleNaN)
+        value |= SpecDoubleNaN;
+    // We could get negative zero, which mixes SpecAnyIntAsDouble and SpecNonIntAsDouble.
+    // We could also overflow a large negative int into something that is no longer
+    // representable as an int.
+    if (value & SpecDoubleReal)
+        value |= SpecDoubleReal;
+    return value;
+}
+
+SpeculatedType typeOfDoubleAbs(SpeculatedType value)
+{
+    return typeOfDoubleNegation(value);
+}
+
+SpeculatedType typeOfDoubleRounding(SpeculatedType value)
+{
+    // A double pure NaN can become impure when converted back from Float,
+    // and vice versa.
+    if (value & SpecDoubleNaN)
+        value |= SpecDoubleNaN;
+    // We might lose bits, which leads to a value becoming integer-representable.
+    if (value & SpecNonIntAsDouble)
+        value |= SpecAnyIntAsDouble;
+    return value;
+}
+
+SpeculatedType typeOfDoublePow(SpeculatedType xValue, SpeculatedType yValue)
+{
+    // Math.pow() always returns NaN if the exponent is NaN, unlike std::pow().
+    // We always set a pure NaN in that case.
+    if (yValue & SpecDoubleNaN)
+        xValue |= SpecDoublePureNaN;
+    return polluteDouble(xValue);
+}
+
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType a, SpeculatedType b)
+{
+    return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType value)
+{
+    return polluteDouble(value);
+}
+
 } // namespace JSC
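
All of the typeOfDouble* helpers added above follow one widening rule: take the union of the input bit sets, then fold in every bit the operation could manufacture (for example, arithmetic can clear payload bits and turn an impure NaN into a pure one). A toy standalone version of that rule, using simplified bit names rather than the real Spec constants:

    #include <cassert>
    #include <cstdint>

    typedef uint64_t Spec;
    const Spec ImpureNaN = 1ull << 0;
    const Spec PureNaN   = 1ull << 1;
    const Spec Real      = 1ull << 2;

    // Same shape as typeOfDoubleSum: union the inputs, then widen with
    // anything the operation could produce.
    Spec typeOfSum(Spec a, Spec b)
    {
        Spec result = a | b;
        if (result & ImpureNaN) // addition may clear bits, purifying a NaN
            result |= PureNaN;
        return result;
    }

    int main()
    {
        assert(typeOfSum(Real, ImpureNaN) == (Real | ImpureNaN | PureNaN));
        assert(typeOfSum(Real, Real) == Real); // nothing new can appear
        return 0;
    }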
 
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.h b/Source/JavaScriptCore/bytecode/SpeculatedType.h
index eaf0af37a..e23fd2c5f 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.h
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -26,8 +26,7 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef SpeculatedType_h
-#define SpeculatedType_h
+#pragma once
 
 #include "JSCJSValue.h"
 #include "TypedArrayType.h"
@@ -37,49 +36,62 @@ namespace JSC {
 
 class Structure;
 
-typedef uint32_t SpeculatedType;
-static const SpeculatedType SpecNone               = 0x00000000; // We don't know anything yet.
-static const SpeculatedType SpecFinalObject        = 0x00000001; // It's definitely a JSFinalObject.
-static const SpeculatedType SpecArray              = 0x00000002; // It's definitely a JSArray.
-static const SpeculatedType SpecFunction           = 0x00000008; // It's definitely a JSFunction or one of its subclasses.
-static const SpeculatedType SpecInt8Array          = 0x00000010; // It's definitely an Int8Array or one of its subclasses.
-static const SpeculatedType SpecInt16Array         = 0x00000020; // It's definitely an Int16Array or one of its subclasses.
-static const SpeculatedType SpecInt32Array         = 0x00000040; // It's definitely an Int32Array or one of its subclasses.
-static const SpeculatedType SpecUint8Array         = 0x00000080; // It's definitely an Uint8Array or one of its subclasses.
-static const SpeculatedType SpecUint8ClampedArray  = 0x00000100; // It's definitely an Uint8ClampedArray or one of its subclasses.
-static const SpeculatedType SpecUint16Array        = 0x00000200; // It's definitely an Uint16Array or one of its subclasses.
-static const SpeculatedType SpecUint32Array        = 0x00000400; // It's definitely an Uint32Array or one of its subclasses.
-static const SpeculatedType SpecFloat32Array       = 0x00000800; // It's definitely an Uint16Array or one of its subclasses.
-static const SpeculatedType SpecFloat64Array       = 0x00001000; // It's definitely an Uint16Array or one of its subclasses.
+typedef uint64_t SpeculatedType;
+static const SpeculatedType SpecNone               = 0; // We don't know anything yet.
+static const SpeculatedType SpecFinalObject        = 1ull << 0; // It's definitely a JSFinalObject.
+static const SpeculatedType SpecArray              = 1ull << 1; // It's definitely a JSArray.
+static const SpeculatedType SpecFunction           = 1ull << 2; // It's definitely a JSFunction.
+static const SpeculatedType SpecInt8Array          = 1ull << 3; // It's definitely an Int8Array or one of its subclasses.
+static const SpeculatedType SpecInt16Array         = 1ull << 4; // It's definitely an Int16Array or one of its subclasses.
+static const SpeculatedType SpecInt32Array         = 1ull << 5; // It's definitely an Int32Array or one of its subclasses.
+static const SpeculatedType SpecUint8Array         = 1ull << 6; // It's definitely an Uint8Array or one of its subclasses.
+static const SpeculatedType SpecUint8ClampedArray  = 1ull << 7; // It's definitely an Uint8ClampedArray or one of its subclasses.
+static const SpeculatedType SpecUint16Array        = 1ull << 8; // It's definitely an Uint16Array or one of its subclasses.
+static const SpeculatedType SpecUint32Array        = 1ull << 9; // It's definitely an Uint32Array or one of its subclasses.
+static const SpeculatedType SpecFloat32Array       = 1ull << 10; // It's definitely a Float32Array or one of its subclasses.
+static const SpeculatedType SpecFloat64Array       = 1ull << 11; // It's definitely a Float64Array or one of its subclasses.
 static const SpeculatedType SpecTypedArrayView     = SpecInt8Array | SpecInt16Array | SpecInt32Array | SpecUint8Array | SpecUint8ClampedArray | SpecUint16Array | SpecUint32Array | SpecFloat32Array | SpecFloat64Array;
-static const SpeculatedType SpecArguments          = 0x00002000; // It's definitely an Arguments object.
-static const SpeculatedType SpecStringObject       = 0x00004000; // It's definitely a StringObject.
-static const SpeculatedType SpecObjectOther        = 0x00008000; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
-static const SpeculatedType SpecObject             = 0x0000ffff; // Bitmask used for testing for any kind of object prediction.
-static const SpeculatedType SpecStringIdent        = 0x00010000; // It's definitely a JSString, and it's an identifier.
-static const SpeculatedType SpecStringVar          = 0x00020000; // It's definitely a JSString, and it's not an identifier.
-static const SpeculatedType SpecString             = 0x00030000; // It's definitely a JSString.
-static const SpeculatedType SpecCellOther          = 0x00040000; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString.
-static const SpeculatedType SpecCell               = 0x0007ffff; // It's definitely a JSCell.
-static const SpeculatedType SpecInt32              = 0x00800000; // It's definitely an Int32.
-static const SpeculatedType SpecInt52              = 0x01000000; // It's definitely an Int52 and we intend it to unbox it.
-static const SpeculatedType SpecMachineInt         = 0x01800000; // It's something that we can do machine int arithmetic on.
-static const SpeculatedType SpecInt52AsDouble      = 0x02000000; // It's definitely an Int52 and it's inside a double.
-static const SpeculatedType SpecInteger            = 0x03800000; // It's definitely some kind of integer.
-static const SpeculatedType SpecNonIntAsDouble     = 0x04000000; // It's definitely not an Int52 but it's a real number and it's a double.
-static const SpeculatedType SpecDoubleReal         = 0x06000000; // It's definitely a non-NaN double.
-static const SpeculatedType SpecDoubleNaN          = 0x08000000; // It's definitely a NaN.
-static const SpeculatedType SpecDouble             = 0x0e000000; // It's either a non-NaN or a NaN double.
-static const SpeculatedType SpecBytecodeRealNumber = 0x06800000; // It's either an Int32 or a DoubleReal.
-static const SpeculatedType SpecFullRealNumber     = 0x07800000; // It's either an Int32 or a DoubleReal, or a Int52.
-static const SpeculatedType SpecBytecodeNumber     = 0x0e800000; // It's either an Int32 or a Double.
-static const SpeculatedType SpecFullNumber         = 0x0f800000; // It's either an Int32, Int52, or a Double.
-static const SpeculatedType SpecBoolean            = 0x10000000; // It's definitely a Boolean.
-static const SpeculatedType SpecOther              = 0x20000000; // It's definitely none of the above.
-static const SpeculatedType SpecHeapTop            = 0x3effffff; // It can be any of the above, except for SpecInt52.
-static const SpeculatedType SpecEmpty              = 0x40000000; // It's definitely an empty value marker.
-static const SpeculatedType SpecBytecodeTop        = 0x7effffff; // It can be any of the above, except for SpecInt52.
-static const SpeculatedType SpecFullTop            = 0x7fffffff; // It can be any of the above plus anything the DFG chooses.
+static const SpeculatedType SpecDirectArguments    = 1ull << 12; // It's definitely a DirectArguments object.
+static const SpeculatedType SpecScopedArguments    = 1ull << 13; // It's definitely a ScopedArguments object.
+static const SpeculatedType SpecStringObject       = 1ull << 14; // It's definitely a StringObject.
+static const SpeculatedType SpecRegExpObject       = 1ull << 15; // It's definitely a RegExpObject (and not any subclass of RegExpObject).
+static const SpeculatedType SpecMapObject          = 1ull << 16; // It's definitely a Map object or one of its subclasses.
+static const SpeculatedType SpecSetObject          = 1ull << 17; // It's definitely a Set object or one of its subclasses.
+static const SpeculatedType SpecProxyObject        = 1ull << 18; // It's definitely a Proxy object or one of its subclasses.
+static const SpeculatedType SpecDerivedArray       = 1ull << 19; // It's definitely a DerivedArray object.
+static const SpeculatedType SpecObjectOther        = 1ull << 20; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
+static const SpeculatedType SpecObject             = SpecFinalObject | SpecArray | SpecFunction | SpecTypedArrayView | SpecDirectArguments | SpecScopedArguments | SpecStringObject | SpecRegExpObject | SpecMapObject | SpecSetObject | SpecProxyObject | SpecDerivedArray | SpecObjectOther; // Bitmask used for testing for any kind of object prediction.
+static const SpeculatedType SpecStringIdent        = 1ull << 21; // It's definitely a JSString, and it's an identifier.
+static const SpeculatedType SpecStringVar          = 1ull << 22; // It's definitely a JSString, and it's not an identifier.
+static const SpeculatedType SpecString             = SpecStringIdent | SpecStringVar; // It's definitely a JSString.
+static const SpeculatedType SpecSymbol             = 1ull << 23; // It's definitely a Symbol.
+static const SpeculatedType SpecCellOther          = 1ull << 24; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString or a Symbol. FIXME: This shouldn't be part of heap-top or bytecode-top. https://bugs.webkit.org/show_bug.cgi?id=133078
+static const SpeculatedType SpecCell               = SpecObject | SpecString | SpecSymbol | SpecCellOther; // It's definitely a JSCell.
+static const SpeculatedType SpecBoolInt32          = 1ull << 25; // It's definitely an Int32 with value 0 or 1.
+static const SpeculatedType SpecNonBoolInt32       = 1ull << 26; // It's definitely an Int32 with value other than 0 or 1.
+static const SpeculatedType SpecInt32Only          = SpecBoolInt32 | SpecNonBoolInt32; // It's definitely an Int32.
+static const SpeculatedType SpecInt52Only          = 1ull << 27; // It's definitely an Int52 and we intend it to unbox it. It's also definitely not an Int32.
+static const SpeculatedType SpecAnyInt             = SpecInt32Only | SpecInt52Only; // It's something that we can do machine int arithmetic on.
+static const SpeculatedType SpecAnyIntAsDouble     = 1ull << 28; // It's definitely an Int52 and it's inside a double.
+static const SpeculatedType SpecNonIntAsDouble     = 1ull << 29; // It's definitely not an Int52 but it's a real number and it's a double.
+static const SpeculatedType SpecDoubleReal         = SpecNonIntAsDouble | SpecAnyIntAsDouble; // It's definitely a non-NaN double.
+static const SpeculatedType SpecDoublePureNaN      = 1ull << 30; // It's definitely a NaN that is safe to tag (i.e. pure).
+static const SpeculatedType SpecDoubleImpureNaN    = 1ull << 31; // It's definitely a NaN that is unsafe to tag (i.e. impure).
+static const SpeculatedType SpecDoubleNaN          = SpecDoublePureNaN | SpecDoubleImpureNaN; // It's definitely some kind of NaN.
+static const SpeculatedType SpecBytecodeDouble     = SpecDoubleReal | SpecDoublePureNaN; // It's either a non-NaN or a NaN double, but it's definitely not impure NaN.
+static const SpeculatedType SpecFullDouble         = SpecDoubleReal | SpecDoubleNaN; // It's either a non-NaN or a NaN double.
+static const SpeculatedType SpecBytecodeRealNumber = SpecInt32Only | SpecDoubleReal; // It's either an Int32 or a DoubleReal.
+static const SpeculatedType SpecFullRealNumber     = SpecAnyInt | SpecDoubleReal; // It's either an Int32 or a DoubleReal, or a Int52.
+static const SpeculatedType SpecBytecodeNumber     = SpecInt32Only | SpecBytecodeDouble; // It's either an Int32 or a Double, and the Double cannot be an impure NaN.
+static const SpeculatedType SpecFullNumber         = SpecAnyInt | SpecFullDouble; // It's either an Int32, Int52, or a Double, and the Double can be impure NaN.
+static const SpeculatedType SpecBoolean            = 1ull << 32; // It's definitely a Boolean.
+static const SpeculatedType SpecOther              = 1ull << 33; // It's definitely either Null or Undefined.
+static const SpeculatedType SpecMisc               = SpecBoolean | SpecOther; // It's definitely either a boolean, Null, or Undefined.
+static const SpeculatedType SpecHeapTop            = SpecCell | SpecBytecodeNumber | SpecMisc; // It can be any of the above, except for SpecInt52Only and SpecDoubleImpureNaN.
+static const SpeculatedType SpecPrimitive          = SpecString | SpecSymbol | SpecBytecodeNumber | SpecMisc; // It's any non-Object JSValue.
+static const SpeculatedType SpecEmpty              = 1ull << 34; // It's definitely an empty value marker.
+static const SpeculatedType SpecBytecodeTop        = SpecHeapTop | SpecEmpty; // It can be any of the above, except for SpecInt52Only and SpecDoubleImpureNaN. Corresponds to what could be found in a bytecode local.
+static const SpeculatedType SpecFullTop            = SpecBytecodeTop | SpecFullNumber; // It can be anything that bytecode could see plus exotic encodings of numbers.
 
 typedef bool (*SpeculatedTypeChecker)(SpeculatedType);
 
@@ -94,6 +106,16 @@ inline bool isCellSpeculation(SpeculatedType value)
     return !!(value & SpecCell) && !(value & ~SpecCell);
 }
 
+inline bool isCellOrOtherSpeculation(SpeculatedType value)
+{
+    return !!value && !(value & ~(SpecCell | SpecOther));
+}
+
+inline bool isNotCellSpeculation(SpeculatedType value)
+{
+    return !(value & SpecCell) && value;
+}
+
 inline bool isObjectSpeculation(SpeculatedType value)
 {
     return !!(value & SpecObject) && !(value & ~SpecObject);
@@ -119,11 +141,31 @@ inline bool isStringIdentSpeculation(SpeculatedType value)
     return value == SpecStringIdent;
 }
 
+inline bool isNotStringVarSpeculation(SpeculatedType value)
+{
+    return !(value & SpecStringVar);
+}
+
 inline bool isStringSpeculation(SpeculatedType value)
 {
     return !!value && (value & SpecString) == value;
 }
 
+inline bool isNotStringSpeculation(SpeculatedType value)
+{
+    return value && !(value & SpecString);
+}
+
+inline bool isStringOrOtherSpeculation(SpeculatedType value)
+{
+    return !!value && (value & (SpecString | SpecOther)) == value;
+}
+
+inline bool isSymbolSpeculation(SpeculatedType value)
+{
+    return value == SpecSymbol;
+}
+
 inline bool isArraySpeculation(SpeculatedType value)
 {
     return value == SpecArray;
@@ -134,6 +176,16 @@ inline bool isFunctionSpeculation(SpeculatedType value)
     return value == SpecFunction;
 }
 
+inline bool isProxyObjectSpeculation(SpeculatedType value)
+{
+    return value == SpecProxyObject;
+}
+
+inline bool isDerivedArraySpeculation(SpeculatedType value)
+{
+    return value == SpecDerivedArray;
+}
+
 inline bool isInt8ArraySpeculation(SpeculatedType value)
 {
     return value == SpecInt8Array;
@@ -179,9 +231,14 @@ inline bool isFloat64ArraySpeculation(SpeculatedType value)
     return value == SpecFloat64Array;
 }
 
-inline bool isArgumentsSpeculation(SpeculatedType value)
+inline bool isDirectArgumentsSpeculation(SpeculatedType value)
+{
+    return value == SpecDirectArguments;
+}
+
+inline bool isScopedArgumentsSpeculation(SpeculatedType value)
 {
-    return !!value && (value & SpecArguments) == value;
+    return value == SpecScopedArguments;
 }
 
 inline bool isActionableIntMutableArraySpeculation(SpeculatedType value)
@@ -210,13 +267,14 @@ inline bool isActionableTypedMutableArraySpeculation(SpeculatedType value)
 inline bool isActionableMutableArraySpeculation(SpeculatedType value)
 {
     return isArraySpeculation(value)
-        || isArgumentsSpeculation(value)
         || isActionableTypedMutableArraySpeculation(value);
 }
 
 inline bool isActionableArraySpeculation(SpeculatedType value)
 {
     return isStringSpeculation(value)
+        || isDirectArgumentsSpeculation(value)
+        || isScopedArgumentsSpeculation(value)
         || isActionableMutableArraySpeculation(value);
 }
 
@@ -235,49 +293,59 @@ inline bool isStringOrStringObjectSpeculation(SpeculatedType value)
     return !!value && !(value & ~(SpecString | SpecStringObject));
 }
 
+inline bool isRegExpObjectSpeculation(SpeculatedType value)
+{
+    return value == SpecRegExpObject;
+}
+
+inline bool isBoolInt32Speculation(SpeculatedType value)
+{
+    return value == SpecBoolInt32;
+}
+
 inline bool isInt32Speculation(SpeculatedType value)
 {
-    return value == SpecInt32;
+    return value && !(value & ~SpecInt32Only);
 }
 
-inline bool isInt32SpeculationForArithmetic(SpeculatedType value)
+inline bool isNotInt32Speculation(SpeculatedType value)
 {
-    return !(value & (SpecDouble | SpecInt52));
+    return value && !(value & SpecInt32Only);
 }
 
-inline bool isInt32SpeculationExpectingDefined(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculation(SpeculatedType value)
 {
-    return isInt32Speculation(value & ~SpecOther);
+    return value && !(value & ~(SpecBoolean | SpecInt32Only));
 }
 
-inline bool isInt52Speculation(SpeculatedType value)
+inline bool isInt32SpeculationForArithmetic(SpeculatedType value)
 {
-    return value == SpecInt52;
+    return !(value & (SpecFullDouble | SpecInt52Only));
 }
 
-inline bool isMachineIntSpeculation(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculationForArithmetic(SpeculatedType value)
 {
-    return !!value && (value & SpecMachineInt) == value;
+    return !(value & (SpecFullDouble | SpecInt52Only));
 }
 
-inline bool isMachineIntSpeculationExpectingDefined(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculationExpectingDefined(SpeculatedType value)
 {
-    return isMachineIntSpeculation(value & ~SpecOther);
+    return isInt32OrBooleanSpeculation(value & ~SpecOther);
 }
 
-inline bool isMachineIntSpeculationForArithmetic(SpeculatedType value)
+inline bool isInt52Speculation(SpeculatedType value)
 {
-    return !(value & SpecDouble);
+    return value == SpecInt52Only;
 }
 
-inline bool isInt52AsDoubleSpeculation(SpeculatedType value)
+inline bool isAnyIntSpeculation(SpeculatedType value)
 {
-    return value == SpecInt52AsDouble;
+    return !!value && (value & SpecAnyInt) == value;
 }
 
-inline bool isIntegerSpeculation(SpeculatedType value)
+inline bool isAnyIntAsDoubleSpeculation(SpeculatedType value)
 {
-    return !!value && (value & SpecInteger) == value;
+    return value == SpecAnyIntAsDouble;
 }
 
 inline bool isDoubleRealSpeculation(SpeculatedType value)
@@ -287,12 +355,12 @@ inline bool isDoubleRealSpeculation(SpeculatedType value)
 
 inline bool isDoubleSpeculation(SpeculatedType value)
 {
-    return !!value && (value & SpecDouble) == value;
+    return !!value && (value & SpecFullDouble) == value;
 }
 
 inline bool isDoubleSpeculationForArithmetic(SpeculatedType value)
 {
-    return !!(value & SpecDouble);
+    return !!(value & SpecFullDouble);
 }
 
 inline bool isBytecodeRealNumberSpeculation(SpeculatedType value)
@@ -315,14 +383,14 @@ inline bool isFullNumberSpeculation(SpeculatedType value)
     return !!(value & SpecFullNumber) && !(value & ~SpecFullNumber);
 }
 
-inline bool isBytecodeNumberSpeculationExpectingDefined(SpeculatedType value)
+inline bool isFullNumberOrBooleanSpeculation(SpeculatedType value)
 {
-    return isBytecodeNumberSpeculation(value & ~SpecOther);
+    return value && !(value & ~(SpecFullNumber | SpecBoolean));
 }
 
-inline bool isFullNumberSpeculationExpectingDefined(SpeculatedType value)
+inline bool isFullNumberOrBooleanSpeculationExpectingDefined(SpeculatedType value)
 {
-    return isFullNumberSpeculation(value & ~SpecOther);
+    return isFullNumberOrBooleanSpeculation(value & ~SpecOther);
 }
 
 inline bool isBooleanSpeculation(SpeculatedType value)
@@ -330,11 +398,21 @@ inline bool isBooleanSpeculation(SpeculatedType value)
     return value == SpecBoolean;
 }
 
+inline bool isNotBooleanSpeculation(SpeculatedType value)
+{
+    return value && !(value & SpecBoolean);
+}
+
 inline bool isOtherSpeculation(SpeculatedType value)
 {
     return value == SpecOther;
 }
 
+inline bool isMiscSpeculation(SpeculatedType value)
+{
+    return !!value && !(value & ~SpecMisc);
+}
+
 inline bool isOtherOrEmptySpeculation(SpeculatedType value)
 {
     return !value || value == SpecOther;
@@ -345,6 +423,16 @@ inline bool isEmptySpeculation(SpeculatedType value)
     return value == SpecEmpty;
 }
 
+inline bool isUntypedSpeculationForArithmetic(SpeculatedType value)
+{
+    return !!(value & ~(SpecFullNumber | SpecBoolean));
+}
+
+inline bool isUntypedSpeculationForBitOps(SpeculatedType value)
+{
+    return !!(value & ~(SpecFullNumber | SpecBoolean | SpecOther));
+}
+
 void dumpSpeculation(PrintStream&, SpeculatedType);
 void dumpSpeculationAbbreviated(PrintStream&, SpeculatedType);
 
@@ -378,10 +466,30 @@ SpeculatedType speculationFromClassInfo(const ClassInfo*);
 SpeculatedType speculationFromStructure(Structure*);
 SpeculatedType speculationFromCell(JSCell*);
 SpeculatedType speculationFromValue(JSValue);
+SpeculatedType speculationFromJSType(JSType);
 
 SpeculatedType speculationFromTypedArrayType(TypedArrayType); // only valid for typed views.
 TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType);
 
-} // namespace JSC
+SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType);
+
+bool valuesCouldBeEqual(SpeculatedType, SpeculatedType);
 
-#endif // SpeculatedType_h
+// Precise computation of the type of the result of a double computation after we
+// already know that the inputs are doubles and that the result must be a double. Use
+// the closest one of these that applies.
+SpeculatedType typeOfDoubleSum(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleDifference(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleProduct(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleQuotient(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleMinMax(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleNegation(SpeculatedType);
+SpeculatedType typeOfDoubleAbs(SpeculatedType);
+SpeculatedType typeOfDoubleRounding(SpeculatedType);
+SpeculatedType typeOfDoublePow(SpeculatedType, SpeculatedType);
+
+// This conservatively models the behavior of arbitrary double operations.
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType);
+
+} // namespace JSC
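
Most predicates in the rewritten header reduce to one idiom: "value is a nonempty subset of a mask". The two spellings used above, value && !(value & ~Mask) and !!value && (value & Mask) == value, are equivalent. A self-contained check of that idiom with toy masks (not the real constants):

    #include <cassert>
    #include <cstdint>

    typedef uint64_t Spec;
    const Spec StringIdent = 1ull << 0;
    const Spec StringVar   = 1ull << 1;
    const Spec String      = StringIdent | StringVar;

    // Nonempty, and every set bit lies inside the mask.
    bool isStringSpeculation(Spec value)
    {
        return !!value && (value & String) == value;
    }

    int main()
    {
        assert(isStringSpeculation(StringIdent));
        assert(isStringSpeculation(String));
        assert(!isStringSpeculation(0));                    // empty proves nothing
        assert(!isStringSpeculation(String | (1ull << 5))); // a stray bit disqualifies
        return 0;
    }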
diff --git a/Source/JavaScriptCore/bytecode/StructureSet.cpp b/Source/JavaScriptCore/bytecode/StructureSet.cpp
new file mode 100644
index 000000000..2ccb8f0ba
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/StructureSet.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "StructureSet.h"
+
+#include "TrackedReferences.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC {
+
+void StructureSet::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    CommaPrinter comma;
+    out.print("[");
+    forEach([&] (Structure* structure) { out.print(comma, inContext(*structure, context)); });
+    out.print("]");
+}
+
+void StructureSet::dump(PrintStream& out) const
+{
+    dumpInContext(out, nullptr);
+}
+
+} // namespace JSC
+
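
dumpInContext() above relies on WTF's CommaPrinter, which prints nothing before the first item and a separator before each later one, so the loop needs no first-iteration special case. A rough standalone equivalent of that idiom (a hypothetical minimal type, not WTF's class):

    #include <cstdio>

    class Comma {
    public:
        // First call yields "", every later call yields the separator.
        const char* next() { bool first = m_first; m_first = false; return first ? "" : ", "; }
    private:
        bool m_first { true };
    };

    int main()
    {
        const int items[] = { 10, 20, 30 };
        Comma comma;
        std::printf("[");
        for (int item : items)
            std::printf("%s%d", comma.next(), item);
        std::printf("]\n"); // prints: [10, 20, 30]
        return 0;
    }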
diff --git a/Source/JavaScriptCore/bytecode/StructureSet.h b/Source/JavaScriptCore/bytecode/StructureSet.h
index 4cdcd01cb..8654ca500 100644
--- a/Source/JavaScriptCore/bytecode/StructureSet.h
+++ b/Source/JavaScriptCore/bytecode/StructureSet.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,165 +23,46 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef StructureSet_h
-#define StructureSet_h
+#pragma once
 
 #include "ArrayProfile.h"
+#include "DumpContext.h"
 #include "SpeculatedType.h"
 #include "Structure.h"
-#include "DumpContext.h"
-#include <wtf/CommaPrinter.h>
-#include <wtf/Vector.h>
+#include <wtf/TinyPtrSet.h>
 
 namespace JSC {
 
-namespace DFG {
-class StructureAbstractValue;
-}
+class TrackedReferences;
 
-class StructureSet {
+class StructureSet : public TinyPtrSet<Structure*> {
 public:
-    StructureSet() { }
-    
-    StructureSet(Structure* structure)
-    {
-        m_structures.append(structure);
-    }
-    
-    void clear()
-    {
-        m_structures.clear();
-    }
-    
-    void add(Structure* structure)
-    {
-        ASSERT(!contains(structure));
-        m_structures.append(structure);
-    }
-    
-    bool addAll(const StructureSet& other)
-    {
-        bool changed = false;
-        for (size_t i = 0; i < other.size(); ++i) {
-            if (contains(other[i]))
-                continue;
-            add(other[i]);
-            changed = true;
-        }
-        return changed;
-    }
+    // I really want to do this:
+    // using TinyPtrSet::TinyPtrSet;
+    //
+    // But I can't because Windows.
     
-    void remove(Structure* structure)
+    StructureSet()
     {
-        for (size_t i = 0; i < m_structures.size(); ++i) {
-            if (m_structures[i] != structure)
-                continue;
-            
-            m_structures[i] = m_structures.last();
-            m_structures.removeLast();
-            return;
-        }
     }
     
-    bool contains(Structure* structure) const
-    {
-        for (size_t i = 0; i < m_structures.size(); ++i) {
-            if (m_structures[i] == structure)
-                return true;
-        }
-        return false;
-    }
-    
-    bool containsOnly(Structure* structure) const
-    {
-        if (size() != 1)
-            return false;
-        return singletonStructure() == structure;
-    }
-    
-    bool isSubsetOf(const StructureSet& other) const
-    {
-        for (size_t i = 0; i < m_structures.size(); ++i) {
-            if (!other.contains(m_structures[i]))
-                return false;
-        }
-        return true;
-    }
-    
-    bool isSupersetOf(const StructureSet& other) const
-    {
-        return other.isSubsetOf(*this);
-    }
-    
-    size_t size() const { return m_structures.size(); }
-    
-    // Call this if you know that the structure set must consist of exactly
-    // one structure.
-    Structure* singletonStructure() const
-    {
-        ASSERT(m_structures.size() == 1);
-        return m_structures[0];
-    }
-    
-    Structure* at(size_t i) const { return m_structures.at(i); }
-    
-    Structure* operator[](size_t i) const { return at(i); }
-    
-    Structure* last() const { return m_structures.last(); }
-
-    SpeculatedType speculationFromStructures() const
-    {
-        SpeculatedType result = SpecNone;
-        
-        for (size_t i = 0; i < m_structures.size(); ++i)
-            mergeSpeculation(result, speculationFromStructure(m_structures[i]));
-        
-        return result;
-    }
-    
-    ArrayModes arrayModesFromStructures() const
-    {
-        ArrayModes result = 0;
-        
-        for (size_t i = 0; i < m_structures.size(); ++i)
-            mergeArrayModes(result, asArrayModes(m_structures[i]->indexingType()));
-        
-        return result;
-    }
-    
-    bool operator==(const StructureSet& other) const
+    StructureSet(Structure* structure)
+        : TinyPtrSet(structure)
     {
-        if (m_structures.size() != other.m_structures.size())
-            return false;
-        
-        for (size_t i = 0; i < m_structures.size(); ++i) {
-            if (!other.contains(m_structures[i]))
-                return false;
-        }
-        
-        return true;
     }
     
-    void dumpInContext(PrintStream& out, DumpContext* context) const
+    ALWAYS_INLINE StructureSet(const StructureSet& other)
+        : TinyPtrSet(other)
     {
-        CommaPrinter comma;
-        out.print("[");
-        for (size_t i = 0; i < m_structures.size(); ++i)
-            out.print(comma, inContext(*m_structures[i], context));
-        out.print("]");
     }
     
-    void dump(PrintStream& out) const
+    Structure* onlyStructure() const
     {
-        dumpInContext(out, 0);
+        return onlyEntry();
     }
     
-private:
-    friend class DFG::StructureAbstractValue;
-    
-    Vector<Structure*, 2> m_structures;
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    void dump(PrintStream&) const;
 };
 
 } // namespace JSC
-
-#endif // StructureSet_h
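
A note on the rewrite above: StructureSet is now a thin veneer over WTF::TinyPtrSet. The following is a hedged sketch of the idea behind that container (illustrative only, not WTF's implementation; TinyPtrSetSketch and its members are made-up names): one element is stored inline in the pointer slot, and an out-of-line list is allocated only once a second distinct element arrives.

#include <algorithm>
#include <vector>

template<typename T>
class TinyPtrSetSketch {
public:
    TinyPtrSetSketch() : m_single(nullptr) { }
    explicit TinyPtrSetSketch(T* element) : m_single(element) { }

    // Returns true if the element was newly added.
    bool add(T* element)
    {
        if (contains(element))
            return false;
        if (!m_single && m_list.empty()) {
            m_single = element; // Common case: one element, no allocation.
            return true;
        }
        if (m_single) {
            m_list.push_back(m_single); // Spill the inline element out of line.
            m_single = nullptr;
        }
        m_list.push_back(element);
        return true;
    }

    bool contains(T* element) const
    {
        if (m_single)
            return m_single == element;
        return std::find(m_list.begin(), m_list.end(), element) != m_list.end();
    }

private:
    T* m_single;            // Inline storage for the one-element case.
    std::vector<T*> m_list; // Out-of-line storage once the set grows.
};
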
diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
index 5cfb3d1e8..f27e507b7 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -29,49 +29,67 @@
 #if ENABLE(JIT)
 
 #include "CodeBlock.h"
+#include "JSCInlines.h"
 #include "StructureStubInfo.h"
 
 namespace JSC {
 
-StructureStubClearingWatchpoint::~StructureStubClearingWatchpoint() { }
+StructureStubClearingWatchpoint::~StructureStubClearingWatchpoint()
+{
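+    // Tear down the chain iteratively: each WTFMove detaches the next link
+    // before its owner is destroyed, so a long watchpoint list cannot
+    // overflow the stack with recursive destructor calls.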
+    for (auto current = WTFMove(m_next); current; current = WTFMove(current->m_next)) { }
+}
 
 StructureStubClearingWatchpoint* StructureStubClearingWatchpoint::push(
+    const ObjectPropertyCondition& key,
     WatchpointsOnStructureStubInfo& holder,
-    OwnPtr<StructureStubClearingWatchpoint>& head)
+    std::unique_ptr<StructureStubClearingWatchpoint>& head)
 {
-    head = adoptPtr(new StructureStubClearingWatchpoint(holder, head.release()));
+    head = std::make_unique<StructureStubClearingWatchpoint>(key, holder, WTFMove(head));
     return head.get();
 }
 
-void StructureStubClearingWatchpoint::fireInternal()
+void StructureStubClearingWatchpoint::fireInternal(const FireDetail&)
 {
-    // This will implicitly cause my own demise: stub reset removes all watchpoints.
-    // That works, because deleting a watchpoint removes it from the set's list, and
-    // the set's list traversal for firing is robust against the set changing.
-    m_holder.codeBlock()->resetStub(*m_holder.stubInfo());
+    if (!m_key || !m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+        // This will implicitly cause my own demise: stub reset removes all watchpoints.
+        // That works, because deleting a watchpoint removes it from the set's list, and
+        // the set's list traversal for firing is robust against the set changing.
+        ConcurrentJSLocker locker(m_holder.codeBlock()->m_lock);
+        m_holder.stubInfo()->reset(m_holder.codeBlock());
+        return;
+    }
+
+    if (m_key.kind() == PropertyCondition::Presence) {
+        // If this was a presence condition, let's watch the property for replacements. This is profitable
+        // for the DFG, which will want the replacement set to be valid in order to do constant folding.
+        VM& vm = *Heap::heap(m_key.object())->vm();
+        m_key.object()->structure()->startWatchingPropertyForReplacements(vm, m_key.offset());
+    }
+
+    m_key.object()->structure()->addTransitionWatchpoint(this);
 }
 
 WatchpointsOnStructureStubInfo::~WatchpointsOnStructureStubInfo()
 {
 }
 
-StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint()
+StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint(const ObjectPropertyCondition& key)
 {
-    return StructureStubClearingWatchpoint::push(*this, m_head);
+    return StructureStubClearingWatchpoint::push(key, *this, m_head);
 }
 
 StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
-    RefPtr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock,
-    StructureStubInfo* stubInfo)
+    std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock,
+    StructureStubInfo* stubInfo, const ObjectPropertyCondition& key)
 {
     if (!holderRef)
-        holderRef = adoptRef(new WatchpointsOnStructureStubInfo(codeBlock, stubInfo));
+        holderRef = std::make_unique<WatchpointsOnStructureStubInfo>(codeBlock, stubInfo);
     else {
         ASSERT(holderRef->m_codeBlock == codeBlock);
         ASSERT(holderRef->m_stubInfo == stubInfo);
     }
     
-    return holderRef->addWatchpoint();
+    return holderRef->addWatchpoint(key);
 }
 
 } // namespace JSC
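
The watchpoint list above follows a simple ownership discipline: each watchpoint owns its successor through a std::unique_ptr, push() splices a new node in front of the head, and the destructor unlinks the chain iteratively. A self-contained hedged sketch of the same pattern (Node, push, and the int payload are illustrative names):

#include <memory>

struct Node {
    Node(int value, std::unique_ptr<Node> next)
        : value(value), next(std::move(next)) { }

    ~Node()
    {
        // Detach each link before its owner dies; a recursive ~Node over a
        // long chain could otherwise overflow the stack.
        for (auto current = std::move(next); current; current = std::move(current->next)) { }
    }

    int value;
    std::unique_ptr<Node> next;
};

Node* push(int value, std::unique_ptr<Node>& head)
{
    head = std::make_unique<Node>(value, std::move(head));
    return head.get(); // The list keeps ownership; callers see a raw pointer.
}
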
diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
index 4c6bdecf4..665c56a98 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,60 +23,55 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef StructureStubClearingWatchpoint_h
-#define StructureStubClearingWatchpoint_h
+#pragma once
 
+#include "ObjectPropertyCondition.h"
 #include "Watchpoint.h"
-#include <wtf/Platform.h>
 
 #if ENABLE(JIT)
 
 #include <wtf/FastMalloc.h>
 #include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/RefPtr.h>
 
 namespace JSC {
 
 class CodeBlock;
+class StructureStubInfo;
 class WatchpointsOnStructureStubInfo;
-struct StructureStubInfo;
 
 class StructureStubClearingWatchpoint : public Watchpoint {
     WTF_MAKE_NONCOPYABLE(StructureStubClearingWatchpoint);
     WTF_MAKE_FAST_ALLOCATED;
 public:
     StructureStubClearingWatchpoint(
-        WatchpointsOnStructureStubInfo& holder)
-        : m_holder(holder)
-    {
-    }
-    
-    StructureStubClearingWatchpoint(
+        const ObjectPropertyCondition& key,
         WatchpointsOnStructureStubInfo& holder,
-        PassOwnPtr<StructureStubClearingWatchpoint> next)
-        : m_holder(holder)
-        , m_next(next)
+        std::unique_ptr<StructureStubClearingWatchpoint> next)
+        : m_key(key)
+        , m_holder(holder)
+        , m_next(WTFMove(next))
     {
     }
     
     virtual ~StructureStubClearingWatchpoint();
     
     static StructureStubClearingWatchpoint* push(
+        const ObjectPropertyCondition& key,
         WatchpointsOnStructureStubInfo& holder,
-        OwnPtr<StructureStubClearingWatchpoint>& head);
+        std::unique_ptr<StructureStubClearingWatchpoint>& head);
 
 protected:
-    virtual void fireInternal() override;
+    void fireInternal(const FireDetail&) override;
 
 private:
+    ObjectPropertyCondition m_key;
     WatchpointsOnStructureStubInfo& m_holder;
-    OwnPtr<StructureStubClearingWatchpoint> m_next;
+    std::unique_ptr<StructureStubClearingWatchpoint> m_next;
 };
 
-class WatchpointsOnStructureStubInfo : public RefCounted<WatchpointsOnStructureStubInfo> {
+class WatchpointsOnStructureStubInfo {
+    WTF_MAKE_NONCOPYABLE(WatchpointsOnStructureStubInfo);
+    WTF_MAKE_FAST_ALLOCATED;
 public:
     WatchpointsOnStructureStubInfo(CodeBlock* codeBlock, StructureStubInfo* stubInfo)
         : m_codeBlock(codeBlock)
@@ -86,11 +81,11 @@ public:
     
     ~WatchpointsOnStructureStubInfo();
     
-    StructureStubClearingWatchpoint* addWatchpoint();
+    StructureStubClearingWatchpoint* addWatchpoint(const ObjectPropertyCondition& key);
     
     static StructureStubClearingWatchpoint* ensureReferenceAndAddWatchpoint(
-        RefPtr<WatchpointsOnStructureStubInfo>& holderRef,
-        CodeBlock*, StructureStubInfo*);
+        std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef,
+        CodeBlock*, StructureStubInfo*, const ObjectPropertyCondition& key);
     
     CodeBlock* codeBlock() const { return m_codeBlock; }
     StructureStubInfo* stubInfo() const { return m_stubInfo; }
@@ -98,12 +93,9 @@ public:
 private:
     CodeBlock* m_codeBlock;
     StructureStubInfo* m_stubInfo;
-    OwnPtr<StructureStubClearingWatchpoint> m_head;
+    std::unique_ptr<StructureStubClearingWatchpoint> m_head;
 };
 
 } // namespace JSC
 
 #endif // ENABLE(JIT)
-
-#endif // StructureStubClearingWatchpoint_h
-
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
index 91413dfbf..70b767c57 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,108 +27,264 @@
 #include "StructureStubInfo.h"
 
 #include "JSObject.h"
-#include "PolymorphicPutByIdList.h"
-
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
+#include "Repatch.h"
 
 namespace JSC {
 
 #if ENABLE(JIT)
+
+static const bool verbose = false;
+
+StructureStubInfo::StructureStubInfo(AccessType accessType)
+    : callSiteIndex(UINT_MAX)
+    , accessType(accessType)
+    , cacheType(CacheType::Unset)
+    , countdown(1) // For a totally clear stub, we'll patch it after the first execution.
+    , repatchCount(0)
+    , numberOfCoolDowns(0)
+    , bufferingCountdown(Options::repatchBufferingCountdown())
+    , resetByGC(false)
+    , tookSlowPath(false)
+    , everConsidered(false)
+{
+}
+
+StructureStubInfo::~StructureStubInfo()
+{
+}
+
+void StructureStubInfo::initGetByIdSelf(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset)
+{
+    cacheType = CacheType::GetByIdSelf;
+    
+    u.byIdSelf.baseObjectStructure.set(
+        *codeBlock->vm(), codeBlock, baseObjectStructure);
+    u.byIdSelf.offset = offset;
+}
+
+void StructureStubInfo::initArrayLength()
+{
+    cacheType = CacheType::ArrayLength;
+}
+
+void StructureStubInfo::initPutByIdReplace(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset)
+{
+    cacheType = CacheType::PutByIdReplace;
+    
+    u.byIdSelf.baseObjectStructure.set(
+        *codeBlock->vm(), codeBlock, baseObjectStructure);
+    u.byIdSelf.offset = offset;
+}
+
+void StructureStubInfo::initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess> stub)
+{
+    cacheType = CacheType::Stub;
+    u.stub = stub.release();
+}
+
 void StructureStubInfo::deref()
 {
-    switch (accessType) {
-    case access_get_by_id_self_list: {
-        PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
-        delete polymorphicStructures;
+    switch (cacheType) {
+    case CacheType::Stub:
+        delete u.stub;
         return;
-    }
-    case access_get_by_id_proto_list: {
-        PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
-        delete polymorphicStructures;
+    case CacheType::Unset:
+    case CacheType::GetByIdSelf:
+    case CacheType::PutByIdReplace:
+    case CacheType::ArrayLength:
         return;
     }
-    case access_put_by_id_list:
-        delete u.putByIdList.list;
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void StructureStubInfo::aboutToDie()
+{
+    switch (cacheType) {
+    case CacheType::Stub:
+        u.stub->aboutToDie();
         return;
-    case access_in_list: {
-        PolymorphicAccessStructureList* polymorphicStructures = u.inList.structureList;
-        delete polymorphicStructures;
+    case CacheType::Unset:
+    case CacheType::GetByIdSelf:
+    case CacheType::PutByIdReplace:
+    case CacheType::ArrayLength:
         return;
     }
-    case access_get_by_id_self:
-    case access_get_by_id_proto:
-    case access_get_by_id_chain:
-    case access_put_by_id_transition_normal:
-    case access_put_by_id_transition_direct:
-    case access_put_by_id_replace:
-    case access_unset:
-    case access_get_by_id_generic:
-    case access_put_by_id_generic:
-    case access_get_array_length:
-    case access_get_string_length:
-        // These instructions don't have to release any allocated memory
-        return;
-    default:
-        RELEASE_ASSERT_NOT_REACHED();
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+AccessGenerationResult StructureStubInfo::addAccessCase(
+    CodeBlock* codeBlock, const Identifier& ident, std::unique_ptr<AccessCase> accessCase)
+{
+    VM& vm = *codeBlock->vm();
+    
+    if (verbose)
+        dataLog("Adding access case: ", accessCase, "\n");
+    
+    if (!accessCase)
+        return AccessGenerationResult::GaveUp;
+    
+    AccessGenerationResult result;
+    
+    if (cacheType == CacheType::Stub) {
+        result = u.stub->addCase(vm, codeBlock, *this, ident, WTFMove(accessCase));
+        
+        if (verbose)
+            dataLog("Had stub, result: ", result, "\n");
+
+        if (!result.buffered()) {
+            bufferedStructures.clear();
+            return result;
+        }
+    } else {
+        std::unique_ptr<PolymorphicAccess> access = std::make_unique<PolymorphicAccess>();
+        
+        Vector<std::unique_ptr<AccessCase>, 2> accessCases;
+        
+        std::unique_ptr<AccessCase> previousCase =
+            AccessCase::fromStructureStubInfo(vm, codeBlock, *this);
+        if (previousCase)
+            accessCases.append(WTFMove(previousCase));
+        
+        accessCases.append(WTFMove(accessCase));
+        
+        result = access->addCases(vm, codeBlock, *this, ident, WTFMove(accessCases));
+        
+        if (verbose)
+            dataLog("Created stub, result: ", result, "\n");
+
+        if (!result.buffered()) {
+            bufferedStructures.clear();
+            return result;
+        }
+        
+        initStub(codeBlock, WTFMove(access));
+    }
+    
+    RELEASE_ASSERT(!result.generatedSomeCode());
+    
+    // If we didn't buffer any cases then bail. If this made no changes then we'll just try again
+    // subject to cool-down.
+    if (!result.buffered()) {
+        if (verbose)
+            dataLog("Didn't buffer anything, bailing.\n");
+        bufferedStructures.clear();
+        return result;
     }
+    
+    // The buffering countdown tells us if we should be repatching now.
+    if (bufferingCountdown) {
+        if (verbose)
+            dataLog("Countdown is too high: ", bufferingCountdown, ".\n");
+        return result;
+    }
+    
+    // Forget the buffered structures so that all future attempts to cache get fully handled by the
+    // PolymorphicAccess.
+    bufferedStructures.clear();
+    
+    result = u.stub->regenerate(vm, codeBlock, *this, ident);
+    
+    if (verbose)
+        dataLog("Regeneration result: ", result, "\n");
+    
+    RELEASE_ASSERT(!result.buffered());
+    
+    if (!result.generatedSomeCode())
+        return result;
+    
+    // If we generated some code then we don't want to attempt to repatch in the future until we
+    // gather enough cases.
+    bufferingCountdown = Options::repatchBufferingCountdown();
+    return result;
 }
 
-bool StructureStubInfo::visitWeakReferences()
+void StructureStubInfo::reset(CodeBlock* codeBlock)
 {
+    bufferedStructures.clear();
+    
+    if (cacheType == CacheType::Unset)
+        return;
+    
+    if (Options::verboseOSR()) {
+        // This can be called from GC destructor calls, so we don't try to do a full dump
+        // of the CodeBlock.
+        dataLog("Clearing structure cache (kind ", static_cast(accessType), ") in ", RawPointer(codeBlock), ".\n");
+    }
+
     switch (accessType) {
-    case access_get_by_id_self:
-        if (!Heap::isMarked(u.getByIdSelf.baseObjectStructure.get()))
-            return false;
+    case AccessType::TryGet:
+        resetGetByID(codeBlock, *this, GetByIDKind::Try);
         break;
-    case access_get_by_id_proto:
-        if (!Heap::isMarked(u.getByIdProto.baseObjectStructure.get())
-            || !Heap::isMarked(u.getByIdProto.prototypeStructure.get()))
-            return false;
+    case AccessType::Get:
+        resetGetByID(codeBlock, *this, GetByIDKind::Normal);
         break;
-    case access_get_by_id_chain:
-        if (!Heap::isMarked(u.getByIdChain.baseObjectStructure.get())
-            || !Heap::isMarked(u.getByIdChain.chain.get()))
-            return false;
+    case AccessType::Put:
+        resetPutByID(codeBlock, *this);
         break;
-    case access_get_by_id_self_list: {
-        PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
-        if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize))
-            return false;
-        break;
-    }
-    case access_get_by_id_proto_list: {
-        PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
-        if (!polymorphicStructures->visitWeak(u.getByIdProtoList.listSize))
-            return false;
+    case AccessType::In:
+        resetIn(codeBlock, *this);
         break;
     }
-    case access_put_by_id_transition_normal:
-    case access_put_by_id_transition_direct:
-        if (!Heap::isMarked(u.putByIdTransition.previousStructure.get())
-            || !Heap::isMarked(u.putByIdTransition.structure.get())
-            || !Heap::isMarked(u.putByIdTransition.chain.get()))
-            return false;
-        break;
-    case access_put_by_id_replace:
-        if (!Heap::isMarked(u.putByIdReplace.baseObjectStructure.get()))
-            return false;
-        break;
-    case access_put_by_id_list:
-        if (!u.putByIdList.list->visitWeak())
-            return false;
+    
+    deref();
+    cacheType = CacheType::Unset;
+}
+
+void StructureStubInfo::visitWeakReferences(CodeBlock* codeBlock)
+{
+    VM& vm = *codeBlock->vm();
+    
+    bufferedStructures.genericFilter(
+        [&] (Structure* structure) -> bool {
+            return Heap::isMarked(structure);
+        });
+
+    switch (cacheType) {
+    case CacheType::GetByIdSelf:
+    case CacheType::PutByIdReplace:
+        if (Heap::isMarked(u.byIdSelf.baseObjectStructure.get()))
+            return;
         break;
-    case access_in_list: {
-        PolymorphicAccessStructureList* polymorphicStructures = u.inList.structureList;
-        if (!polymorphicStructures->visitWeak(u.inList.listSize))
-            return false;
+    case CacheType::Stub:
+        if (u.stub->visitWeak(vm))
+            return;
         break;
-    }
     default:
-        // The rest of the instructions don't require references, so there is no need to
-        // do anything.
-        break;
+        return;
     }
+
+    reset(codeBlock);
+    resetByGC = true;
+}
+
+bool StructureStubInfo::propagateTransitions(SlotVisitor& visitor)
+{
+    switch (cacheType) {
+    case CacheType::Unset:
+    case CacheType::ArrayLength:
+        return true;
+    case CacheType::GetByIdSelf:
+    case CacheType::PutByIdReplace:
+        return u.byIdSelf.baseObjectStructure->markIfCheap(visitor);
+    case CacheType::Stub:
+        return u.stub->propagateTransitions(visitor);
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
     return true;
 }
-#endif
+
+bool StructureStubInfo::containsPC(void* pc) const
+{
+    if (cacheType != CacheType::Stub)
+        return false;
+    return u.stub->containsPC(pc);
+}
+
+#endif // ENABLE(JIT)
 
 } // namespace JSC
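
addAccessCase() above implements a buffering policy: new cases are recorded cheaply, and the stub is regenerated only once the buffering countdown (decremented in considerCaching(), defined in StructureStubInfo.h below) reaches zero, amortizing repatch cost over several cases. A hedged model of that policy (the struct, its names, and the countdown value of 2 are illustrative; the real value comes from Options::repatchBufferingCountdown()):

#include <cstdio>
#include <vector>

struct BufferedICSketch {
    unsigned bufferingCountdown = 2;  // Stand-in for Options::repatchBufferingCountdown().
    std::vector<int> bufferedCases;   // Stand-in for the buffered AccessCases.

    void addCase(int accessCase)
    {
        bufferedCases.push_back(accessCase);
        if (bufferingCountdown) {
            bufferingCountdown--;     // Keep buffering; no code generation yet.
            return;
        }
        // Countdown expired: regenerate once with everything buffered so far.
        std::printf("regenerating stub with %zu cases\n", bufferedCases.size());
        bufferedCases.clear();
        bufferingCountdown = 2;       // Back off until more cases accumulate.
    }
};
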
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
index 5463f3e95..b091e2157 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,266 +23,194 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef StructureStubInfo_h
-#define StructureStubInfo_h
-
-#include <wtf/Platform.h>
+#pragma once
 
+#include "CodeBlock.h"
 #include "CodeOrigin.h"
 #include "Instruction.h"
 #include "JITStubRoutine.h"
 #include "MacroAssembler.h"
-#include "Opcode.h"
-#include "PolymorphicAccessStructureList.h"
+#include "ObjectPropertyConditionSet.h"
+#include "Options.h"
 #include "RegisterSet.h"
 #include "Structure.h"
+#include "StructureSet.h"
 #include "StructureStubClearingWatchpoint.h"
-#include <wtf/OwnPtr.h>
 
 namespace JSC {
 
 #if ENABLE(JIT)
 
-class PolymorphicPutByIdList;
+class AccessCase;
+class AccessGenerationResult;
+class PolymorphicAccess;
 
-enum AccessType {
-    access_get_by_id_self,
-    access_get_by_id_proto,
-    access_get_by_id_chain,
-    access_get_by_id_self_list,
-    access_get_by_id_proto_list,
-    access_put_by_id_transition_normal,
-    access_put_by_id_transition_direct,
-    access_put_by_id_replace,
-    access_put_by_id_list,
-    access_unset,
-    access_get_by_id_generic,
-    access_put_by_id_generic,
-    access_get_array_length,
-    access_get_string_length,
-    access_in_list
+enum class AccessType : int8_t {
+    Get,
+    TryGet,
+    Put,
+    In
 };
 
-inline bool isGetByIdAccess(AccessType accessType)
-{
-    switch (accessType) {
-    case access_get_by_id_self:
-    case access_get_by_id_proto:
-    case access_get_by_id_chain:
-    case access_get_by_id_self_list:
-    case access_get_by_id_proto_list:
-    case access_get_by_id_generic:
-    case access_get_array_length:
-    case access_get_string_length:
-        return true;
-    default:
-        return false;
-    }
-}
-    
-inline bool isPutByIdAccess(AccessType accessType)
-{
-    switch (accessType) {
-    case access_put_by_id_transition_normal:
-    case access_put_by_id_transition_direct:
-    case access_put_by_id_replace:
-    case access_put_by_id_list:
-    case access_put_by_id_generic:
-        return true;
-    default:
-        return false;
-    }
-}
-
-inline bool isInAccess(AccessType accessType)
-{
-    switch (accessType) {
-    case access_in_list:
-        return true;
-    default:
-        return false;
-    }
-}
-
-struct StructureStubInfo {
-    StructureStubInfo()
-        : accessType(access_unset)
-        , seen(false)
-        , resetByGC(false)
-    {
-    }
-
-    void initGetByIdSelf(VM& vm, JSCell* owner, Structure* baseObjectStructure)
-    {
-        accessType = access_get_by_id_self;
-
-        u.getByIdSelf.baseObjectStructure.set(vm, owner, baseObjectStructure);
-    }
-
-    void initGetByIdChain(VM& vm, JSCell* owner, Structure* baseObjectStructure, StructureChain* chain, unsigned count, bool isDirect)
-    {
-        accessType = access_get_by_id_chain;
-
-        u.getByIdChain.baseObjectStructure.set(vm, owner, baseObjectStructure);
-        u.getByIdChain.chain.set(vm, owner, chain);
-        u.getByIdChain.count = count;
-        u.getByIdChain.isDirect = isDirect;
-    }
-
-    void initGetByIdSelfList(PolymorphicAccessStructureList* structureList, int listSize, bool didSelfPatching = false)
-    {
-        accessType = access_get_by_id_self_list;
-
-        u.getByIdSelfList.structureList = structureList;
-        u.getByIdSelfList.listSize = listSize;
-        u.getByIdSelfList.didSelfPatching = didSelfPatching;
-    }
+enum class CacheType : int8_t {
+    Unset,
+    GetByIdSelf,
+    PutByIdReplace,
+    Stub,
+    ArrayLength
+};
 
-    void initGetByIdProtoList(PolymorphicAccessStructureList* structureList, int listSize)
-    {
-        accessType = access_get_by_id_proto_list;
+class StructureStubInfo {
+    WTF_MAKE_NONCOPYABLE(StructureStubInfo);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    StructureStubInfo(AccessType);
+    ~StructureStubInfo();
 
-        u.getByIdProtoList.structureList = structureList;
-        u.getByIdProtoList.listSize = listSize;
-    }
+    void initGetByIdSelf(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
+    void initArrayLength();
+    void initPutByIdReplace(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
+    void initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess>);
 
-    // PutById*
+    AccessGenerationResult addAccessCase(CodeBlock*, const Identifier&, std::unique_ptr<AccessCase>);
 
-    void initPutByIdTransition(VM& vm, JSCell* owner, Structure* previousStructure, Structure* structure, StructureChain* chain, bool isDirect)
-    {
-        if (isDirect)
-            accessType = access_put_by_id_transition_direct;
-        else
-            accessType = access_put_by_id_transition_normal;
+    void reset(CodeBlock*);
 
-        u.putByIdTransition.previousStructure.set(vm, owner, previousStructure);
-        u.putByIdTransition.structure.set(vm, owner, structure);
-        u.putByIdTransition.chain.set(vm, owner, chain);
-    }
+    void deref();
+    void aboutToDie();
 
-    void initPutByIdReplace(VM& vm, JSCell* owner, Structure* baseObjectStructure)
-    {
-        accessType = access_put_by_id_replace;
+    // Check if the stub has weak references that are dead. If it does, then it resets itself,
+    // either entirely or just enough to ensure that those dead pointers don't get used anymore.
+    void visitWeakReferences(CodeBlock*);
     
-        u.putByIdReplace.baseObjectStructure.set(vm, owner, baseObjectStructure);
-    }
+    // This returns true if it has marked everything that it will ever mark.
+    bool propagateTransitions(SlotVisitor&);
         
-    void initPutByIdList(PolymorphicPutByIdList* list)
-    {
-        accessType = access_put_by_id_list;
-        u.putByIdList.list = list;
-    }
-    
-    void initInList(PolymorphicAccessStructureList* list, int listSize)
+    ALWAYS_INLINE bool considerCaching(CodeBlock* codeBlock, Structure* structure)
     {
-        accessType = access_in_list;
-        u.inList.structureList = list;
-        u.inList.listSize = listSize;
-    }
+        // We never cache non-cells.
+        if (!structure)
+            return false;
         
-    void reset()
-    {
-        deref();
-        accessType = access_unset;
-        stubRoutine.clear();
-        watchpoints.clear();
-    }
-
-    void deref();
-
-    bool visitWeakReferences();
+        // This method is called from the Optimize variants of IC slow paths. The first part of this
+        // method tries to determine if the Optimize variant should really behave like the
+        // non-Optimize variant and leave the IC untouched.
+        //
+        // If we determine that we should do something to the IC then the next order of business is
+        // to determine if this Structure would impact the IC at all. We know that it won't, if we
+        // have already buffered something on its behalf. That's what the bufferedStructures set is
+        // for.
         
-    bool seenOnce()
-    {
-        return seen;
+        everConsidered = true;
+        if (!countdown) {
+            // Check if we have been doing repatching too frequently. If so, then we should cool off
+            // for a while.
+            WTF::incrementWithSaturation(repatchCount);
+            if (repatchCount > Options::repatchCountForCoolDown()) {
+                // We've been repatching too much, so don't do it now.
+                repatchCount = 0;
+                // The amount of time we require for cool-down depends on the number of times we've
+                // had to cool down in the past. The relationship is exponential. The max value we
+                // allow here is 2^8 - 2 = 254 (countdown is a uint8_t), since the slow paths may
+                // increment the count to indicate
+                // that they'd like to temporarily skip patching just this once.
+                countdown = WTF::leftShiftWithSaturation(
+                    static_cast<uint8_t>(Options::initialCoolDownCount()),
+                    numberOfCoolDowns,
+                    static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() - 1));
+                WTF::incrementWithSaturation(numberOfCoolDowns);
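+                // Illustrative numbers: if Options::initialCoolDownCount() were 20,
+                // successive cool-down periods would run 20, 40, 80, ...,
+                // saturating at 254.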
+                
+                // We may still have had something buffered. Trigger generation now.
+                bufferingCountdown = 0;
+                return true;
+            }
+            
+            // We don't want to return false due to buffering indefinitely.
+            if (!bufferingCountdown) {
+                // Note that when this returns true, it's possible that we will not even get an
+                // AccessCase because this may cause Repatch.cpp to simply do an in-place
+                // repatching.
+                return true;
+            }
+            
+            bufferingCountdown--;
+            
+            // Now protect the IC buffering. We want to proceed only if this is a structure that
+            // we don't already have a case buffered for. Note that if this returns true but the
+            // bufferingCountdown is not zero then we will buffer the access case for later without
+            // immediately generating code for it.
+            bool isNewlyAdded = bufferedStructures.add(structure);
+            if (isNewlyAdded) {
+                VM& vm = *codeBlock->vm();
+                vm.heap.writeBarrier(codeBlock);
+            }
+            return isNewlyAdded;
+        }
+        countdown--;
+        return false;
     }
 
-    void setSeen()
-    {
-        seen = true;
-    }
-        
-    StructureStubClearingWatchpoint* addWatchpoint(CodeBlock* codeBlock)
-    {
-        return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
-            watchpoints, codeBlock, this);
-    }
-    
-    int8_t accessType;
-    bool seen : 1;
-    bool resetByGC : 1;
+    bool containsPC(void* pc) const;
 
     CodeOrigin codeOrigin;
+    CallSiteIndex callSiteIndex;
 
+    union {
+        struct {
+            WriteBarrierBase<Structure> baseObjectStructure;
+            PropertyOffset offset;
+        } byIdSelf;
+        PolymorphicAccess* stub;
+    } u;
+    
+    // Represents those structures that already have buffered AccessCases in the PolymorphicAccess.
+    // Note that it's always safe to clear this. If we clear it prematurely, then if we see the same
+    // structure again during this buffering countdown, we will create an AccessCase object for it.
+    // That's not so bad - we'll get rid of the redundant ones once we regenerate.
+    StructureSet bufferedStructures;
+    
     struct {
-        int8_t registersFlushed;
-        int8_t callFrameRegister;
+        CodeLocationLabel start; // This is either the start of the inline IC for *byId caches, or the location of the patchable jump for 'in' caches.
+        RegisterSet usedRegisters;
+        uint32_t inlineSize;
+        int32_t deltaFromStartToSlowPathCallLocation;
+        int32_t deltaFromStartToSlowPathStart;
+
         int8_t baseGPR;
+        int8_t valueGPR;
 #if USE(JSVALUE32_64)
         int8_t valueTagGPR;
-#endif
-        int8_t valueGPR;
-        RegisterSet usedRegisters;
-        int32_t deltaCallToDone;
-        int32_t deltaCallToStorageLoad;
-        int32_t deltaCallToJump;
-        int32_t deltaCallToSlowCase;
-        int32_t deltaCheckImmToCall;
-#if USE(JSVALUE64)
-        int32_t deltaCallToLoadOrStore;
-#else
-        int32_t deltaCallToTagLoadOrStore;
-        int32_t deltaCallToPayloadLoadOrStore;
+        int8_t baseTagGPR;
 #endif
     } patch;
 
-    union {
-        struct {
-            // It would be unwise to put anything here, as it will surely be overwritten.
-        } unset;
-        struct {
-            WriteBarrierBase<Structure> baseObjectStructure;
-        } getByIdSelf;
-        struct {
-            WriteBarrierBase<Structure> baseObjectStructure;
-            WriteBarrierBase<Structure> prototypeStructure;
-            bool isDirect;
-        } getByIdProto;
-        struct {
-            WriteBarrierBase<Structure> baseObjectStructure;
-            WriteBarrierBase<StructureChain> chain;
-            unsigned count : 31;
-            bool isDirect : 1;
-        } getByIdChain;
-        struct {
-            PolymorphicAccessStructureList* structureList;
-            int listSize : 31;
-            bool didSelfPatching : 1;
-        } getByIdSelfList;
-        struct {
-            PolymorphicAccessStructureList* structureList;
-            int listSize;
-        } getByIdProtoList;
-        struct {
-            WriteBarrierBase<Structure> previousStructure;
-            WriteBarrierBase<Structure> structure;
-            WriteBarrierBase<StructureChain> chain;
-        } putByIdTransition;
-        struct {
-            WriteBarrierBase<Structure> baseObjectStructure;
-        } putByIdReplace;
-        struct {
-            PolymorphicPutByIdList* list;
-        } putByIdList;
-        struct {
-            PolymorphicAccessStructureList* structureList;
-            int listSize;
-        } inList;
-    } u;
+    CodeLocationCall slowPathCallLocation() { return patch.start.callAtOffset(patch.deltaFromStartToSlowPathCallLocation); }
+    CodeLocationLabel doneLocation() { return patch.start.labelAtOffset(patch.inlineSize); }
+    CodeLocationLabel slowPathStartLocation() { return patch.start.labelAtOffset(patch.deltaFromStartToSlowPathStart); }
+    CodeLocationJump patchableJumpForIn()
+    { 
+        ASSERT(accessType == AccessType::In);
+        return patch.start.jumpAtOffset(0);
+    }
+
+    JSValueRegs valueRegs() const
+    {
+        return JSValueRegs(
+#if USE(JSVALUE32_64)
+            static_cast<GPRReg>(patch.valueTagGPR),
+#endif
+            static_cast<GPRReg>(patch.valueGPR));
+    }
+
 
-    RefPtr<JITStubRoutine> stubRoutine;
-    CodeLocationCall callReturnLocation;
-    RefPtr<WatchpointsOnStructureStubInfo> watchpoints;
+    AccessType accessType;
+    CacheType cacheType;
+    uint8_t countdown; // We repatch only when this is zero. If not zero, we decrement.
+    uint8_t repatchCount;
+    uint8_t numberOfCoolDowns;
+    uint8_t bufferingCountdown;
+    bool resetByGC : 1;
+    bool tookSlowPath : 1;
+    bool everConsidered : 1;
 };
 
 inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStubInfo)
@@ -290,14 +218,12 @@ inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStu
     return structureStubInfo.codeOrigin;
 }
 
-typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
-
 #else
 
-typedef HashMap<int, void*> StubInfoMap;
+class StructureStubInfo;
 
 #endif // ENABLE(JIT)
 
-} // namespace JSC
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
 
-#endif // StructureStubInfo_h
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/SuperSampler.cpp b/Source/JavaScriptCore/bytecode/SuperSampler.cpp
new file mode 100644
index 000000000..a4e21f9fa
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/SuperSampler.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "SuperSampler.h"
+
+#include "MacroAssembler.h"
+#include "Options.h"
+#include <wtf/CurrentTime.h>
+#include <wtf/DataLog.h>
+#include <wtf/Lock.h>
+#include <wtf/Threading.h>
+
+namespace JSC {
+
+volatile uint32_t g_superSamplerCount;
+
+static StaticLock lock;
+static double in;
+static double out;
+
+void initializeSuperSampler()
+{
+    if (!Options::useSuperSampler())
+        return;
+
+    createThread(
+        "JSC Super Sampler",
+        [] () {
+            const int sleepQuantum = 10;
+            const int printingPeriod = 1000;
+            for (;;) {
+                for (int ms = 0; ms < printingPeriod; ms += sleepQuantum) {
+                    {
+                        LockHolder locker(lock);
+                        if (g_superSamplerCount)
+                            in++;
+                        else
+                            out++;
+                    }
+                    sleepMS(sleepQuantum);
+                }
+                printSuperSamplerState();
+                if (static_cast<int32_t>(g_superSamplerCount) < 0)
+                    dataLog("WARNING: Super sampler undercount detected!\n");
+            }
+        });
+}
+
+void resetSuperSamplerState()
+{
+    LockHolder locker(lock);
+    in = 0;
+    out = 0;
+}
+
+void printSuperSamplerState()
+{
+    if (!Options::useSuperSampler())
+        return;
+
+    LockHolder locker(lock);
+    double percentage = 100.0 * in / (in + out);
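+    // in + out can be zero (e.g. right after a reset); the division then
+    // yields NaN, which the self-comparison below maps to 0.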
+    if (percentage != percentage)
+        percentage = 0.0;
+    dataLog("Percent time behind super sampler flag: ", percentage, "\n");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/SuperSampler.h b/Source/JavaScriptCore/bytecode/SuperSampler.h
new file mode 100644
index 000000000..c90f6d43a
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/SuperSampler.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+class MacroAssembler;
+
+extern volatile uint32_t g_superSamplerCount;
+
+void initializeSuperSampler();
+
+class SuperSamplerScope {
+public:
+    SuperSamplerScope(bool doSample = true)
+        : m_doSample(doSample)
+    {
+        if (m_doSample)
+            g_superSamplerCount++;
+    }
+
+    ~SuperSamplerScope()
+    {
+        if (m_doSample)
+            g_superSamplerCount--;
+    }
+
+private:
+    bool m_doSample;
+};
+
+JS_EXPORT_PRIVATE void resetSuperSamplerState();
+JS_EXPORT_PRIVATE void printSuperSamplerState();
+
+} // namespace JSC
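
Typical use of the scope above, as a hedged sketch (measureHotRegion and the workload are illustrative):

#include "SuperSampler.h"

void measureHotRegion()
{
    JSC::SuperSamplerScope scope; // Attributes this region to "in" until scope exit.
    // ... the work whose share of wall-clock time the sampler should report ...
}
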
diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.cpp b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp
new file mode 100644
index 000000000..23d1e0800
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ToThisStatus.h"
+
+namespace JSC {
+
+ToThisStatus merge(ToThisStatus a, ToThisStatus b)
+{
+    switch (a) {
+    case ToThisOK:
+        return b;
+    case ToThisConflicted:
+        return ToThisConflicted;
+    case ToThisClearedByGC:
+        if (b == ToThisConflicted)
+            return ToThisConflicted;
+        return ToThisClearedByGC;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+    return ToThisConflicted;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ToThisStatus status)
+{
+    switch (status) {
+    case ToThisOK:
+        out.print("OK");
+        return;
+    case ToThisConflicted:
+        out.print("Conflicted");
+        return;
+    case ToThisClearedByGC:
+        out.print("ClearedByGC");
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
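
merge() above behaves as a join on the three-point lattice ToThisOK < ToThisClearedByGC < ToThisConflicted: ToThisOK is the identity, ToThisConflicted absorbs everything, and the operation is idempotent. A quick hedged self-check of that reading (checkToThisStatusLattice is an illustrative name):

#include "ToThisStatus.h"
#include <cassert>

static void checkToThisStatusLattice()
{
    using namespace JSC;
    assert(merge(ToThisOK, ToThisClearedByGC) == ToThisClearedByGC);          // Identity.
    assert(merge(ToThisClearedByGC, ToThisConflicted) == ToThisConflicted);   // Absorbing.
    assert(merge(ToThisClearedByGC, ToThisClearedByGC) == ToThisClearedByGC); // Idempotent.
}
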
diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.h b/Source/JavaScriptCore/bytecode/ToThisStatus.h
new file mode 100644
index 000000000..ded012ae7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ToThisStatus.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+enum ToThisStatus {
+    ToThisOK,
+    ToThisConflicted,
+    ToThisClearedByGC
+};
+
+ToThisStatus merge(ToThisStatus, ToThisStatus);
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::ToThisStatus);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/TrackedReferences.cpp b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp
new file mode 100644
index 000000000..ae213d54b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "TrackedReferences.h"
+
+#include "JSCInlines.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC {
+
+TrackedReferences::TrackedReferences()
+{
+}
+
+TrackedReferences::~TrackedReferences()
+{
+}
+
+void TrackedReferences::add(JSCell* cell)
+{
+    if (cell)
+        m_references.add(cell);
+}
+
+void TrackedReferences::add(JSValue value)
+{
+    if (value.isCell())
+        add(value.asCell());
+}
+
+void TrackedReferences::check(JSCell* cell) const
+{
+    if (!cell)
+        return;
+    
+    if (m_references.contains(cell))
+        return;
+    
+    dataLog("Found untracked reference: ", JSValue(cell), "\n");
+    dataLog("All tracked references: ", *this, "\n");
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void TrackedReferences::check(JSValue value) const
+{
+    if (value.isCell())
+        check(value.asCell());
+}
+
+void TrackedReferences::dump(PrintStream& out) const
+{
+    CommaPrinter comma;
+    for (JSCell* cell : m_references)
+        out.print(comma, RawPointer(cell));
+}
+
+} // namespace JSC
+
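
Intended use of the class above, as a hedged sketch (auditPlan and its arguments are illustrative): record every cell an artifact is allowed to reference, then audit the values it actually produces; check() RELEASE_ASSERTs on any untracked cell.

#include "TrackedReferences.h"

void auditPlan(JSC::TrackedReferences& refs, JSC::JSCell* knownCell, JSC::JSValue producedValue)
{
    refs.add(knownCell);       // Register the reference we expect to see.
    refs.check(producedValue); // Crashes with a dump if producedValue is an untracked cell.
}
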
diff --git a/Source/JavaScriptCore/bytecode/TrackedReferences.h b/Source/JavaScriptCore/bytecode/TrackedReferences.h
new file mode 100644
index 000000000..a1021675c
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/TrackedReferences.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSCJSValue.h"
+#include "JSCell.h"
+#include <wtf/HashSet.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class TrackedReferences {
+public:
+    TrackedReferences();
+    ~TrackedReferences();
+    
+    void add(JSCell*);
+    void add(JSValue);
+    
+    void check(JSCell*) const;
+    void check(JSValue) const;
+    
+    void dump(PrintStream&) const;
+    
+private:
+    HashSet<JSCell*> m_references;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/TypeLocation.h b/Source/JavaScriptCore/bytecode/TypeLocation.h
new file mode 100644
index 000000000..bc75923fd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/TypeLocation.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "TypeSet.h"
+
+namespace JSC {
+
+enum TypeProfilerGlobalIDFlags {
+    TypeProfilerNeedsUniqueIDGeneration = -1,
+    TypeProfilerNoGlobalIDExists = -2,
+    TypeProfilerReturnStatement = -3
+};
+
+typedef intptr_t GlobalVariableID;
+
+class TypeLocation {
+public:
+    TypeLocation()
+        : m_lastSeenType(TypeNothing)
+        , m_divotForFunctionOffsetIfReturnStatement(UINT_MAX)
+        , m_instructionTypeSet(TypeSet::create())
+        , m_globalTypeSet(nullptr)
+    {
+    }
+
+    GlobalVariableID m_globalVariableID;
+    RuntimeType m_lastSeenType;
+    intptr_t m_sourceID;
+    unsigned m_divotStart;
+    unsigned m_divotEnd;
+    unsigned m_divotForFunctionOffsetIfReturnStatement;
+    RefPtr<TypeSet> m_instructionTypeSet;
+    RefPtr<TypeSet> m_globalTypeSet;
+};
+
+} // namespace JSC
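
The sentinels above flag how m_globalVariableID is populated. One plausible initialization, as a hedged sketch (the helper and the choice of sentinel are assumptions, not the bytecode generator's actual logic):

#include "TypeLocation.h"

JSC::TypeLocation* makeLocationPendingGlobalID()
{
    auto* location = new JSC::TypeLocation();
    // Mark the ID as not yet assigned; a later pass is expected to replace
    // this sentinel with a real GlobalVariableID.
    location->m_globalVariableID = JSC::TypeProfilerNeedsUniqueIDGeneration;
    return location;
}
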
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
index 1dfb5ac6a..53defbfb3 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,191 +28,52 @@
 #include "UnlinkedCodeBlock.h"
 
 #include "BytecodeGenerator.h"
+#include "BytecodeRewriter.h"
 #include "ClassInfo.h"
 #include "CodeCache.h"
-#include "Executable.h"
+#include "ExecutableInfo.h"
+#include "FunctionOverrides.h"
+#include "JSCInlines.h"
 #include "JSString.h"
-#include "Operations.h"
 #include "Parser.h"
+#include "PreciseJumpTargetsInlines.h"
 #include "SourceProvider.h"
 #include "Structure.h"
 #include "SymbolTable.h"
+#include "UnlinkedEvalCodeBlock.h"
+#include "UnlinkedFunctionCodeBlock.h"
 #include "UnlinkedInstructionStream.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+#include "UnlinkedProgramCodeBlock.h"
 #include <wtf/DataLog.h>
 
 namespace JSC {
 
-const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) };
-const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) };
-const ClassInfo UnlinkedGlobalCodeBlock::s_info = { "UnlinkedGlobalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedGlobalCodeBlock) };
-const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) };
-const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) };
-const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) };
+const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) };
 
-static UnlinkedFunctionCodeBlock* generateFunctionCodeBlock(VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source, CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
-{
-    RefPtr<FunctionBodyNode> body = parse<FunctionBodyNode>(&vm, source, executable->parameters(), executable->name(), executable->isInStrictContext() ? JSParseStrict : JSParseNormal, JSParseFunctionCode, error);
-
-    if (!body) {
-        ASSERT(error.m_type != ParserError::ErrorNone);
-        return 0;
-    }
-
-    if (executable->forceUsesArguments())
-        body->setUsesArguments();
-    body->finishParsing(executable->parameters(), executable->name(), executable->functionNameIsInScopeToggle());
-    executable->recordParse(body->features(), body->hasCapturedVariables());
-    
-    UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, ExecutableInfo(body->needsActivation(), body->usesEval(), body->isStrictMode(), kind == CodeForConstruct));
-    OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(vm, body.get(), result, debuggerMode, profilerMode)));
-    error = generator->generate();
-    body->destroyData();
-    if (error.m_type != ParserError::ErrorNone)
-        return 0;
-    return result;
-}
-
-unsigned UnlinkedCodeBlock::addOrFindConstant(JSValue v)
-{
-    unsigned numberOfConstants = numberOfConstantRegisters();
-    for (unsigned i = 0; i < numberOfConstants; ++i) {
-        if (getConstant(FirstConstantRegisterIndex + i) == v)
-            return i;
-    }
-    return addConstant(v);
-}
-
-UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& source, FunctionBodyNode* node, bool isFromGlobalCode)
-    : Base(*vm, structure)
-    , m_numCapturedVariables(node->capturedVariableCount())
-    , m_forceUsesArguments(node->usesArguments())
-    , m_isInStrictContext(node->isStrictMode())
-    , m_hasCapturedVariables(node->hasCapturedVariables())
-    , m_isFromGlobalCode(isFromGlobalCode)
-    , m_name(node->ident())
-    , m_inferredName(node->inferredName())
-    , m_parameters(node->parameters())
-    , m_firstLineOffset(node->firstLine() - source.firstLine())
-    , m_lineCount(node->lastLine() - node->firstLine())
-    , m_unlinkedFunctionNameStart(node->functionNameStart() - source.startOffset())
-    , m_unlinkedBodyStartColumn(node->startColumn())
-    , m_unlinkedBodyEndColumn(m_lineCount ? node->endColumn() : node->endColumn() - node->startColumn())
-    , m_startOffset(node->source().startOffset() - source.startOffset())
-    , m_sourceLength(node->source().length())
-    , m_features(node->features())
-    , m_functionNameIsInScopeToggle(node->functionNameIsInScopeToggle())
-{
-}
-
-size_t UnlinkedFunctionExecutable::parameterCount() const
-{
-    return m_parameters->size();
-}
-
-void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor)
-{
-    UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell);
-    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
-    COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
-    ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
-    Base::visitChildren(thisObject, visitor);
-    visitor.append(&thisObject->m_codeBlockForCall);
-    visitor.append(&thisObject->m_codeBlockForConstruct);
-    visitor.append(&thisObject->m_nameValue);
-    visitor.append(&thisObject->m_symbolTableForCall);
-    visitor.append(&thisObject->m_symbolTableForConstruct);
-}
-
-FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& source, size_t lineOffset, size_t sourceOffset)
-{
-    unsigned firstLine = lineOffset + m_firstLineOffset;
-    unsigned startOffset = sourceOffset + m_startOffset;
-    bool startColumnIsOnFirstSourceLine = !m_firstLineOffset;
-    unsigned startColumn = m_unlinkedBodyStartColumn + (startColumnIsOnFirstSourceLine ? source.startColumn() : 1);
-    bool endColumnIsOnStartLine = !m_lineCount;
-    unsigned endColumn = m_unlinkedBodyEndColumn + (endColumnIsOnStartLine ? startColumn : 1);
-    SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn);
-    return FunctionExecutable::create(vm, code, this, firstLine, firstLine + m_lineCount, startColumn, endColumn);
-}
-
-UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(const Identifier& name, ExecState* exec, Debugger*, const SourceCode& source, JSObject** exception)
-{
-    ParserError error;
-    VM& vm = exec->vm();
-    CodeCache* codeCache = vm.codeCache();
-    UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(vm, name, source, error);
-
-    if (exec->lexicalGlobalObject()->hasDebugger())
-        exec->lexicalGlobalObject()->debugger()->sourceParsed(exec, source.provider(), error.m_line, error.m_message);
-
-    if (error.m_type != ParserError::ErrorNone) {
-        *exception = error.toErrorObject(exec->lexicalGlobalObject(), source);
-        return 0;
-    }
-
-    return executable;
-}
-
-UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::codeBlockFor(VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
-{
-    switch (specializationKind) {
-    case CodeForCall:
-        if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForCall.get())
-            return codeBlock;
-        break;
-    case CodeForConstruct:
-        if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForConstruct.get())
-            return codeBlock;
-        break;
-    }
-
-    UnlinkedFunctionCodeBlock* result = generateFunctionCodeBlock(vm, this, source, specializationKind, debuggerMode, profilerMode, error);
-    
-    if (error.m_type != ParserError::ErrorNone)
-        return 0;
-
-    switch (specializationKind) {
-    case CodeForCall:
-        m_codeBlockForCall.set(vm, this, result);
-        m_symbolTableForCall.set(vm, this, result->symbolTable());
-        break;
-    case CodeForConstruct:
-        m_codeBlockForConstruct.set(vm, this, result);
-        m_symbolTableForConstruct.set(vm, this, result->symbolTable());
-        break;
-    }
-    return result;
-}
-
-String UnlinkedFunctionExecutable::paramString() const
-{
-    FunctionParameters& parameters = *m_parameters;
-    StringBuilder builder;
-    for (size_t pos = 0; pos < parameters.size(); ++pos) {
-        if (!builder.isEmpty())
-            builder.appendLiteral(", ");
-        parameters.at(pos)->toString(builder);
-    }
-    return builder.toString();
-}
-
-UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
+UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
     : Base(*vm, structure)
     , m_numVars(0)
-    , m_numCalleeRegisters(0)
+    , m_numCalleeLocals(0)
     , m_numParameters(0)
-    , m_vm(vm)
-    , m_argumentsRegister(VirtualRegister())
     , m_globalObjectRegister(VirtualRegister())
-    , m_needsFullScopeChain(info.m_needsActivation)
-    , m_usesEval(info.m_usesEval)
-    , m_isNumericCompareFunction(false)
-    , m_isStrictMode(info.m_isStrictMode)
-    , m_isConstructor(info.m_isConstructor)
+    , m_usesEval(info.usesEval())
+    , m_isStrictMode(info.isStrictMode())
+    , m_isConstructor(info.isConstructor())
     , m_hasCapturedVariables(false)
-    , m_firstLine(0)
+    , m_isBuiltinFunction(info.isBuiltinFunction())
+    , m_superBinding(static_cast<unsigned>(info.superBinding()))
+    , m_scriptMode(static_cast<unsigned>(info.scriptMode()))
+    , m_isArrowFunctionContext(info.isArrowFunctionContext())
+    , m_isClassContext(info.isClassContext())
+    , m_wasCompiledWithDebuggingOpcodes(debuggerMode == DebuggerMode::DebuggerOn || Options::forceDebuggerBytecodeGeneration())
+    , m_constructorKind(static_cast<unsigned>(info.constructorKind()))
+    , m_derivedContextType(static_cast<unsigned>(info.derivedContextType()))
+    , m_evalContextType(static_cast<unsigned>(info.evalContextType()))
     , m_lineCount(0)
     , m_endColumn(UINT_MAX)
+    , m_didOptimize(MixedTriState)
+    , m_parseMode(info.parseMode())
     , m_features(0)
     , m_codeType(codeType)
     , m_arrayProfileCount(0)
@@ -220,32 +81,38 @@ UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType code
     , m_objectAllocationProfileCount(0)
     , m_valueProfileCount(0)
     , m_llintCallLinkInfoCount(0)
-#if ENABLE(BYTECODE_COMMENTS)
-    , m_bytecodeCommentIterator(0)
-#endif
 {
-
+    for (auto& constantRegisterIndex : m_linkTimeConstants)
+        constantRegisterIndex = 0;
+    ASSERT(m_constructorKind == static_cast<unsigned>(info.constructorKind()));
 }
 
 void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
 {
     UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell);
     ASSERT_GC_OBJECT_INHERITS(thisObject, info());
-    COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
-    ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
     Base::visitChildren(thisObject, visitor);
-    visitor.append(&thisObject->m_symbolTable);
+    auto locker = holdLock(*thisObject);
     for (FunctionExpressionVector::iterator ptr = thisObject->m_functionDecls.begin(), end = thisObject->m_functionDecls.end(); ptr != end; ++ptr)
-        visitor.append(ptr);
+        visitor.append(*ptr);
     for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr)
-        visitor.append(ptr);
+        visitor.append(*ptr);
     visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size());
+    if (thisObject->m_unlinkedInstructions)
+        visitor.reportExtraMemoryVisited(thisObject->m_unlinkedInstructions->sizeInBytes());
     if (thisObject->m_rareData) {
         for (size_t i = 0, end = thisObject->m_rareData->m_regexps.size(); i != end; i++)
-            visitor.append(&thisObject->m_rareData->m_regexps[i]);
+            visitor.append(thisObject->m_rareData->m_regexps[i]);
     }
 }
 
+size_t UnlinkedCodeBlock::estimatedSize(JSCell* cell)
+{
+    UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell);
+    size_t extraSize = thisObject->m_unlinkedInstructions ? thisObject->m_unlinkedInstructions->sizeInBytes() : 0;
+    return Base::estimatedSize(cell) + extraSize;
+}
+
 int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
 {
     ASSERT(bytecodeOffset < instructions().count());
@@ -258,8 +125,8 @@ int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
     return line;
 }
 
-inline void UnlinkedCodeBlock::getLineAndColumn(ExpressionRangeInfo& info,
-    unsigned& line, unsigned& column)
+inline void UnlinkedCodeBlock::getLineAndColumn(const ExpressionRangeInfo& info,
+    unsigned& line, unsigned& column) const
 {
     switch (info.mode) {
     case ExpressionRangeInfo::FatLineMode:
@@ -292,6 +159,7 @@ static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& i
         case DidReachBreakpoint: event = " DidReachBreakpoint"; break;
         case WillLeaveCallFrame: event = " WillLeaveCallFrame"; break;
         case WillExecuteStatement: event = " WillExecuteStatement"; break;
+        case WillExecuteExpression: event = " WillExecuteExpression"; break;
         }
     }
     dataLogF("  [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, opcodeNames[opcode], event);
@@ -315,7 +183,7 @@ void UnlinkedCodeBlock::dumpExpressionRangeInfo()
 #endif
 
 void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset,
-    int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
+    int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
 {
     ASSERT(bytecodeOffset < instructions().count());
 
@@ -328,7 +196,7 @@ void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset
         return;
     }
 
-    Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo;
+    const Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo;
 
     int low = 0;
     int high = expressionInfo.size();
@@ -343,7 +211,7 @@ void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset
     if (!low)
         low = 1;
 
-    ExpressionRangeInfo& info = expressionInfo[low - 1];
+    const ExpressionRangeInfo& info = expressionInfo[low - 1];
     startOffset = info.startOffset;
     endOffset = info.endOffset;
     divot = info.divotPoint;
@@ -404,51 +272,145 @@ void UnlinkedCodeBlock::addExpressionInfo(unsigned instructionOffset,
     m_expressionInfo.append(info);
 }
 
-void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+bool UnlinkedCodeBlock::typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot)
 {
-    UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell);
-    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
-    COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
-    ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
-    Base::visitChildren(thisObject, visitor);
-    for (size_t i = 0, end = thisObject->m_functionDeclarations.size(); i != end; i++)
-        visitor.append(&thisObject->m_functionDeclarations[i].second);
+    static const bool verbose = false;
+    if (!m_rareData) {
+        if (verbose)
+            dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset);
+        startDivot = UINT_MAX;
+        endDivot = UINT_MAX;
+        return false;
+    }
+
+    auto iter = m_rareData->m_typeProfilerInfoMap.find(bytecodeOffset);
+    if (iter == m_rareData->m_typeProfilerInfoMap.end()) {
+        if (verbose)
+            dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset);
+        startDivot = UINT_MAX;
+        endDivot = UINT_MAX;
+        return false;
+    }
+    
+    RareData::TypeProfilerExpressionRange& range = iter->value;
+    startDivot = range.m_startDivot;
+    endDivot = range.m_endDivot;
+    return true;
 }
 
-UnlinkedCodeBlock::~UnlinkedCodeBlock()
+void UnlinkedCodeBlock::addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot)
 {
+    createRareDataIfNecessary();
+    RareData::TypeProfilerExpressionRange range;
+    range.m_startDivot = startDivot;
+    range.m_endDivot = endDivot;
+    m_rareData->m_typeProfilerInfoMap.set(instructionOffset, range);
 }
 
-void UnlinkedProgramCodeBlock::destroy(JSCell* cell)
+UnlinkedCodeBlock::~UnlinkedCodeBlock()
 {
-    jsCast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock();
 }
 
-void UnlinkedEvalCodeBlock::destroy(JSCell* cell)
+void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions)
 {
-    jsCast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock();
+    ASSERT(instructions);
+    {
+        auto locker = holdLock(*this);
+        m_unlinkedInstructions = WTFMove(instructions);
+    }
+    Heap::heap(this)->reportExtraMemoryAllocated(m_unlinkedInstructions->sizeInBytes());
 }
 
-void UnlinkedFunctionCodeBlock::destroy(JSCell* cell)
+const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const
 {
-    jsCast<UnlinkedFunctionCodeBlock*>(cell)->~UnlinkedFunctionCodeBlock();
+    ASSERT(m_unlinkedInstructions.get());
+    return *m_unlinkedInstructions;
 }
 
-void UnlinkedFunctionExecutable::destroy(JSCell* cell)
+UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
 {
-    jsCast<UnlinkedFunctionExecutable*>(cell)->~UnlinkedFunctionExecutable();
+    return handlerForIndex(bytecodeOffset, requiredHandler);
 }
 
-void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions)
+UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
 {
-    m_unlinkedInstructions = std::move(instructions);
+    if (!m_rareData)
+        return nullptr;
+    return UnlinkedHandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
 }
 
-const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const
+void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter)
 {
-    ASSERT(m_unlinkedInstructions.get());
-    return *m_unlinkedInstructions;
+    // Before applying the changes, we adjust the jumps based on the original bytecode offset, the offset to the jump target, and
+    // the insertion information.
+
+    BytecodeGraph& graph = rewriter.graph();
+    UnlinkedInstruction* instructionsBegin = graph.instructions().begin();
+
+    for (int bytecodeOffset = 0, instructionCount = graph.instructions().size(); bytecodeOffset < instructionCount;) {
+        UnlinkedInstruction* current = instructionsBegin + bytecodeOffset;
+        OpcodeID opcodeID = current[0].u.opcode;
+        extractStoredJumpTargetsForBytecodeOffset(this, vm()->interpreter, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) {
+            relativeOffset = rewriter.adjustJumpTarget(bytecodeOffset, bytecodeOffset + relativeOffset);
+        });
+        bytecodeOffset += opcodeLength(opcodeID);
+    }
+
+    // Then, exception handlers should be adjusted.
+    if (m_rareData) {
+        for (UnlinkedHandlerInfo& handler : m_rareData->m_exceptionHandlers) {
+            handler.target = rewriter.adjustAbsoluteOffset(handler.target);
+            handler.start = rewriter.adjustAbsoluteOffset(handler.start);
+            handler.end = rewriter.adjustAbsoluteOffset(handler.end);
+        }
+
+        for (size_t i = 0; i < m_rareData->m_opProfileControlFlowBytecodeOffsets.size(); ++i)
+            m_rareData->m_opProfileControlFlowBytecodeOffsets[i] = rewriter.adjustAbsoluteOffset(m_rareData->m_opProfileControlFlowBytecodeOffsets[i]);
+
+        if (!m_rareData->m_typeProfilerInfoMap.isEmpty()) {
+            HashMap<unsigned, RareData::TypeProfilerExpressionRange> adjustedTypeProfilerInfoMap;
+            for (auto& entry : m_rareData->m_typeProfilerInfoMap)
+                adjustedTypeProfilerInfoMap.set(rewriter.adjustAbsoluteOffset(entry.key), entry.value);
+            m_rareData->m_typeProfilerInfoMap.swap(adjustedTypeProfilerInfoMap);
+        }
+    }
+
+    for (size_t i = 0; i < m_propertyAccessInstructions.size(); ++i)
+        m_propertyAccessInstructions[i] = rewriter.adjustAbsoluteOffset(m_propertyAccessInstructions[i]);
+
+    for (size_t i = 0; i < m_expressionInfo.size(); ++i)
+        m_expressionInfo[i].instructionOffset = rewriter.adjustAbsoluteOffset(m_expressionInfo[i].instructionOffset);
+
+    // Then, modify the unlinked instructions.
+    rewriter.applyModification();
+
+    // And recompute the jump target based on the modified unlinked instructions.
+    m_jumpTargets.clear();
+    recomputePreciseJumpTargets(this, graph.instructions().begin(), graph.instructions().size(), m_jumpTargets);
 }
 
+void UnlinkedCodeBlock::shrinkToFit()
+{
+    auto locker = holdLock(*this);
+    
+    m_jumpTargets.shrinkToFit();
+    m_identifiers.shrinkToFit();
+    m_bitVectors.shrinkToFit();
+    m_constantRegisters.shrinkToFit();
+    m_constantsSourceCodeRepresentation.shrinkToFit();
+    m_functionDecls.shrinkToFit();
+    m_functionExprs.shrinkToFit();
+    m_propertyAccessInstructions.shrinkToFit();
+    m_expressionInfo.shrinkToFit();
+
+    if (m_rareData) {
+        m_rareData->m_exceptionHandlers.shrinkToFit();
+        m_rareData->m_regexps.shrinkToFit();
+        m_rareData->m_constantBuffers.shrinkToFit();
+        m_rareData->m_switchJumpTables.shrinkToFit();
+        m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+        m_rareData->m_expressionInfoFatPositions.shrinkToFit();
+    }
 }
 
+} // namespace JSC
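
The applyModification() function added above turns on one invariant: once the
BytecodeRewriter has inserted instructions, every stored jump offset and every
absolute bytecode offset must be remapped so that it still names the same
instruction. The following standalone C++ sketch illustrates only that
remapping idea; Insertion, adjustAbsoluteOffset, and adjustJumpTarget are
hypothetical names that mirror the shape of the rewriter calls in the hunk
above, not the JSC API itself.

    #include <cstdio>
    #include <vector>

    // Each insertion records where slots were added and how many.
    struct Insertion {
        int offset; // original bytecode offset of the insertion point
        int length; // number of instruction slots inserted there
    };

    // Map an offset in the original stream to its offset after all insertions.
    static int adjustAbsoluteOffset(const std::vector<Insertion>& insertions, int offset)
    {
        int adjusted = offset;
        for (const Insertion& insertion : insertions) {
            if (insertion.offset <= offset)
                adjusted += insertion.length;
        }
        return adjusted;
    }

    // Rewrite a relative jump so that it still lands on the same instruction.
    static int adjustJumpTarget(const std::vector<Insertion>& insertions, int from, int to)
    {
        return adjustAbsoluteOffset(insertions, to) - adjustAbsoluteOffset(insertions, from);
    }

    int main()
    {
        std::vector<Insertion> insertions { { 4, 2 } }; // two slots inserted at offset 4
        // A jump at offset 2 whose relative target was +6 originally landed at
        // offset 8; after the insertion the relative target must become +8.
        std::printf("new relative offset: %d\n", adjustJumpTarget(insertions, 2, 8));
        return 0;
    }

A relative target is adjusted from both ends, which is why the hunk passes both
the branch's own offset and the computed absolute target to adjustJumpTarget().
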
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
index b9dae2d5c..f0574976c 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,41 +23,42 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef UnlinkedCodeBlock_h
-#define UnlinkedCodeBlock_h
+#pragma once
 
 #include "BytecodeConventions.h"
 #include "CodeSpecializationKind.h"
 #include "CodeType.h"
+#include "ConstructAbility.h"
 #include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
 #include "Identifier.h"
 #include "JSCell.h"
 #include "JSString.h"
+#include "LockDuringMarking.h"
 #include "ParserModes.h"
 #include "RegExp.h"
 #include "SpecialPointer.h"
-#include "SymbolTable.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VariableEnvironment.h"
 #include "VirtualRegister.h"
-
-#include <wtf/Compression.h>
-#include <wtf/RefCountedArray.h>
+#include <wtf/BitVector.h>
+#include <wtf/TriState.h>
 #include <wtf/Vector.h>
 
 namespace JSC {
 
+class BytecodeRewriter;
 class Debugger;
-class FunctionBodyNode;
 class FunctionExecutable;
-class FunctionParameters;
-class JSScope;
-struct ParserError;
+class ParserError;
 class ScriptExecutable;
 class SourceCode;
 class SourceProvider;
-class SymbolTable;
 class UnlinkedCodeBlock;
 class UnlinkedFunctionCodeBlock;
+class UnlinkedFunctionExecutable;
 class UnlinkedInstructionStream;
+struct ExecutableInfo;
 
 typedef unsigned UnlinkedValueProfile;
 typedef unsigned UnlinkedArrayProfile;
@@ -65,134 +66,12 @@ typedef unsigned UnlinkedArrayAllocationProfile;
 typedef unsigned UnlinkedObjectAllocationProfile;
 typedef unsigned UnlinkedLLIntCallLinkInfo;
 
-struct ExecutableInfo {
-    ExecutableInfo(bool needsActivation, bool usesEval, bool isStrictMode, bool isConstructor)
-        : m_needsActivation(needsActivation)
-        , m_usesEval(usesEval)
-        , m_isStrictMode(isStrictMode)
-        , m_isConstructor(isConstructor)
-    {
-    }
-    bool m_needsActivation;
-    bool m_usesEval;
-    bool m_isStrictMode;
-    bool m_isConstructor;
-};
-
-class UnlinkedFunctionExecutable : public JSCell {
-public:
-    friend class CodeCache;
-    typedef JSCell Base;
-    static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionBodyNode* node, bool isFromGlobalCode = false)
-    {
-        UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap)) UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, node, isFromGlobalCode);
-        instance->finishCreation(*vm);
-        return instance;
-    }
-
-    const Identifier& name() const { return m_name; }
-    const Identifier& inferredName() const { return m_inferredName; }
-    JSString* nameValue() const { return m_nameValue.get(); }
-    SymbolTable* symbolTable(CodeSpecializationKind kind)
-    {
-        return (kind == CodeForCall) ? m_symbolTableForCall.get() : m_symbolTableForConstruct.get();
-    }
-    size_t parameterCount() const;
-    bool isInStrictContext() const { return m_isInStrictContext; }
-    FunctionNameIsInScopeToggle functionNameIsInScopeToggle() const { return m_functionNameIsInScopeToggle; }
-
-    unsigned firstLineOffset() const { return m_firstLineOffset; }
-    unsigned lineCount() const { return m_lineCount; }
-    unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; }
-    unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; }
-    unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; }
-    unsigned startOffset() const { return m_startOffset; }
-    unsigned sourceLength() { return m_sourceLength; }
-
-    String paramString() const;
-
-    UnlinkedFunctionCodeBlock* codeBlockFor(VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&);
-
-    static UnlinkedFunctionExecutable* fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, JSObject** exception);
-
-    FunctionExecutable* link(VM&, const SourceCode&, size_t lineOffset, size_t sourceOffset);
-
-    void clearCodeForRecompilation()
-    {
-        m_symbolTableForCall.clear();
-        m_symbolTableForConstruct.clear();
-        m_codeBlockForCall.clear();
-        m_codeBlockForConstruct.clear();
-    }
-
-    FunctionParameters* parameters() { return m_parameters.get(); }
-
-    void recordParse(CodeFeatures features, bool hasCapturedVariables)
-    {
-        m_features = features;
-        m_hasCapturedVariables = hasCapturedVariables;
-    }
-
-    bool forceUsesArguments() const { return m_forceUsesArguments; }
-
-    CodeFeatures features() const { return m_features; }
-    bool hasCapturedVariables() const { return m_hasCapturedVariables; }
-
-    static const bool needsDestruction = true;
-    static const bool hasImmortalStructure = true;
-    static void destroy(JSCell*);
-
-private:
-    UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, FunctionBodyNode*, bool isFromGlobalCode);
-    WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForCall;
-    WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForConstruct;
-
-    unsigned m_numCapturedVariables : 29;
-    bool m_forceUsesArguments : 1;
-    bool m_isInStrictContext : 1;
-    bool m_hasCapturedVariables : 1;
-    bool m_isFromGlobalCode : 1;
-
-    Identifier m_name;
-    Identifier m_inferredName;
-    WriteBarrier<JSString> m_nameValue;
-    WriteBarrier<SymbolTable> m_symbolTableForCall;
-    WriteBarrier<SymbolTable> m_symbolTableForConstruct;
-    RefPtr<FunctionParameters> m_parameters;
-    unsigned m_firstLineOffset;
-    unsigned m_lineCount;
-    unsigned m_unlinkedFunctionNameStart;
-    unsigned m_unlinkedBodyStartColumn;
-    unsigned m_unlinkedBodyEndColumn;
-    unsigned m_startOffset;
-    unsigned m_sourceLength;
-
-    CodeFeatures m_features;
-
-    FunctionNameIsInScopeToggle m_functionNameIsInScopeToggle;
-
-protected:
-    void finishCreation(VM& vm)
-    {
-        Base::finishCreation(vm);
-        m_nameValue.set(vm, this, jsString(&vm, name().string()));
-    }
-
-    static void visitChildren(JSCell*, SlotVisitor&);
-
-public:
-    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
-    {
-        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info());
-    }
-
-    static const unsigned StructureFlags = OverridesVisitChildren | JSCell::StructureFlags;
-
-    DECLARE_EXPORT_INFO;
-};
-
 struct UnlinkedStringJumpTable {
-    typedef HashMap<RefPtr<StringImpl>, int32_t> StringOffsetTable;
+    struct OffsetLocation {
+        int32_t branchOffset;
+    };
+
+    typedef HashMap<RefPtr<StringImpl>, OffsetLocation> StringOffsetTable;
     StringOffsetTable offsetTable;
 
     inline int32_t offsetForValue(StringImpl* value, int32_t defaultOffset)
@@ -201,7 +80,7 @@ struct UnlinkedStringJumpTable {
         StringOffsetTable::const_iterator loc = offsetTable.find(value);
         if (loc == end)
             return defaultOffset;
-        return loc->value;
+        return loc->value.branchOffset;
     }
 
 };
@@ -218,13 +97,6 @@ struct UnlinkedSimpleJumpTable {
     }
 };
 
-struct UnlinkedHandlerInfo {
-    uint32_t start;
-    uint32_t end;
-    uint32_t target;
-    uint32_t scopeDepth;
-};
-
 struct UnlinkedInstruction {
     UnlinkedInstruction() { u.operand = 0; }
     UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; }
@@ -239,31 +111,36 @@ struct UnlinkedInstruction {
 class UnlinkedCodeBlock : public JSCell {
 public:
     typedef JSCell Base;
+    static const unsigned StructureFlags = Base::StructureFlags;
+
     static const bool needsDestruction = true;
-    static const bool hasImmortalStructure = true;
 
     enum { CallFunction, ApplyFunction };
 
+    typedef UnlinkedInstruction Instruction;
+    typedef Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> UnpackedInstructions;
+
     bool isConstructor() const { return m_isConstructor; }
     bool isStrictMode() const { return m_isStrictMode; }
     bool usesEval() const { return m_usesEval; }
-
-    bool needsFullScopeChain() const { return m_needsFullScopeChain; }
-    void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
+    SourceParseMode parseMode() const { return m_parseMode; }
+    bool isArrowFunction() const { return isArrowFunctionParseMode(parseMode()); }
+    DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
+    EvalContextType evalContextType() const { return static_cast<EvalContextType>(m_evalContextType); }
+    bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
+    bool isClassContext() const { return m_isClassContext; }
 
     void addExpressionInfo(unsigned instructionOffset, int divot,
         int startOffset, int endOffset, unsigned line, unsigned column);
 
+    void addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot);
+
     bool hasExpressionInfo() { return m_expressionInfo.size(); }
+    const Vector<ExpressionRangeInfo>& expressionInfo() { return m_expressionInfo; }
 
     // Special registers
     void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
-    void setActivationRegister(VirtualRegister activationRegister) { m_activationRegister = activationRegister; }
-
-    void setArgumentsRegister(VirtualRegister argumentsRegister) { m_argumentsRegister = argumentsRegister; }
-    bool usesArguments() const { return m_argumentsRegister.isValid(); }
-    VirtualRegister argumentsRegister() const { return m_argumentsRegister; }
-
+    void setScopeRegister(VirtualRegister scopeRegister) { m_scopeRegister = scopeRegister; }
 
     bool usesGlobalObject() const { return m_globalObjectRegister.isValid(); }
     void setGlobalObjectRegister(VirtualRegister globalObjectRegister) { m_globalObjectRegister = globalObjectRegister; }
@@ -277,8 +154,10 @@ public:
     unsigned addRegExp(RegExp* r)
     {
         createRareDataIfNecessary();
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
         unsigned size = m_rareData->m_regexps.size();
-        m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_vm, this, r));
+        m_rareData->m_regexps.append(WriteBarrier<RegExp>(vm, this, r));
         return size;
     }
     unsigned numberOfRegExps() const
@@ -296,19 +175,47 @@ public:
     const Identifier& identifier(int index) const { return m_identifiers[index]; }
     const Vector<Identifier>& identifiers() const { return m_identifiers; }
 
-    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
-    unsigned addConstant(JSValue v)
+    const Vector<BitVector>& bitVectors() const { return m_bitVectors; }
+    BitVector& bitVector(size_t i) { return m_bitVectors[i]; }
+    unsigned addBitVector(BitVector&& bitVector)
+    {
+        m_bitVectors.append(WTFMove(bitVector));
+        return m_bitVectors.size() - 1;
+    }
+
+    unsigned addConstant(JSValue v, SourceCodeRepresentation sourceCodeRepresentation = SourceCodeRepresentation::Other)
+    {
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
+        unsigned result = m_constantRegisters.size();
+        m_constantRegisters.append(WriteBarrier<Unknown>());
+        m_constantRegisters.last().set(vm, this, v);
+        m_constantsSourceCodeRepresentation.append(sourceCodeRepresentation);
+        return result;
+    }
+    unsigned addConstant(LinkTimeConstant type)
     {
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
         unsigned result = m_constantRegisters.size();
+        ASSERT(result);
+        unsigned index = static_cast<unsigned>(type);
+        ASSERT(index < LinkTimeConstantCount);
+        m_linkTimeConstants[index] = result;
         m_constantRegisters.append(WriteBarrier<Unknown>());
-        m_constantRegisters.last().set(*m_vm, this, v);
+        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
         return result;
     }
-    unsigned addOrFindConstant(JSValue);
+    unsigned registerIndexForLinkTimeConstant(LinkTimeConstant type)
+    {
+        unsigned index = static_cast<unsigned>(type);
+        ASSERT(index < LinkTimeConstantCount);
+        return m_linkTimeConstants[index];
+    }
     const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
     const WriteBarrier<Unknown>& constantRegister(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
     ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
-    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+    const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
 
     // Jumps
     size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
@@ -316,38 +223,25 @@ public:
     unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
     unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
 
-    void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
-    bool isNumericCompareFunction() const { return m_isNumericCompareFunction; }
+    UnlinkedHandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+    UnlinkedHandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
 
-    void shrinkToFit()
-    {
-        m_jumpTargets.shrinkToFit();
-        m_identifiers.shrinkToFit();
-        m_constantRegisters.shrinkToFit();
-        m_functionDecls.shrinkToFit();
-        m_functionExprs.shrinkToFit();
-        m_propertyAccessInstructions.shrinkToFit();
-        m_expressionInfo.shrinkToFit();
-
-#if ENABLE(BYTECODE_COMMENTS)
-        m_bytecodeComments.shrinkToFit();
-#endif
-        if (m_rareData) {
-            m_rareData->m_exceptionHandlers.shrinkToFit();
-            m_rareData->m_regexps.shrinkToFit();
-            m_rareData->m_constantBuffers.shrinkToFit();
-            m_rareData->m_switchJumpTables.shrinkToFit();
-            m_rareData->m_stringSwitchJumpTables.shrinkToFit();
-            m_rareData->m_expressionInfoFatPositions.shrinkToFit();
-        }
-    }
+    bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+
+    ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+    SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+    JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+
+    void shrinkToFit();
 
     void setInstructions(std::unique_ptr<UnlinkedInstructionStream>);
     const UnlinkedInstructionStream& instructions() const;
 
+    int numCalleeLocals() const { return m_numCalleeLocals; }
+
     int m_numVars;
     int m_numCapturedVars;
-    int m_numCalleeRegisters;
+    int m_numCalleeLocals;
 
     // Jump Tables
 
@@ -361,18 +255,22 @@ public:
 
     unsigned addFunctionDecl(UnlinkedFunctionExecutable* n)
     {
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
         unsigned size = m_functionDecls.size();
         m_functionDecls.append(WriteBarrier<UnlinkedFunctionExecutable>());
-        m_functionDecls.last().set(*m_vm, this, n);
+        m_functionDecls.last().set(vm, this, n);
         return size;
     }
     UnlinkedFunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
     size_t numberOfFunctionDecls() { return m_functionDecls.size(); }
     unsigned addFunctionExpr(UnlinkedFunctionExecutable* n)
     {
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
         unsigned size = m_functionExprs.size();
         m_functionExprs.append(WriteBarrier<UnlinkedFunctionExecutable>());
-        m_functionExprs.last().set(*m_vm, this, n);
+        m_functionExprs.last().set(vm, this, n);
         return size;
     }
     UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
@@ -380,13 +278,9 @@ public:
 
     // Exception handling support
     size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
-    void addExceptionHandler(const UnlinkedHandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); }
+    void addExceptionHandler(const UnlinkedHandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
     UnlinkedHandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
 
-    SymbolTable* symbolTable() const { return m_symbolTable.get(); }
-
-    VM* vm() const { return m_vm; }
-
     UnlinkedArrayProfile addArrayProfile() { return m_arrayProfileCount++; }
     unsigned numberOfArrayProfiles() { return m_arrayProfileCount; }
     UnlinkedArrayAllocationProfile addArrayAllocationProfile() { return m_arrayAllocationProfileCount++; }
@@ -402,8 +296,7 @@ public:
     CodeType codeType() const { return m_codeType; }
 
     VirtualRegister thisRegister() const { return m_thisRegister; }
-    VirtualRegister activationRegister() const { return m_activationRegister; }
-
+    VirtualRegister scopeRegister() const { return m_scopeRegister; }
 
     void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
     {
@@ -436,94 +329,125 @@ public:
         return m_rareData->m_constantBuffers[index];
     }
 
-    bool hasRareData() const { return m_rareData; }
+    bool hasRareData() const { return m_rareData.get(); }
 
     int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
 
     void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
-        int& startOffset, int& endOffset, unsigned& line, unsigned& column);
+        int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
+
+    bool typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot);
 
-    void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned firstLine, unsigned lineCount, unsigned endColumn)
+    void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned lineCount, unsigned endColumn)
     {
         m_features = features;
         m_hasCapturedVariables = hasCapturedVariables;
-        m_firstLine = firstLine;
         m_lineCount = lineCount;
         // For the UnlinkedCodeBlock, startColumn is always 0.
         m_endColumn = endColumn;
     }
 
+    const String& sourceURLDirective() const { return m_sourceURLDirective; }
+    const String& sourceMappingURLDirective() const { return m_sourceMappingURLDirective; }
+    void setSourceURLDirective(const String& sourceURL) { m_sourceURLDirective = sourceURL; }
+    void setSourceMappingURLDirective(const String& sourceMappingURL) { m_sourceMappingURLDirective = sourceMappingURL; }
+
     CodeFeatures codeFeatures() const { return m_features; }
     bool hasCapturedVariables() const { return m_hasCapturedVariables; }
-    unsigned firstLine() const { return m_firstLine; }
     unsigned lineCount() const { return m_lineCount; }
     ALWAYS_INLINE unsigned startColumn() const { return 0; }
     unsigned endColumn() const { return m_endColumn; }
 
+    void addOpProfileControlFlowBytecodeOffset(size_t offset)
+    {
+        createRareDataIfNecessary();
+        m_rareData->m_opProfileControlFlowBytecodeOffsets.append(offset);
+    }
+    const Vector<size_t>& opProfileControlFlowBytecodeOffsets() const
+    {
+        ASSERT(m_rareData);
+        return m_rareData->m_opProfileControlFlowBytecodeOffsets;
+    }
+    bool hasOpProfileControlFlowBytecodeOffsets() const
+    {
+        return m_rareData && !m_rareData->m_opProfileControlFlowBytecodeOffsets.isEmpty();
+    }
+
     void dumpExpressionRangeInfo(); // For debugging purpose only.
 
+    bool wasCompiledWithDebuggingOpcodes() const { return m_wasCompiledWithDebuggingOpcodes; }
+
+    TriState didOptimize() const { return m_didOptimize; }
+    void setDidOptimize(TriState didOptimize) { m_didOptimize = didOptimize; }
+
 protected:
-    UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&);
+    UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&, DebuggerMode);
     ~UnlinkedCodeBlock();
 
     void finishCreation(VM& vm)
     {
         Base::finishCreation(vm);
-        if (codeType() == GlobalCode)
-            return;
-        m_symbolTable.set(vm, this, SymbolTable::create(vm));
     }
 
 private:
+    friend class BytecodeRewriter;
+    void applyModification(BytecodeRewriter&);
 
     void createRareDataIfNecessary()
     {
-        if (!m_rareData)
-            m_rareData = adoptPtr(new RareData);
+        if (!m_rareData) {
+            auto locker = lockDuringMarking(*heap(), *this);
+            m_rareData = std::make_unique<RareData>();
+        }
     }
 
-    void getLineAndColumn(ExpressionRangeInfo&, unsigned& line, unsigned& column);
-
-    std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions;
+    void getLineAndColumn(const ExpressionRangeInfo&, unsigned& line, unsigned& column) const;
 
     int m_numParameters;
-    VM* m_vm;
+
+    std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions;
 
     VirtualRegister m_thisRegister;
-    VirtualRegister m_argumentsRegister;
-    VirtualRegister m_activationRegister;
+    VirtualRegister m_scopeRegister;
     VirtualRegister m_globalObjectRegister;
 
-    bool m_needsFullScopeChain : 1;
-    bool m_usesEval : 1;
-    bool m_isNumericCompareFunction : 1;
-    bool m_isStrictMode : 1;
-    bool m_isConstructor : 1;
-    bool m_hasCapturedVariables : 1;
-    unsigned m_firstLine;
+    String m_sourceURLDirective;
+    String m_sourceMappingURLDirective;
+
+    unsigned m_usesEval : 1;
+    unsigned m_isStrictMode : 1;
+    unsigned m_isConstructor : 1;
+    unsigned m_hasCapturedVariables : 1;
+    unsigned m_isBuiltinFunction : 1;
+    unsigned m_superBinding : 1;
+    unsigned m_scriptMode: 1;
+    unsigned m_isArrowFunctionContext : 1;
+    unsigned m_isClassContext : 1;
+    unsigned m_wasCompiledWithDebuggingOpcodes : 1;
+    unsigned m_constructorKind : 2;
+    unsigned m_derivedContextType : 2;
+    unsigned m_evalContextType : 2;
     unsigned m_lineCount;
     unsigned m_endColumn;
 
+    TriState m_didOptimize;
+    SourceParseMode m_parseMode;
     CodeFeatures m_features;
     CodeType m_codeType;
 
     Vector<unsigned> m_jumpTargets;
 
+    Vector<unsigned> m_propertyAccessInstructions;
+
     // Constant Pools
     Vector<Identifier> m_identifiers;
+    Vector<BitVector> m_bitVectors;
     Vector<WriteBarrier<Unknown>> m_constantRegisters;
+    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
     typedef Vector<WriteBarrier<UnlinkedFunctionExecutable>> FunctionExpressionVector;
     FunctionExpressionVector m_functionDecls;
     FunctionExpressionVector m_functionExprs;
-
-    WriteBarrier<SymbolTable> m_symbolTable;
-
-    Vector<unsigned> m_propertyAccessInstructions;
-
-#if ENABLE(BYTECODE_COMMENTS)
-    Vector<Comment> m_bytecodeComments;
-    size_t m_bytecodeCommentIterator;
-#endif
+    std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants;
 
     unsigned m_arrayProfileCount;
     unsigned m_arrayAllocationProfileCount;
@@ -548,159 +472,25 @@ public:
         Vector<UnlinkedStringJumpTable> m_stringSwitchJumpTables;
 
         Vector<ExpressionRangeInfo::FatPosition> m_expressionInfoFatPositions;
+
+        struct TypeProfilerExpressionRange {
+            unsigned m_startDivot;
+            unsigned m_endDivot;
+        };
+        HashMap<unsigned, TypeProfilerExpressionRange> m_typeProfilerInfoMap;
+        Vector<size_t> m_opProfileControlFlowBytecodeOffsets;
     };
 
 private:
-    OwnPtr<RareData> m_rareData;
+    std::unique_ptr<RareData> m_rareData;
     Vector<ExpressionRangeInfo> m_expressionInfo;
 
 protected:
-
-    static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
     static void visitChildren(JSCell*, SlotVisitor&);
+    static size_t estimatedSize(JSCell*);
 
 public:
     DECLARE_INFO;
 };
 
-class UnlinkedGlobalCodeBlock : public UnlinkedCodeBlock {
-public:
-    typedef UnlinkedCodeBlock Base;
-
-protected:
-    UnlinkedGlobalCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
-        : Base(vm, structure, codeType, info)
-    {
-    }
-
-    static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
-    DECLARE_INFO;
-};
-
-class UnlinkedProgramCodeBlock : public UnlinkedGlobalCodeBlock {
-private:
-    friend class CodeCache;
-    static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info)
-    {
-        UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(vm->heap)) UnlinkedProgramCodeBlock(vm, vm->unlinkedProgramCodeBlockStructure.get(), info);
-        instance->finishCreation(*vm);
-        return instance;
-    }
-
-public:
-    typedef UnlinkedGlobalCodeBlock Base;
-    static void destroy(JSCell*);
-
-    void addFunctionDeclaration(VM& vm, const Identifier& name, UnlinkedFunctionExecutable* functionExecutable)
-    {
-        m_functionDeclarations.append(std::make_pair(name, WriteBarrier<UnlinkedFunctionExecutable>(vm, this, functionExecutable)));
-    }
-
-    void addVariableDeclaration(const Identifier& name, bool isConstant)
-    {
-        m_varDeclarations.append(std::make_pair(name, isConstant));
-    }
-
-    typedef Vector<std::pair<Identifier, bool>> VariableDeclations;
-    typedef Vector<std::pair<Identifier, WriteBarrier<UnlinkedFunctionExecutable>> > FunctionDeclations;
-
-    const VariableDeclations& variableDeclarations() const { return m_varDeclarations; }
-    const FunctionDeclations& functionDeclarations() const { return m_functionDeclarations; }
-
-    static void visitChildren(JSCell*, SlotVisitor&);
-
-private:
-    UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
-        : Base(vm, structure, GlobalCode, info)
-    {
-    }
-
-    VariableDeclations m_varDeclarations;
-    FunctionDeclations m_functionDeclarations;
-
-public:
-    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
-    {
-        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info());
-    }
-
-    static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
-    DECLARE_INFO;
-};
-
-class UnlinkedEvalCodeBlock : public UnlinkedGlobalCodeBlock {
-private:
-    friend class CodeCache;
-
-    static UnlinkedEvalCodeBlock* create(VM* vm, const ExecutableInfo& info)
-    {
-        UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(vm->heap)) UnlinkedEvalCodeBlock(vm, vm->unlinkedEvalCodeBlockStructure.get(), info);
-        instance->finishCreation(*vm);
-        return instance;
-    }
-
-public:
-    typedef UnlinkedGlobalCodeBlock Base;
-    static void destroy(JSCell*);
-
-    const Identifier& variable(unsigned index) { return m_variables[index]; }
-    unsigned numVariables() { return m_variables.size(); }
-    void adoptVariables(Vector<Identifier, 0, UnsafeVectorOverflow>& variables)
-    {
-        ASSERT(m_variables.isEmpty());
-        m_variables.swap(variables);
-    }
-
-private:
-    UnlinkedEvalCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
-        : Base(vm, structure, EvalCode, info)
-    {
-    }
-
-    Vector<Identifier, 0, UnsafeVectorOverflow> m_variables;
-
-public:
-    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
-    {
-        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), info());
-    }
-
-    static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
-    DECLARE_INFO;
-};
-
-class UnlinkedFunctionCodeBlock : public UnlinkedCodeBlock {
-public:
-    static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info)
-    {
-        UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info);
-        instance->finishCreation(*vm);
-        return instance;
-    }
-
-    typedef UnlinkedCodeBlock Base;
-    static void destroy(JSCell*);
-
-private:
-    UnlinkedFunctionCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
-        : Base(vm, structure, codeType, info)
-    {
-    }
-    
-public:
-    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
-    {
-        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), info());
-    }
-
-    static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
-    DECLARE_INFO;
-};
-
 }
-
-#endif // UnlinkedCodeBlock_h
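
One detail of the rewritten header worth spelling out: addConstant(LinkTimeConstant)
reserves a constant-pool slot whose value is only known when the code block is
linked, and m_linkTimeConstants remembers which slot belongs to which enum value
so that registerIndexForLinkTimeConstant() can recover it later. A minimal,
self-contained sketch of that registry pattern follows; the names are
illustrative stand-ins, not the JSC types.

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum class LinkTimeConstant { ThrowTypeErrorFunction, Sentinel, Count };
    constexpr std::size_t LinkTimeConstantCount = static_cast<std::size_t>(LinkTimeConstant::Count);

    struct ConstantPool {
        std::vector<int> registers; // stand-in for Vector<WriteBarrier<Unknown>>
        std::array<unsigned, LinkTimeConstantCount> linkTimeSlots {};

        // Reserve an empty slot now; the linker fills in the value later.
        unsigned addLinkTimeConstant(LinkTimeConstant type)
        {
            unsigned result = static_cast<unsigned>(registers.size());
            registers.push_back(0); // placeholder value
            linkTimeSlots[static_cast<std::size_t>(type)] = result;
            return result;
        }

        unsigned registerIndexFor(LinkTimeConstant type) const
        {
            return linkTimeSlots[static_cast<std::size_t>(type)];
        }
    };

    int main()
    {
        ConstantPool pool;
        pool.registers.push_back(42); // an ordinary constant occupies slot 0 first
        unsigned slot = pool.addLinkTimeConstant(LinkTimeConstant::Sentinel);
        assert(slot == pool.registerIndexFor(LinkTimeConstant::Sentinel));
        std::printf("sentinel lives in constant slot %u\n", slot);
        return 0;
    }

The real pool stores WriteBarrier<Unknown> cells under the marking lock and
asserts that a reserved slot is never index 0; the sketch keeps only the
bookkeeping.
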
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.cpp
new file mode 100644
index 000000000..07f991688
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedEvalCodeBlock.h"
+
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) };
+
+void UnlinkedEvalCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock();
+}
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.h
new file mode 100644
index 000000000..3130ea448
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedEvalCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+    typedef UnlinkedGlobalCodeBlock Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedEvalCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    {
+        UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(vm->heap)) UnlinkedEvalCodeBlock(vm, vm->unlinkedEvalCodeBlockStructure.get(), info, debuggerMode);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    static void destroy(JSCell*);
+
+    const Identifier& variable(unsigned index) { return m_variables[index]; }
+    unsigned numVariables() { return m_variables.size(); }
+    void adoptVariables(Vector<Identifier, 0, UnsafeVectorOverflow>& variables)
+    {
+        ASSERT(m_variables.isEmpty());
+        m_variables.swap(variables);
+    }
+
+private:
+    UnlinkedEvalCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, EvalCode, info, debuggerMode)
+    {
+    }
+
+    Vector<Identifier, 0, UnsafeVectorOverflow> m_variables;
+
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), info());
+    }
+
+    DECLARE_INFO;
+};
+
+}
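
The new UnlinkedEvalCodeBlock.h above, like its sibling headers in this patch,
follows one cell lifecycle: a static create() placement-news the object into
storage handed out by the heap, finishCreation() completes initialization that
must happen after the constructor, and destroy() invokes the destructor
explicitly because the garbage collector reclaims the raw storage on its own
schedule. A compact sketch of that pattern outside JSC, with every name
illustrative:

    #include <cstdio>
    #include <new>

    struct Heap {
        // Stand-in for allocateCell<T>(heap): hands out raw, uninitialized storage.
        template<typename T> void* allocate() { return ::operator new(sizeof(T)); }
        void deallocate(void* p) { ::operator delete(p); }
    };

    class Cell {
    public:
        static Cell* create(Heap& heap, int value)
        {
            Cell* instance = new (heap.allocate<Cell>()) Cell(value);
            instance->finishCreation(); // post-construction initialization
            return instance;
        }

        static void destroy(Heap& heap, Cell* cell)
        {
            cell->~Cell();         // explicit destructor call, like destroy(JSCell*)
            heap.deallocate(cell); // a real GC reclaims the storage itself
        }

        int value() const { return m_value; }

    private:
        explicit Cell(int value) : m_value(value) {}
        void finishCreation() { std::printf("cell %d ready\n", m_value); }
        int m_value;
    };

    int main()
    {
        Heap heap;
        Cell* cell = Cell::create(heap, 7);
        std::printf("value: %d\n", cell->value());
        Cell::destroy(heap, cell);
        return 0;
    }
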
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.cpp
new file mode 100644
index 000000000..151d56077
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) };
+
+void UnlinkedFunctionCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedFunctionCodeBlock*>(cell)->~UnlinkedFunctionCodeBlock();
+}
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.h
new file mode 100644
index 000000000..b5482b65c
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedFunctionCodeBlock final : public UnlinkedCodeBlock {
+public:
+    typedef UnlinkedCodeBlock Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    {
+        UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info, debuggerMode);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    static void destroy(JSCell*);
+
+private:
+    UnlinkedFunctionCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, codeType, info, debuggerMode)
+    {
+    }
+    
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), info());
+    }
+
+    DECLARE_INFO;
+};
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp
new file mode 100644
index 000000000..2481db5dd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedFunctionExecutable.h"
+
+#include "BytecodeGenerator.h"
+#include "ClassInfo.h"
+#include "CodeCache.h"
+#include "Debugger.h"
+#include "ExecutableInfo.h"
+#include "FunctionOverrides.h"
+#include "JSCInlines.h"
+#include "Parser.h"
+#include "SourceProvider.h"
+#include "Structure.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+namespace JSC {
+
+static_assert(sizeof(UnlinkedFunctionExecutable) <= 256, "UnlinkedFunctionExecutable should fit in a 256-byte cell.");
+
+const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) };
+
+static UnlinkedFunctionCodeBlock* generateUnlinkedFunctionCodeBlock(
+    VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source,
+    CodeSpecializationKind kind, DebuggerMode debuggerMode,
+    UnlinkedFunctionKind functionKind, ParserError& error, SourceParseMode parseMode)
+{
+    JSParserBuiltinMode builtinMode = executable->isBuiltinFunction() ? JSParserBuiltinMode::Builtin : JSParserBuiltinMode::NotBuiltin;
+    JSParserStrictMode strictMode = executable->isInStrictContext() ? JSParserStrictMode::Strict : JSParserStrictMode::NotStrict;
+    JSParserScriptMode scriptMode = executable->scriptMode();
+    ASSERT(isFunctionParseMode(executable->parseMode()));
+    std::unique_ptr<FunctionNode> function = parse<FunctionNode>(
+        &vm, source, executable->name(), builtinMode, strictMode, scriptMode, executable->parseMode(), executable->superBinding(), error, nullptr);
+
+    if (!function) {
+        ASSERT(error.isValid());
+        return nullptr;
+    }
+
+    function->finishParsing(executable->name(), executable->functionMode());
+    executable->recordParse(function->features(), function->hasCapturedVariables());
+
+    bool isClassContext = executable->superBinding() == SuperBinding::Needed;
+
+    UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, ExecutableInfo(function->usesEval(), function->isStrictMode(), kind == CodeForConstruct, functionKind == UnlinkedBuiltinFunction, executable->constructorKind(), scriptMode, executable->superBinding(), parseMode, executable->derivedContextType(), false, isClassContext, EvalContextType::FunctionEvalContext), debuggerMode);
+
+    error = BytecodeGenerator::generate(vm, function.get(), result, debuggerMode, executable->parentScopeTDZVariables());
+
+    if (error.isValid())
+        return nullptr;
+    return result;
+}
+
+UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& parentSource, SourceCode&& parentSourceOverride, FunctionMetadataNode* node, UnlinkedFunctionKind kind, ConstructAbility constructAbility, JSParserScriptMode scriptMode, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType)
+    : Base(*vm, structure)
+    , m_firstLineOffset(node->firstLine() - parentSource.firstLine().oneBasedInt())
+    , m_lineCount(node->lastLine() - node->firstLine())
+    , m_unlinkedFunctionNameStart(node->functionNameStart() - parentSource.startOffset())
+    , m_unlinkedBodyStartColumn(node->startColumn())
+    , m_unlinkedBodyEndColumn(m_lineCount ? node->endColumn() : node->endColumn() - node->startColumn())
+    , m_startOffset(node->source().startOffset() - parentSource.startOffset())
+    , m_sourceLength(node->source().length())
+    , m_parametersStartOffset(node->parametersStart())
+    , m_typeProfilingStartOffset(node->functionKeywordStart())
+    , m_typeProfilingEndOffset(node->startStartOffset() + node->source().length() - 1)
+    , m_parameterCount(node->parameterCount())
+    , m_features(0)
+    , m_sourceParseMode(node->parseMode())
+    , m_isInStrictContext(node->isInStrictContext())
+    , m_hasCapturedVariables(false)
+    , m_isBuiltinFunction(kind == UnlinkedBuiltinFunction)
+    , m_constructAbility(static_cast<unsigned>(constructAbility))
+    , m_constructorKind(static_cast<unsigned>(node->constructorKind()))
+    , m_functionMode(static_cast<unsigned>(node->functionMode()))
+    , m_scriptMode(static_cast<unsigned>(scriptMode))
+    , m_superBinding(static_cast<unsigned>(node->superBinding()))
+    , m_derivedContextType(static_cast<unsigned>(derivedContextType))
+    , m_name(node->ident())
+    , m_ecmaName(node->ecmaName())
+    , m_inferredName(node->inferredName())
+    , m_parentSourceOverride(WTFMove(parentSourceOverride))
+    , m_classSource(node->classSource())
+{
+    // Make sure these bitfields are adequately wide.
+    ASSERT(m_constructAbility == static_cast<unsigned>(constructAbility));
+    ASSERT(m_constructorKind == static_cast<unsigned>(node->constructorKind()));
+    ASSERT(m_functionMode == static_cast<unsigned>(node->functionMode()));
+    ASSERT(m_scriptMode == static_cast<unsigned>(scriptMode));
+    ASSERT(m_superBinding == static_cast<unsigned>(node->superBinding()));
+    ASSERT(m_derivedContextType == static_cast<unsigned>(derivedContextType));
+
+    m_parentScopeTDZVariables.swap(parentScopeTDZVariables);
+}
+
+void UnlinkedFunctionExecutable::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedFunctionExecutable*>(cell)->~UnlinkedFunctionExecutable();
+}
+
+void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    Base::visitChildren(thisObject, visitor);
+    visitor.append(thisObject->m_unlinkedCodeBlockForCall);
+    visitor.append(thisObject->m_unlinkedCodeBlockForConstruct);
+}
+
+FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& passedParentSource, std::optional<int> overrideLineNumber, Intrinsic intrinsic)
+{
+    const SourceCode& parentSource = m_parentSourceOverride.isNull() ? passedParentSource : m_parentSourceOverride;
+    unsigned firstLine = parentSource.firstLine().oneBasedInt() + m_firstLineOffset;
+    unsigned startOffset = parentSource.startOffset() + m_startOffset;
+    unsigned lineCount = m_lineCount;
+
+    unsigned startColumn = linkedStartColumn(parentSource.startColumn().oneBasedInt());
+    unsigned endColumn = linkedEndColumn(startColumn);
+
+    SourceCode source(parentSource.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn);
+    FunctionOverrides::OverrideInfo overrideInfo;
+    bool hasFunctionOverride = false;
+
+    if (UNLIKELY(Options::functionOverrides())) {
+        hasFunctionOverride = FunctionOverrides::initializeOverrideFor(source, overrideInfo);
+        if (UNLIKELY(hasFunctionOverride)) {
+            firstLine = overrideInfo.firstLine;
+            lineCount = overrideInfo.lineCount;
+            startColumn = overrideInfo.startColumn;
+            endColumn = overrideInfo.endColumn;
+            source = overrideInfo.sourceCode;
+        }
+    }
+
+    FunctionExecutable* result = FunctionExecutable::create(vm, source, this, firstLine + lineCount, endColumn, intrinsic);
+    if (overrideLineNumber)
+        result->setOverrideLineNumber(*overrideLineNumber);
+
+    if (UNLIKELY(hasFunctionOverride)) {
+        result->overrideParameterAndTypeProfilingStartEndOffsets(
+            overrideInfo.parametersStartOffset,
+            overrideInfo.typeProfilingStartOffset,
+            overrideInfo.typeProfilingEndOffset);
+    }
+
+    return result;
+}
+
+UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(
+    const Identifier& name, ExecState& exec, const SourceCode& source, 
+    JSObject*& exception, int overrideLineNumber)
+{
+    ParserError error;
+    VM& vm = exec.vm();
+    auto& globalObject = *exec.lexicalGlobalObject();
+    CodeCache* codeCache = vm.codeCache();
+    DebuggerMode debuggerMode = globalObject.hasInteractiveDebugger() ? DebuggerOn : DebuggerOff;
+    UnlinkedFunctionExecutable* executable = codeCache->getUnlinkedGlobalFunctionExecutable(vm, name, source, debuggerMode, error);
+
+    if (globalObject.hasDebugger())
+        globalObject.debugger()->sourceParsed(&exec, source.provider(), error.line(), error.message());
+
+    if (error.isValid()) {
+        exception = error.toErrorObject(&globalObject, source, overrideLineNumber);
+        return nullptr;
+    }
+
+    return executable;
+}
+
+UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::unlinkedCodeBlockFor(
+    VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind, 
+    DebuggerMode debuggerMode, ParserError& error, SourceParseMode parseMode)
+{
+    switch (specializationKind) {
+    case CodeForCall:
+        if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForCall.get())
+            return codeBlock;
+        break;
+    case CodeForConstruct:
+        if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForConstruct.get())
+            return codeBlock;
+        break;
+    }
+
+    UnlinkedFunctionCodeBlock* result = generateUnlinkedFunctionCodeBlock(
+        vm, this, source, specializationKind, debuggerMode, 
+        isBuiltinFunction() ? UnlinkedBuiltinFunction : UnlinkedNormalFunction, 
+        error, parseMode);
+    
+    if (error.isValid())
+        return nullptr;
+
+    switch (specializationKind) {
+    case CodeForCall:
+        m_unlinkedCodeBlockForCall.set(vm, this, result);
+        break;
+    case CodeForConstruct:
+        m_unlinkedCodeBlockForConstruct.set(vm, this, result);
+        break;
+    }
+    return result;
+}
+
+void UnlinkedFunctionExecutable::setInvalidTypeProfilingOffsets()
+{
+    m_typeProfilingStartOffset = std::numeric_limits<unsigned>::max();
+    m_typeProfilingEndOffset = std::numeric_limits<unsigned>::max();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h
new file mode 100644
index 000000000..9c258505b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeConventions.h"
+#include "CodeSpecializationKind.h"
+#include "CodeType.h"
+#include "ConstructAbility.h"
+#include "ExecutableInfo.h"
+#include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
+#include "Identifier.h"
+#include "Intrinsic.h"
+#include "JSCell.h"
+#include "JSString.h"
+#include "ParserModes.h"
+#include "RegExp.h"
+#include "SpecialPointer.h"
+#include "VariableEnvironment.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+class FunctionMetadataNode;
+class FunctionExecutable;
+class ParserError;
+class SourceCode;
+class SourceProvider;
+class UnlinkedFunctionCodeBlock;
+
+enum UnlinkedFunctionKind {
+    UnlinkedNormalFunction,
+    UnlinkedBuiltinFunction,
+};
+
+class UnlinkedFunctionExecutable final : public JSCell {
+public:
+    friend class CodeCache;
+    friend class VM;
+
+    typedef JSCell Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionMetadataNode* node, UnlinkedFunctionKind unlinkedFunctionKind, ConstructAbility constructAbility, JSParserScriptMode scriptMode, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType, SourceCode&& parentSourceOverride = SourceCode())
+    {
+        UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap))
+            UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, WTFMove(parentSourceOverride), node, unlinkedFunctionKind, constructAbility, scriptMode, parentScopeTDZVariables, derivedContextType);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    const Identifier& name() const { return m_name; }
+    const Identifier& ecmaName() const { return m_ecmaName; }
+    void setEcmaName(const Identifier& name) { m_ecmaName = name; }
+    const Identifier& inferredName() const { return m_inferredName; }
+    unsigned parameterCount() const { return m_parameterCount; } // Excluding 'this'!
+    SourceParseMode parseMode() const { return static_cast<SourceParseMode>(m_sourceParseMode); }
+
+    const SourceCode& classSource() const { return m_classSource; }
+    void setClassSource(const SourceCode& source) { m_classSource = source; }
+
+    bool isInStrictContext() const { return m_isInStrictContext; }
+    FunctionMode functionMode() const { return static_cast<FunctionMode>(m_functionMode); }
+    ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+    SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+
+    unsigned lineCount() const { return m_lineCount; }
+    unsigned linkedStartColumn(unsigned parentStartColumn) const { return m_unlinkedBodyStartColumn + (!m_firstLineOffset ? parentStartColumn : 1); }
+    unsigned linkedEndColumn(unsigned startColumn) const { return m_unlinkedBodyEndColumn + (!m_lineCount ? startColumn : 1); }
+
+    unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; }
+    unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; }
+    unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; }
+    unsigned startOffset() const { return m_startOffset; }
+    unsigned sourceLength() { return m_sourceLength; }
+    unsigned parametersStartOffset() const { return m_parametersStartOffset; }
+    unsigned typeProfilingStartOffset() const { return m_typeProfilingStartOffset; }
+    unsigned typeProfilingEndOffset() const { return m_typeProfilingEndOffset; }
+    void setInvalidTypeProfilingOffsets();
+
+    UnlinkedFunctionCodeBlock* unlinkedCodeBlockFor(
+        VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode,
+        ParserError&, SourceParseMode);
+
+    static UnlinkedFunctionExecutable* fromGlobalCode(
+        const Identifier&, ExecState&, const SourceCode&, JSObject*& exception, 
+        int overrideLineNumber);
+
+    JS_EXPORT_PRIVATE FunctionExecutable* link(VM&, const SourceCode& parentSource, std::optional<int> overrideLineNumber = std::nullopt, Intrinsic = NoIntrinsic);
+
+    void clearCode()
+    {
+        m_unlinkedCodeBlockForCall.clear();
+        m_unlinkedCodeBlockForConstruct.clear();
+    }
+
+    void recordParse(CodeFeatures features, bool hasCapturedVariables)
+    {
+        m_features = features;
+        m_hasCapturedVariables = hasCapturedVariables;
+    }
+
+    CodeFeatures features() const { return m_features; }
+    bool hasCapturedVariables() const { return m_hasCapturedVariables; }
+
+    static const bool needsDestruction = true;
+    static void destroy(JSCell*);
+
+    bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+    ConstructAbility constructAbility() const { return static_cast<ConstructAbility>(m_constructAbility); }
+    JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+    bool isClassConstructorFunction() const { return constructorKind() != ConstructorKind::None; }
+    const VariableEnvironment* parentScopeTDZVariables() const { return &m_parentScopeTDZVariables; }
+    
+    bool isArrowFunction() const { return isArrowFunctionParseMode(parseMode()); }
+
+    JSC::DerivedContextType derivedContextType() const { return static_cast<JSC::DerivedContextType>(m_derivedContextType); }
+
+    const String& sourceURLDirective() const { return m_sourceURLDirective; }
+    const String& sourceMappingURLDirective() const { return m_sourceMappingURLDirective; }
+    void setSourceURLDirective(const String& sourceURL) { m_sourceURLDirective = sourceURL; }
+    void setSourceMappingURLDirective(const String& sourceMappingURL) { m_sourceMappingURLDirective = sourceMappingURL; }
+
+private:
+    UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, SourceCode&& parentSourceOverride, FunctionMetadataNode*, UnlinkedFunctionKind, ConstructAbility, JSParserScriptMode, VariableEnvironment&, JSC::DerivedContextType);
+
+    unsigned m_firstLineOffset;
+    unsigned m_lineCount;
+    unsigned m_unlinkedFunctionNameStart;
+    unsigned m_unlinkedBodyStartColumn;
+    unsigned m_unlinkedBodyEndColumn;
+    unsigned m_startOffset;
+    unsigned m_sourceLength;
+    unsigned m_parametersStartOffset;
+    unsigned m_typeProfilingStartOffset;
+    unsigned m_typeProfilingEndOffset;
+    unsigned m_parameterCount;
+    CodeFeatures m_features;
+    SourceParseMode m_sourceParseMode;
+    unsigned m_isInStrictContext : 1;
+    unsigned m_hasCapturedVariables : 1;
+    unsigned m_isBuiltinFunction : 1;
+    unsigned m_constructAbility : 1;
+    unsigned m_constructorKind : 2;
+    unsigned m_functionMode : 2; // FunctionMode
+    unsigned m_scriptMode : 1; // JSParserScriptMode
+    unsigned m_superBinding : 1;
+    unsigned m_derivedContextType : 2;
+
+    WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForCall;
+    WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForConstruct;
+
+    Identifier m_name;
+    Identifier m_ecmaName;
+    Identifier m_inferredName;
+    SourceCode m_parentSourceOverride;
+    SourceCode m_classSource;
+
+    String m_sourceURLDirective;
+    String m_sourceMappingURLDirective;
+
+    VariableEnvironment m_parentScopeTDZVariables;
+
+protected:
+    static void visitChildren(JSCell*, SlotVisitor&);
+
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info());
+    }
+
+    DECLARE_EXPORT_INFO;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedGlobalCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedGlobalCodeBlock.h
new file mode 100644
index 000000000..343862e64
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedGlobalCodeBlock.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedGlobalCodeBlock : public UnlinkedCodeBlock {
+public:
+    typedef UnlinkedCodeBlock Base;
+
+protected:
+    UnlinkedGlobalCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, codeType, info, debuggerMode)
+    {
+    }
+};
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
index 2e07f4f47..e8762ff66 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
+++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
@@ -26,82 +26,9 @@
 #include "config.h"
 #include "UnlinkedInstructionStream.h"
 
-namespace JSC {
+#include "Opcode.h"
 
-// Unlinked instructions are packed in a simple stream format.
-//
-// The first byte is always the opcode.
-// It's followed by an opcode-dependent number of argument values.
-// The first 3 bits of each value determines the format:
-//
-//     5-bit positive integer (1 byte total)
-//     5-bit negative integer (1 byte total)
-//     13-bit positive integer (2 bytes total)
-//     13-bit negative integer (2 bytes total)
-//     5-bit constant register index, based at 0x40000000 (1 byte total)
-//     13-bit constant register index, based at 0x40000000 (2 bytes total)
-//     32-bit raw value (5 bytes total)
-
-enum PackedValueType {
-    Positive5Bit = 0,
-    Negative5Bit,
-    Positive13Bit,
-    Negative13Bit,
-    ConstantRegister5Bit,
-    ConstantRegister13Bit,
-    Full32Bit
-};
-
-UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream)
-    : m_stream(stream)
-    , m_index(0)
-{
-}
-
-inline unsigned char UnlinkedInstructionStream::Reader::read8()
-{
-    return m_stream.m_data.data()[m_index++];
-}
-
-inline unsigned UnlinkedInstructionStream::Reader::read32()
-{
-    const unsigned char* data = &m_stream.m_data.data()[m_index];
-    unsigned char type = data[0] >> 5;
-
-    switch (type) {
-    case Positive5Bit:
-        m_index++;
-        return data[0];
-    case Negative5Bit:
-        m_index++;
-        return 0xffffffe0 | data[0];
-    case Positive13Bit:
-        m_index += 2;
-        return ((data[0] & 0x1F) << 8) | data[1];
-    case Negative13Bit:
-        m_index += 2;
-        return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1];
-    case ConstantRegister5Bit:
-        m_index++;
-        return 0x40000000 | (data[0] & 0x1F);
-    case ConstantRegister13Bit:
-        m_index += 2;
-        return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1];
-    default:
-        ASSERT(type == Full32Bit);
-        m_index += 5;
-        return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24;
-    }
-}
-
-const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next()
-{
-    m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8());
-    unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode);
-    for (unsigned i = 1; i < opLength; ++i)
-        m_unpackedBuffer[i].u.index = read32();
-    return m_unpackedBuffer;
-}
+namespace JSC {
 
 static void append8(unsigned char*& ptr, unsigned char value)
 {
@@ -150,7 +77,7 @@ static void append32(unsigned char*& ptr, unsigned value)
     *(ptr++) = (value >> 24) & 0xff;
 }
 
-UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction>& instructions)
+UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions)
     : m_instructionCount(instructions.size())
 {
     Vector<unsigned char> buffer;
@@ -177,6 +104,11 @@ UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction>& instructions)
     m_data = RefCountedArray<unsigned char>(buffer);
 }
 
+size_t UnlinkedInstructionStream::sizeInBytes() const
+{
+    return m_data.size() * sizeof(unsigned char);
+}
+
 #ifndef NDEBUG
 const RefCountedArray<UnlinkedInstruction>& UnlinkedInstructionStream::unpackForDebugging() const
 {
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
index 5a919a29e..ef139adf7 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
+++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
@@ -24,19 +24,21 @@
  */
 
 
-#ifndef UnlinkedInstructionStream_h
-#define UnlinkedInstructionStream_h
+#pragma once
 
+#include "Opcode.h"
 #include "UnlinkedCodeBlock.h"
 #include <wtf/RefCountedArray.h>
 
 namespace JSC {
 
 class UnlinkedInstructionStream {
+    WTF_MAKE_FAST_ALLOCATED;
 public:
-    explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction>&);
+    explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>&);
 
     unsigned count() const { return m_instructionCount; }
+    size_t sizeInBytes() const;
 
     class Reader {
     public:
@@ -69,6 +71,79 @@ private:
     unsigned m_instructionCount;
 };
 
-} // namespace JSC
+// Unlinked instructions are packed in a simple stream format.
+//
+// The first byte is always the opcode.
+// It's followed by an opcode-dependent number of argument values.
+// The first 3 bits of each value determine the format:
+//
+//     5-bit positive integer (1 byte total)
+//     5-bit negative integer (1 byte total)
+//     13-bit positive integer (2 bytes total)
+//     13-bit negative integer (2 bytes total)
+//     5-bit constant register index, based at 0x40000000 (1 byte total)
+//     13-bit constant register index, based at 0x40000000 (2 bytes total)
+//     32-bit raw value (5 bytes total)
+
+enum PackedValueType {
+    Positive5Bit = 0,
+    Negative5Bit,
+    Positive13Bit,
+    Negative13Bit,
+    ConstantRegister5Bit,
+    ConstantRegister13Bit,
+    Full32Bit
+};
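+
+// Worked example (editor's sketch, not part of the original format notes):
+// the tag occupies the top 3 bits of the first byte. The value 7 fits in
+// 5 bits, so it packs as the single byte 0b000'00111 (tag Positive5Bit).
+// The value -3 packs as 0b001'11101 (tag Negative5Bit); Reader::read32()
+// below rebuilds it by OR-ing the byte with 0xffffffe0, giving 0xfffffffd.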
 
-#endif // UnlinkedInstructionStream_h
+ALWAYS_INLINE UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream)
+    : m_stream(stream)
+    , m_index(0)
+{
+}
+
+ALWAYS_INLINE unsigned char UnlinkedInstructionStream::Reader::read8()
+{
+    return m_stream.m_data.data()[m_index++];
+}
+
+ALWAYS_INLINE unsigned UnlinkedInstructionStream::Reader::read32()
+{
+    const unsigned char* data = &m_stream.m_data.data()[m_index];
+    unsigned char type = data[0] >> 5;
+
+    switch (type) {
+    case Positive5Bit:
+        m_index++;
+        return data[0];
+    case Negative5Bit:
+        m_index++;
+        return 0xffffffe0 | data[0];
+    case Positive13Bit:
+        m_index += 2;
+        return ((data[0] & 0x1F) << 8) | data[1];
+    case Negative13Bit:
+        m_index += 2;
+        return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1];
+    case ConstantRegister5Bit:
+        m_index++;
+        return 0x40000000 | (data[0] & 0x1F);
+    case ConstantRegister13Bit:
+        m_index += 2;
+        return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1];
+    default:
+        ASSERT(type == Full32Bit);
+        m_index += 5;
+        return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24;
+    }
+}
+
+ALWAYS_INLINE const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next()
+{
+    m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8());
+    unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode);
+    for (unsigned i = 1; i < opLength; ++i)
+        m_unpackedBuffer[i].u.index = read32();
+    return m_unpackedBuffer;
+}
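+
+// Sketch of typical consumption (editor's illustration; unpackForDebugging()
+// in the .cpp follows the same pattern): walk the stream one instruction at
+// a time, advancing by each opcode's length.
+//
+//     UnlinkedInstructionStream::Reader reader(stream);
+//     for (unsigned i = 0; i < stream.count(); ) {
+//         const UnlinkedInstruction* inst = reader.next();
+//         i += opcodeLength(inst[0].u.opcode);
+//     }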
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.cpp
new file mode 100644
index 000000000..00f36c0ac
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+
+#include "HeapInlines.h"
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedModuleProgramCodeBlock::s_info = { "UnlinkedModuleProgramCodeBlock", &Base::s_info, nullptr, CREATE_METHOD_TABLE(UnlinkedModuleProgramCodeBlock) };
+
+void UnlinkedModuleProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    UnlinkedModuleProgramCodeBlock* thisObject = jsCast<UnlinkedModuleProgramCodeBlock*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    Base::visitChildren(thisObject, visitor);
+}
+
+void UnlinkedModuleProgramCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedModuleProgramCodeBlock*>(cell)->~UnlinkedModuleProgramCodeBlock();
+}
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.h
new file mode 100644
index 000000000..8676a2438
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedModuleProgramCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+    typedef UnlinkedGlobalCodeBlock Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedModuleProgramCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    {
+        UnlinkedModuleProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedModuleProgramCodeBlock>(vm->heap)) UnlinkedModuleProgramCodeBlock(vm, vm->unlinkedModuleProgramCodeBlockStructure.get(), info, debuggerMode);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    static void destroy(JSCell*);
+
+    static void visitChildren(JSCell*, SlotVisitor&);
+
+    // This offset represents the constant register offset to the stored symbol table that describes the layout of the
+    // module environment. This symbol table is created by the bytecode generator, since the module environment includes
+    // the top-most lexically captured variables inside the module code. This means that, once the module environment is
+    // allocated and instantiated from this symbol table, it is tightly coupled with the specific unlinked module program
+    // code block and the stored symbol table. So until the module code has executed, we must not clear the unlinked module
+    // program code block in the module executable. This requirement is met because the garbage collector only clears
+    // unlinked code in (1) unmarked executables and (2) function executables.
+    //
+    // Since the function code may be executed repeatedly and the environment of each function execution is different,
+    // the function code needs to allocate and instantiate the environment in its prologue. On the
+    // other hand, the module code is executed only once. So we can instantiate the module environment outside the module
+    // code. At that time, we construct the module environment by using the symbol table that is held by the module executable.
+    // The symbol table held by the executable is the cloned one from one in the unlinked code block. Instantiating the module
+    // environment before executing and linking the module code is required to link the imported bindings between the modules.
+    //
+    // The unlinked module program code block only holds the pre-cloned symbol table in its constant register pool. It does
+    // not hold the instantiated module environment. So while each module environment requires its specific unlinked module
+    // program code block, one unlinked module program code block can serve every module environment instantiated from it.
+    // There is a 1:N relation between the unlinked module program code block and the module environments. So the
+    // unlinked module program code block can be cached.
+    //
+    // On the other hand, the linked code block for the module environment includes the resolved references to the imported
+    // bindings. An imported binding references another module's environment, so the linked code block is tightly coupled
+    // with the specific set of the module environments. Thus, the linked code block should not be cached.
+    int moduleEnvironmentSymbolTableConstantRegisterOffset() { return m_moduleEnvironmentSymbolTableConstantRegisterOffset; }
+    void setModuleEnvironmentSymbolTableConstantRegisterOffset(int offset)
+    {
+        m_moduleEnvironmentSymbolTableConstantRegisterOffset = offset;
+    }
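+
+    // Illustrative sketch (editor's note, not code from this patch): before
+    // instantiating the module environment, a client can pull the pre-cloned
+    // symbol table back out of the constant pool, roughly:
+    //
+    //     int offset = block->moduleEnvironmentSymbolTableConstantRegisterOffset();
+    //     SymbolTable* table = jsCast<SymbolTable*>(block->constantRegister(offset).get());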
+
+private:
+    UnlinkedModuleProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, ModuleCode, info, debuggerMode)
+    {
+    }
+
+    int m_moduleEnvironmentSymbolTableConstantRegisterOffset { 0 };
+
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedModuleProgramCodeBlockType, StructureFlags), info());
+    }
+
+    DECLARE_INFO;
+};
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.cpp
new file mode 100644
index 000000000..95df29990
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedProgramCodeBlock.h"
+
+#include "HeapInlines.h"
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) };
+
+void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    Base::visitChildren(thisObject, visitor);
+}
+
+void UnlinkedProgramCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock();
+}
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.h
new file mode 100644
index 000000000..290eae47f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedProgramCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+    typedef UnlinkedGlobalCodeBlock Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    {
+        UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(vm->heap)) UnlinkedProgramCodeBlock(vm, vm->unlinkedProgramCodeBlockStructure.get(), info, debuggerMode);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    static void destroy(JSCell*);
+
+    void setVariableDeclarations(const VariableEnvironment& environment) { m_varDeclarations = environment; }
+    const VariableEnvironment& variableDeclarations() const { return m_varDeclarations; }
+
+    void setLexicalDeclarations(const VariableEnvironment& environment) { m_lexicalDeclarations = environment; }
+    const VariableEnvironment& lexicalDeclarations() const { return m_lexicalDeclarations; }
+
+    static void visitChildren(JSCell*, SlotVisitor&);
+
+private:
+    UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, GlobalCode, info, debuggerMode)
+    {
+    }
+
+    VariableEnvironment m_varDeclarations;
+    VariableEnvironment m_lexicalDeclarations;
+
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info());
+    }
+
+    DECLARE_INFO;
+};
+
+}
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h
index 0790f79da..8724eb47d 100644
--- a/Source/JavaScriptCore/bytecode/ValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2013, 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -26,14 +26,14 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef ValueProfile_h
-#define ValueProfile_h
+#pragma once
 
-#include "ConcurrentJITLock.h"
+#include "ConcurrentJSLock.h"
 #include "Heap.h"
 #include "JSArray.h"
 #include "SpeculatedType.h"
 #include "Structure.h"
+#include "TagRegistersMode.h"
 #include "WriteBarrier.h"
 #include <wtf/PrintStream.h>
 #include <wtf/StringPrintStream.h>
@@ -106,7 +106,7 @@ struct ValueProfileBase {
         return false;
     }
     
-    CString briefDescription(const ConcurrentJITLocker& locker)
+    CString briefDescription(const ConcurrentJSLocker& locker)
     {
         computeUpdatedPrediction(locker);
         
@@ -134,7 +134,7 @@ struct ValueProfileBase {
     
     // Updates the prediction and returns the new one. Never call this from any thread
     // that isn't executing the code.
-    SpeculatedType computeUpdatedPrediction(const ConcurrentJITLocker&)
+    SpeculatedType computeUpdatedPrediction(const ConcurrentJSLocker&)
     {
         for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
             JSValue value = JSValue::decode(m_buckets[i]);
@@ -207,6 +207,3 @@ inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile)
 }
 
 } // namespace JSC
-
-#endif // ValueProfile_h
-
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.cpp b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
index 5032684dd..9c083b04a 100644
--- a/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,7 +27,7 @@
 #include "ValueRecovery.h"
 
 #include "CodeBlock.h"
-#include "Operations.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
@@ -86,34 +86,40 @@ void ValueRecovery::dumpInContext(PrintStream& out, DumpContext* context) const
     case InFPR:
         out.print(fpr());
         return;
+    case UnboxedDoubleInFPR:
+        out.print("double(", fpr(), ")");
+        return;
 #if USE(JSVALUE32_64)
     case InPair:
         out.print("pair(", tagGPR(), ", ", payloadGPR(), ")");
         return;
 #endif
     case DisplacedInJSStack:
-        out.printf("*%d", virtualRegister().offset());
+        out.print("*", virtualRegister());
         return;
     case Int32DisplacedInJSStack:
-        out.printf("*int32(%d)", virtualRegister().offset());
+        out.print("*int32(", virtualRegister(), ")");
         return;
     case Int52DisplacedInJSStack:
-        out.printf("*int52(%d)", virtualRegister().offset());
+        out.print("*int52(", virtualRegister(), ")");
         return;
     case StrictInt52DisplacedInJSStack:
-        out.printf("*strictInt52(%d)", virtualRegister().offset());
+        out.print("*strictInt52(", virtualRegister(), ")");
         return;
     case DoubleDisplacedInJSStack:
-        out.printf("*double(%d)", virtualRegister().offset());
+        out.print("*double(", virtualRegister(), ")");
         return;
     case CellDisplacedInJSStack:
-        out.printf("*cell(%d)", virtualRegister().offset());
+        out.print("*cell(", virtualRegister(), ")");
         return;
     case BooleanDisplacedInJSStack:
-        out.printf("*bool(%d)", virtualRegister().offset());
+        out.print("*bool(", virtualRegister(), ")");
+        return;
+    case DirectArgumentsThatWereNotCreated:
+        out.print("DirectArguments(", nodeID(), ")");
         return;
-    case ArgumentsThatWereNotCreated:
-        out.printf("arguments");
+    case ClonedArgumentsThatWereNotCreated:
+        out.print("ClonedArguments(", nodeID(), ")");
         return;
     case Constant:
         out.print("[", inContext(constant(), context), "]");
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.h b/Source/JavaScriptCore/bytecode/ValueRecovery.h
index 3af2c3409..c98fd2075 100644
--- a/Source/JavaScriptCore/bytecode/ValueRecovery.h
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,22 +23,23 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef ValueRecovery_h
-#define ValueRecovery_h
+#pragma once
 
+#include "DFGMinifiedID.h"
 #include "DataFormat.h"
 #if ENABLE(JIT)
 #include "GPRInfo.h"
 #include "FPRInfo.h"
+#include "Reg.h"
 #endif
 #include "JSCJSValue.h"
 #include "MacroAssembler.h"
 #include "VirtualRegister.h"
-#include <wtf/Platform.h>
 
 namespace JSC {
 
 struct DumpContext;
+struct InlineCallFrame;
 
 // Describes how to recover a given bytecode virtual register at a given
 // code point.
@@ -54,6 +55,7 @@ enum ValueRecoveryTechnique {
     InPair,
 #endif
     InFPR,
+    UnboxedDoubleInFPR,
     // It's in the stack, but at a different location.
     DisplacedInJSStack,
     // It's in the stack, at a different location, and it's unboxed.
@@ -63,8 +65,9 @@ enum ValueRecoveryTechnique {
     DoubleDisplacedInJSStack,
     CellDisplacedInJSStack,
     BooleanDisplacedInJSStack,
-    // It's an Arguments object.
-    ArgumentsThatWereNotCreated,
+    // It's an Arguments object. This arises because of the arguments simplification done by the DFG.
+    DirectArgumentsThatWereNotCreated,
+    ClonedArgumentsThatWereNotCreated,
     // It's a constant.
     Constant,
     // Don't know how to recover it.
@@ -80,6 +83,19 @@ public:
     
     bool isSet() const { return m_technique != DontKnow; }
     bool operator!() const { return !isSet(); }
+
+#if ENABLE(JIT)
+    static ValueRecovery inRegister(Reg reg, DataFormat dataFormat)
+    {
+        if (reg.isGPR())
+            return inGPR(reg.gpr(), dataFormat);
+
+        ASSERT(reg.isFPR());
+        return inFPR(reg.fpr(), dataFormat);
+    }
+#endif
+
+    explicit operator bool() const { return isSet(); }
     
     static ValueRecovery inGPR(MacroAssembler::RegisterID gpr, DataFormat dataFormat)
     {
@@ -115,10 +131,14 @@ public:
     }
 #endif
 
-    static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr)
+    static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr, DataFormat dataFormat)
     {
+        ASSERT(dataFormat == DataFormatDouble || dataFormat & DataFormatJS);
         ValueRecovery result;
-        result.m_technique = InFPR;
+        if (dataFormat == DataFormatDouble)
+            result.m_technique = UnboxedDoubleInFPR;
+        else
+            result.m_technique = InFPR;
         result.m_source.fpr = fpr;
         return result;
     }
@@ -168,18 +188,27 @@ public:
         return result;
     }
     
-    static ValueRecovery argumentsThatWereNotCreated()
+    static ValueRecovery directArgumentsThatWereNotCreated(DFG::MinifiedID id)
     {
         ValueRecovery result;
-        result.m_technique = ArgumentsThatWereNotCreated;
+        result.m_technique = DirectArgumentsThatWereNotCreated;
+        result.m_source.nodeID = id.bits();
         return result;
     }
     
+    static ValueRecovery clonedArgumentsThatWereNotCreated(DFG::MinifiedID id)
+    {
+        ValueRecovery result;
+        result.m_technique = ClonedArgumentsThatWereNotCreated;
+        result.m_source.nodeID = id.bits();
+        return result;
+    }
+
     ValueRecoveryTechnique technique() const { return m_technique; }
     
     bool isConstant() const { return m_technique == Constant; }
-    
-    bool isInRegisters() const
+
+    bool isInGPR() const
     {
         switch (m_technique) {
         case InGPR:
@@ -188,19 +217,81 @@ public:
         case UnboxedCellInGPR:
         case UnboxedInt52InGPR:
         case UnboxedStrictInt52InGPR:
-#if USE(JSVALUE32_64)
-        case InPair:
-#endif
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isInFPR() const
+    {
+        switch (m_technique) {
         case InFPR:
+        case UnboxedDoubleInFPR:
             return true;
         default:
             return false;
         }
     }
+
+    bool isInRegisters() const
+    {
+        return isInJSValueRegs() || isInGPR() || isInFPR();
+    }
+
+    bool isInJSStack() const
+    {
+        switch (m_technique) {
+        case DisplacedInJSStack:
+        case Int32DisplacedInJSStack:
+        case Int52DisplacedInJSStack:
+        case StrictInt52DisplacedInJSStack:
+        case DoubleDisplacedInJSStack:
+        case CellDisplacedInJSStack:
+        case BooleanDisplacedInJSStack:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    DataFormat dataFormat() const
+    {
+        switch (m_technique) {
+        case InGPR:
+        case InFPR:
+        case DisplacedInJSStack:
+        case Constant:
+#if USE(JSVALUE32_64)
+        case InPair:
+#endif
+            return DataFormatJS;
+        case UnboxedInt32InGPR:
+        case Int32DisplacedInJSStack:
+            return DataFormatInt32;
+        case UnboxedInt52InGPR:
+        case Int52DisplacedInJSStack:
+            return DataFormatInt52;
+        case UnboxedStrictInt52InGPR:
+        case StrictInt52DisplacedInJSStack:
+            return DataFormatStrictInt52;
+        case UnboxedBooleanInGPR:
+        case BooleanDisplacedInJSStack:
+            return DataFormatBoolean;
+        case UnboxedCellInGPR:
+        case CellDisplacedInJSStack:
+            return DataFormatCell;
+        case UnboxedDoubleInFPR:
+        case DoubleDisplacedInJSStack:
+            return DataFormatDouble;
+        default:
+            return DataFormatNone;
+        }
+    }
     
     MacroAssembler::RegisterID gpr() const
     {
-        ASSERT(m_technique == InGPR || m_technique == UnboxedInt32InGPR || m_technique == UnboxedBooleanInGPR || m_technique == UnboxedInt52InGPR || m_technique == UnboxedStrictInt52InGPR || m_technique == UnboxedCellInGPR);
+        ASSERT(isInGPR());
         return m_source.gpr;
     }
     
@@ -216,29 +307,101 @@ public:
         ASSERT(m_technique == InPair);
         return m_source.pair.payloadGPR;
     }
-#endif
+
+    bool isInJSValueRegs() const
+    {
+        return m_technique == InPair;
+    }
+
+#if ENABLE(JIT)
+    JSValueRegs jsValueRegs() const
+    {
+        ASSERT(isInJSValueRegs());
+        return JSValueRegs(tagGPR(), payloadGPR());
+    }
+#endif // ENABLE(JIT)
+#else
+    bool isInJSValueRegs() const
+    {
+        return isInGPR();
+    }
+#endif // USE(JSVALUE32_64)
     
     MacroAssembler::FPRegisterID fpr() const
     {
-        ASSERT(m_technique == InFPR);
+        ASSERT(isInFPR());
         return m_source.fpr;
     }
     
     VirtualRegister virtualRegister() const
     {
-        ASSERT(m_technique == DisplacedInJSStack || m_technique == Int32DisplacedInJSStack || m_technique == DoubleDisplacedInJSStack || m_technique == CellDisplacedInJSStack || m_technique == BooleanDisplacedInJSStack || m_technique == Int52DisplacedInJSStack || m_technique == StrictInt52DisplacedInJSStack);
+        ASSERT(isInJSStack());
         return VirtualRegister(m_source.virtualReg);
     }
     
+    ValueRecovery withLocalsOffset(int offset) const
+    {
+        switch (m_technique) {
+        case DisplacedInJSStack:
+        case Int32DisplacedInJSStack:
+        case DoubleDisplacedInJSStack:
+        case CellDisplacedInJSStack:
+        case BooleanDisplacedInJSStack:
+        case Int52DisplacedInJSStack:
+        case StrictInt52DisplacedInJSStack: {
+            ValueRecovery result;
+            result.m_technique = m_technique;
+            result.m_source.virtualReg = m_source.virtualReg + offset;
+            return result;
+        }
+            
+        default:
+            return *this;
+        }
+    }
+    
     JSValue constant() const
     {
-        ASSERT(m_technique == Constant);
+        ASSERT(isConstant());
         return JSValue::decode(m_source.constant);
     }
     
+    DFG::MinifiedID nodeID() const
+    {
+        ASSERT(m_technique == DirectArgumentsThatWereNotCreated || m_technique == ClonedArgumentsThatWereNotCreated);
+        return DFG::MinifiedID::fromBits(m_source.nodeID);
+    }
+    
     JSValue recover(ExecState*) const;
     
 #if ENABLE(JIT)
+    template<typename Func>
+    void forEachReg(const Func& func)
+    {
+        switch (m_technique) {
+        case InGPR:
+        case UnboxedInt32InGPR:
+        case UnboxedBooleanInGPR:
+        case UnboxedCellInGPR:
+        case UnboxedInt52InGPR:
+        case UnboxedStrictInt52InGPR:
+            func(gpr());
+            return;
+        case InFPR:
+        case UnboxedDoubleInFPR:
+            func(fpr());
+            return;
+#if USE(JSVALUE32_64)
+        case InPair:
+            func(jsValueRegs().payloadGPR());
+            func(jsValueRegs().tagGPR());
+            return;
+#endif
+        default:
+            return;
+        }
+    }
+    
     void dumpInContext(PrintStream& out, DumpContext* context) const;
     void dump(PrintStream& out) const;
 #endif
@@ -256,9 +419,8 @@ private:
 #endif
         int virtualReg;
         EncodedJSValue constant;
+        uintptr_t nodeID;
     } m_source;
 };
 
 } // namespace JSC
-
-#endif // ValueRecovery_h
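
The ValueRecovery changes above extend a tagged union: an enum discriminant (m_technique) selects which member of m_source is live, and withLocalsOffset() rebases only the stack-relative variants. Below is a minimal standalone sketch of the same pattern, with illustrative names rather than the real JSC types:

    #include <cassert>
    #include <cstdint>

    // A cut-down analogue of ValueRecovery: an enum discriminant plus a union,
    // where only stack-relative variants respond to a frame rebase.
    enum class Technique : uint8_t { InRegister, DisplacedInStack, Constant };

    struct Recovery {
        Technique technique { Technique::Constant };
        union {
            unsigned reg;
            int virtualReg;   // stack slot, as a signed operand
            int64_t constant;
        } source { };

        Recovery withLocalsOffset(int offset) const
        {
            if (technique != Technique::DisplacedInStack)
                return *this; // registers and constants don't move with the frame
            Recovery result = *this;
            result.source.virtualReg += offset;
            return result;
        }
    };

    int main()
    {
        Recovery displaced;
        displaced.technique = Technique::DisplacedInStack;
        displaced.source.virtualReg = -3;
        assert(displaced.withLocalsOffset(-2).source.virtualReg == -5);

        Recovery constant;
        constant.source.constant = 42;
        assert(constant.withLocalsOffset(-2).source.constant == 42); // untouched
    }
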
diff --git a/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h b/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h
deleted file mode 100644
index 4dec40495..000000000
--- a/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef VariableWatchpointSet_h
-#define VariableWatchpointSet_h
-
-#include "Watchpoint.h"
-#include "WriteBarrier.h"
-
-namespace JSC {
-
-class VariableWatchpointSet : public WatchpointSet {
-    friend class LLIntOffsetsExtractor;
-public:
-    VariableWatchpointSet()
-        : WatchpointSet(ClearWatchpoint)
-    {
-    }
-    
-    ~VariableWatchpointSet() { }
-    
-    // For the purpose of deciding whether or not to watch this variable, you only need
-    // to inspect inferredValue(). If this returns something other than the empty
-    // value, then it means that at all future safepoints, this watchpoint set will be
-    // in one of these states:
-    //
-    //    IsWatched: in this case, the variable's value must still be the
-    //        inferredValue.
-    //
-    //    IsInvalidated: in this case the variable's value may be anything but you'll
-    //        either notice that it's invalidated and not install the watchpoint, or
-    //        you will have been notified that the watchpoint was fired.
-    JSValue inferredValue() const { return m_inferredValue; }
-    
-    void notifyWrite(JSValue value)
-    {
-        ASSERT(!!value);
-        switch (state()) {
-        case ClearWatchpoint:
-            m_inferredValue = value;
-            startWatching();
-            return;
-
-        case IsWatched:
-            ASSERT(!!m_inferredValue);
-            if (value == m_inferredValue)
-                return;
-            invalidate();
-            return;
-            
-        case IsInvalidated:
-            ASSERT(!m_inferredValue);
-            return;
-        }
-        
-        ASSERT_NOT_REACHED();
-    }
-    
-    void invalidate()
-    {
-        m_inferredValue = JSValue();
-        WatchpointSet::invalidate();
-    }
-    
-    void finalizeUnconditionally()
-    {
-        ASSERT(!!m_inferredValue == (state() == IsWatched));
-        if (!m_inferredValue)
-            return;
-        if (!m_inferredValue.isCell())
-            return;
-        JSCell* cell = m_inferredValue.asCell();
-        if (Heap::isMarked(cell))
-            return;
-        invalidate();
-    }
-    
-    JSValue* addressOfInferredValue() { return &m_inferredValue; }
-
-private:
-    JSValue m_inferredValue;
-};
-
-} // namespace JSC
-
-#endif // VariableWatchpointSet_h
-
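
The deleted class above implements a three-state inference machine: the first observed write records an inferred value and starts watching, a conflicting later write invalidates the set, and an invalidated set stays dead. A self-contained sketch of that state machine, using hypothetical names in place of the JSC types:

    #include <cassert>
    #include <optional>

    enum class State { Clear, Watched, Invalidated };

    class InferredValue {
    public:
        void notifyWrite(int value)
        {
            switch (m_state) {
            case State::Clear:       // first write: start watching this value
                m_inferred = value;
                m_state = State::Watched;
                return;
            case State::Watched:     // a matching write keeps the inference alive
                if (value == m_inferred)
                    return;
                invalidate();
                return;
            case State::Invalidated: // nothing left to infer
                return;
            }
        }

        void invalidate() { m_inferred.reset(); m_state = State::Invalidated; }
        std::optional<int> inferredValue() const { return m_inferred; }

    private:
        State m_state { State::Clear };
        std::optional<int> m_inferred;
    };

    int main()
    {
        InferredValue v;
        v.notifyWrite(7);
        assert(v.inferredValue() == 7);
        v.notifyWrite(7);   // still watched
        assert(v.inferredValue() == 7);
        v.notifyWrite(8);   // conflicting write invalidates
        assert(!v.inferredValue());
    }
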
diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp
new file mode 100644
index 000000000..ec6198449
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "VariableWriteFireDetail.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void VariableWriteFireDetail::dump(PrintStream& out) const
+{
+    out.print("Write to ", m_name, " in ", JSValue(m_object));
+}
+
+void VariableWriteFireDetail::touch(VM& vm, WatchpointSet* set, JSObject* object, const PropertyName& name)
+{
+    set->touch(vm, VariableWriteFireDetail(object, name));
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h
new file mode 100644
index 000000000..42ffb1b59
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "Watchpoint.h"
+
+namespace JSC {
+
+class JSObject;
+class PropertyName;
+
+class VariableWriteFireDetail : public FireDetail {
+public:
+    VariableWriteFireDetail(JSObject* object, const PropertyName& name)
+        : m_object(object)
+        , m_name(name)
+    {
+    }
+    
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const override;
+    
+    JS_EXPORT_PRIVATE static void touch(VM&, WatchpointSet*, JSObject*, const PropertyName&);
+
+private:
+    JSObject* m_object;
+    const PropertyName& m_name;
+};
+
+} // namespace JSC
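
VariableWriteFireDetail follows the FireDetail pattern introduced in Watchpoint.h below: a stack-allocated description of why a watchpoint fired, whose formatting cost is paid only if something asks for the message. A minimal sketch of the pattern with illustrative names (the real class also deletes operator new to enforce stack allocation, which this sketch omits):

    #include <iostream>
    #include <string>

    // The FireDetail pattern: a cheap, stack-allocated description of why a
    // watchpoint fired; the string is only built if someone asks for it.
    struct FireDetail {
        virtual ~FireDetail() = default;
        virtual void dump(std::ostream&) const = 0;
    };

    struct VariableWriteDetail : FireDetail {
        VariableWriteDetail(const std::string& object, const std::string& name)
            : object(object), name(name) { }
        void dump(std::ostream& out) const override
        {
            out << "Write to " << name << " in " << object;
        }
        const std::string& object;
        const std::string& name;
    };

    void fire(const FireDetail& detail)
    {
        detail.dump(std::cout); // formatting happens only at this point
        std::cout << '\n';
    }

    int main()
    {
        std::string object = "globalObject";
        std::string name = "foo";
        fire(VariableWriteDetail(object, name)); // prints: Write to foo in globalObject
    }
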
diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.cpp b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp
new file mode 100644
index 000000000..57cdb62c9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+void VirtualRegister::dump(PrintStream& out) const
+{
+    if (!isValid()) {
+        out.print("");
+        return;
+    }
+    
+    if (isHeader()) {
+        out.print("head", m_virtualRegister);
+        return;
+    }
+    
+    if (isConstant()) {
+        out.print("const", toConstantIndex());
+        return;
+    }
+    
+    if (isArgument()) {
+        if (!toArgument())
+            out.print("this");
+        else
+            out.print("arg", toArgument());
+        return;
+    }
+    
+    if (isLocal()) {
+        out.print("loc", toLocal());
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
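
Read together with the encoding in VirtualRegister.h, the branches above print loc<N> for locals, arg<N> or this for arguments, head<N> for call-frame header slots, and const<N> for constants. A sketch of the same naming over a toy encoding; the header-size constant below is invented for illustration and is not JSC's real layout:

    #include <iostream>
    #include <string>

    constexpr int firstConstantIndex = 0x40000000; // stand-in for FirstConstantRegisterIndex
    constexpr int thisArgumentOffset = 6;          // toy header size, not JSC's real layout

    std::string dumpOperand(int operand)
    {
        if (operand >= firstConstantIndex)
            return "const" + std::to_string(operand - firstConstantIndex);
        if (operand < 0)
            return "loc" + std::to_string(-1 - operand);   // operandToLocal
        if (operand < thisArgumentOffset)
            return "head" + std::to_string(operand);       // call-frame header slot
        int argument = operand - thisArgumentOffset;
        return argument ? "arg" + std::to_string(argument) : std::string("this");
    }

    int main()
    {
        std::cout << dumpOperand(-1) << ' '                       // loc0
                  << dumpOperand(thisArgumentOffset) << ' '       // this
                  << dumpOperand(thisArgumentOffset + 2) << ' '   // arg2
                  << dumpOperand(firstConstantIndex + 1) << '\n'; // const1
    }
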
diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.h b/Source/JavaScriptCore/bytecode/VirtualRegister.h
index c63aee85f..f32e8d24f 100644
--- a/Source/JavaScriptCore/bytecode/VirtualRegister.h
+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,12 +23,10 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef VirtualRegister_h
-#define VirtualRegister_h
+#pragma once
 
+#include "BytecodeConventions.h"
 #include "CallFrame.h"
-
-#include 
 #include <wtf/PrintStream.h>
 
 namespace JSC {
@@ -60,18 +58,51 @@ public:
     bool isValid() const { return (m_virtualRegister != s_invalidVirtualRegister); }
     bool isLocal() const { return operandIsLocal(m_virtualRegister); }
     bool isArgument() const { return operandIsArgument(m_virtualRegister); }
+    bool isHeader() const { return m_virtualRegister >= 0 && m_virtualRegister < CallFrameSlot::thisArgument; }
     bool isConstant() const { return m_virtualRegister >= s_firstConstantRegisterIndex; }
     int toLocal() const { ASSERT(isLocal()); return operandToLocal(m_virtualRegister); }
     int toArgument() const { ASSERT(isArgument()); return operandToArgument(m_virtualRegister); }
     int toConstantIndex() const { ASSERT(isConstant()); return m_virtualRegister - s_firstConstantRegisterIndex; }
     int offset() const { return m_virtualRegister; }
-
-    bool operator==(const VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; }
-    bool operator!=(const VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; }
+    int offsetInBytes() const { return m_virtualRegister * sizeof(Register); }
+
+    bool operator==(VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; }
+    bool operator!=(VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; }
+    bool operator<(VirtualRegister other) const { return m_virtualRegister < other.m_virtualRegister; }
+    bool operator>(VirtualRegister other) const { return m_virtualRegister > other.m_virtualRegister; }
+    bool operator<=(VirtualRegister other) const { return m_virtualRegister <= other.m_virtualRegister; }
+    bool operator>=(VirtualRegister other) const { return m_virtualRegister >= other.m_virtualRegister; }
+    
+    VirtualRegister operator+(int value) const
+    {
+        return VirtualRegister(offset() + value);
+    }
+    VirtualRegister operator-(int value) const
+    {
+        return VirtualRegister(offset() - value);
+    }
+    VirtualRegister operator+(VirtualRegister value) const
+    {
+        return VirtualRegister(offset() + value.offset());
+    }
+    VirtualRegister operator-(VirtualRegister value) const
+    {
+        return VirtualRegister(offset() - value.offset());
+    }
+    VirtualRegister& operator+=(int value)
+    {
+        return *this = *this + value;
+    }
+    VirtualRegister& operator-=(int value)
+    {
+        return *this = *this - value;
+    }
+    
+    void dump(PrintStream& out) const;
 
 private:
     static const int s_invalidVirtualRegister = 0x3fffffff;
-    static const int s_firstConstantRegisterIndex = 0x40000000;
+    static const int s_firstConstantRegisterIndex = FirstConstantRegisterIndex;
 
     static int localToOperand(int local) { return -1 - local; }
     static int operandToLocal(int operand) { return -1 - operand; }
@@ -94,14 +125,3 @@ inline VirtualRegister virtualRegisterForArgument(int argument, int offset = 0)
 }
 
 } // namespace JSC
-
-namespace WTF {
-
-inline void printInternal(PrintStream& out, JSC::VirtualRegister value)
-{
-    out.print(value.offset());
-}
-
-} // namespace WTF
-
-#endif // VirtualRegister_h
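
The invariant behind the new arithmetic operators is the operand encoding: local k maps to operand -1 - k, so locals are negative and the mapping is its own inverse, while arguments are positive and constants start at FirstConstantRegisterIndex. A small sketch checking the round trip; the register slot size is an assumption for illustration:

    #include <cassert>

    constexpr int localToOperand(int local) { return -1 - local; }
    constexpr int operandToLocal(int operand) { return -1 - operand; }
    constexpr int registerSize = 8; // sizeof(Register) on 64-bit is an assumption

    int main()
    {
        for (int local = 0; local < 16; ++local) {
            int operand = localToOperand(local);
            assert(operand < 0);                      // locals are negative operands
            assert(operandToLocal(operand) == local); // the mapping is an involution
        }
        // offsetInBytes(): the operand index scaled by the register slot size.
        assert(localToOperand(0) * registerSize == -8);
    }
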
diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.cpp b/Source/JavaScriptCore/bytecode/Watchpoint.cpp
index f29c2141c..fbe952d03 100644
--- a/Source/JavaScriptCore/bytecode/Watchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/Watchpoint.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,16 +26,33 @@
 #include "config.h"
 #include "Watchpoint.h"
 
-#include "LinkBuffer.h"
+#include "HeapInlines.h"
+#include "VM.h"
 #include <wtf/CompilationThread.h>
-#include 
 
 namespace JSC {
 
+void StringFireDetail::dump(PrintStream& out) const
+{
+    out.print(m_string);
+}
+
 Watchpoint::~Watchpoint()
 {
-    if (isOnList())
+    if (isOnList()) {
+        // This will happen if we get destroyed before the set fires. That's totally a valid
+        // possibility. For example:
+        //
+        // CodeBlock has a Watchpoint on transition from structure S1. The transition never
+        // happens, but the CodeBlock gets destroyed because of GC.
         remove();
+    }
+}
+
+void Watchpoint::fire(const FireDetail& detail)
+{
+    RELEASE_ASSERT(!isOnList());
+    fireInternal(detail);
 }
 
 WatchpointSet::WatchpointSet(WatchpointState state)
@@ -65,20 +82,55 @@ void WatchpointSet::add(Watchpoint* watchpoint)
     m_state = IsWatched;
 }
 
-void WatchpointSet::fireAllSlow()
+void WatchpointSet::fireAllSlow(VM& vm, const FireDetail& detail)
 {
     ASSERT(state() == IsWatched);
     
     WTF::storeStoreFence();
-    fireAllWatchpoints();
-    m_state = IsInvalidated;
+    m_state = IsInvalidated; // Do this first. Needed for adaptive watchpoints.
+    fireAllWatchpoints(vm, detail);
     WTF::storeStoreFence();
 }
 
-void WatchpointSet::fireAllWatchpoints()
+void WatchpointSet::fireAllSlow(VM& vm, const char* reason)
 {
-    while (!m_set.isEmpty())
-        m_set.begin()->fire();
+    fireAllSlow(vm, StringFireDetail(reason));
+}
+
+void WatchpointSet::fireAllWatchpoints(VM& vm, const FireDetail& detail)
+{
+    // In case there are any adaptive watchpoints, we need to make sure that they see that this
+    // watchpoint has already been invalidated.
+    RELEASE_ASSERT(hasBeenInvalidated());
+
+    // Firing a watchpoint may cause a GC to happen. This GC could destroy various
+    // Watchpoints themselves while they're in the process of firing. It's not safe
+    // for most Watchpoints to be destructed while they're in the middle of firing.
+    // This GC could also destroy us, and we're not in a safe state to be destroyed.
+    // The safest thing to do is to DeferGCForAWhile to prevent this GC from happening.
+    DeferGCForAWhile deferGC(vm.heap);
+    
+    while (!m_set.isEmpty()) {
+        Watchpoint* watchpoint = m_set.begin();
+        ASSERT(watchpoint->isOnList());
+        
+        // Removing the Watchpoint before firing it makes it possible to implement watchpoints
+        // that add themselves to a different set when they fire. This kind of "adaptive"
+        // watchpoint can be used to track some semantic property that is more fine-grained than
+        // what the set can convey. For example, we might care if a singleton object ever has a
+        // property called "foo". We can watch for this by checking if its Structure has "foo" and
+        // then watching its transitions. But then the watchpoint fires if any property is added.
+        // So, before the watchpoint decides to invalidate any code, it can check if it is
+        // possible to add itself to the transition watchpoint set of the singleton object's new
+        // Structure.
+        watchpoint->remove();
+        ASSERT(m_set.begin() != watchpoint);
+        ASSERT(!watchpoint->isOnList());
+        
+        watchpoint->fire(detail);
+        // After we fire the watchpoint, the watchpoint pointer may be a dangling pointer. That's
+        // fine, because we have no use for the pointer anymore.
+    }
 }
 
 void InlineWatchpointSet::add(Watchpoint* watchpoint)
@@ -86,6 +138,11 @@ void InlineWatchpointSet::add(Watchpoint* watchpoint)
     inflate()->add(watchpoint);
 }
 
+void InlineWatchpointSet::fireAll(VM& vm, const char* reason)
+{
+    fireAll(vm, StringFireDetail(reason));
+}
+
 WatchpointSet* InlineWatchpointSet::inflateSlow()
 {
     ASSERT(isThin());
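
The rewritten fireAllWatchpoints() detaches each watchpoint from its set before invoking it, which is what allows a callback to re-register itself elsewhere (the adaptive case) or be destroyed while firing. The same discipline in miniature, with hypothetical types:

    #include <cassert>
    #include <list>

    // Remove-before-fire, in miniature: each watchpoint is detached from the set
    // before its callback runs, so the callback may re-register the watchpoint
    // elsewhere ("adaptive" watchpoints) or even destroy it.
    struct MiniWatchpoint {
        virtual ~MiniWatchpoint() = default;
        virtual void fired() = 0;
    };

    struct MiniSet {
        std::list<MiniWatchpoint*> members;

        void fireAll()
        {
            while (!members.empty()) {
                MiniWatchpoint* w = members.front();
                members.pop_front(); // detach first...
                w->fired();          // ...then fire; w may re-add itself or die
            }
        }
    };

    int main()
    {
        struct Counting : MiniWatchpoint {
            int count = 0;
            void fired() override { ++count; }
        };
        Counting a, b;
        MiniSet set;
        set.members = { &a, &b };
        set.fireAll();
        assert(a.count == 1 && b.count == 1 && set.members.empty());
    }
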
diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.h b/Source/JavaScriptCore/bytecode/Watchpoint.h
index 8790f4e62..69e393de4 100644
--- a/Source/JavaScriptCore/bytecode/Watchpoint.h
+++ b/Source/JavaScriptCore/bytecode/Watchpoint.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -23,16 +23,50 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef Watchpoint_h
-#define Watchpoint_h
+#pragma once
 
 #include <wtf/Atomics.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
 #include <wtf/SentinelLinkedList.h>
 #include <wtf/ThreadSafeRefCounted.h>
 
 namespace JSC {
 
+class FireDetail {
+    void* operator new(size_t) = delete;
+    
+public:
+    FireDetail()
+    {
+    }
+    
+    virtual ~FireDetail()
+    {
+    }
+    
+    virtual void dump(PrintStream&) const = 0;
+};
+
+class StringFireDetail : public FireDetail {
+public:
+    StringFireDetail(const char* string)
+        : m_string(string)
+    {
+    }
+    
+    void dump(PrintStream& out) const override;
+
+private:
+    const char* m_string;
+};
+
+class WatchpointSet;
+
 class Watchpoint : public BasicRawSentinelNode<Watchpoint> {
+    WTF_MAKE_NONCOPYABLE(Watchpoint);
+    WTF_MAKE_FAST_ALLOCATED;
 public:
     Watchpoint()
     {
@@ -40,10 +74,12 @@ public:
     
     virtual ~Watchpoint();
 
-    void fire() { fireInternal(); }
-    
 protected:
-    virtual void fireInternal() = 0;
+    virtual void fireInternal(const FireDetail&) = 0;
+
+private:
+    friend class WatchpointSet;
+    void fire(const FireDetail&);
 };
 
 enum WatchpointState {
@@ -53,12 +89,22 @@ enum WatchpointState {
 };
 
 class InlineWatchpointSet;
+class VM;
 
 class WatchpointSet : public ThreadSafeRefCounted<WatchpointSet> {
     friend class LLIntOffsetsExtractor;
 public:
-    WatchpointSet(WatchpointState);
-    ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this.
+    JS_EXPORT_PRIVATE WatchpointSet(WatchpointState);
+    
+    // FIXME: In many cases, it would be amazing if this *did* fire the watchpoints. I suspect that
+    // this might be hard to get right, but still, it might be awesome.
+    JS_EXPORT_PRIVATE ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this.
+    
+    // Fast way of getting the state, which only works from the main thread.
+    WatchpointState stateOnJSThread() const
+    {
+        return static_cast<WatchpointState>(m_state);
+    }
     
     // It is safe to call this from another thread. It may return an old
     // state. Guarantees that if *first* read the state() of the thing being
@@ -98,39 +144,67 @@ public:
     // set watchpoints that we believe will actually be fired.
     void startWatching()
     {
-        ASSERT(state() != IsInvalidated);
+        ASSERT(m_state != IsInvalidated);
+        if (m_state == IsWatched)
+            return;
+        WTF::storeStoreFence();
         m_state = IsWatched;
+        WTF::storeStoreFence();
     }
     
-    void fireAll()
+    void fireAll(VM& vm, const FireDetail& detail)
     {
-        if (state() != IsWatched)
+        if (LIKELY(m_state != IsWatched))
             return;
-        fireAllSlow();
+        fireAllSlow(vm, detail);
     }
     
-    void touch()
+    void fireAll(VM& vm, const char* reason)
+    {
+        if (LIKELY(m_state != IsWatched))
+            return;
+        fireAllSlow(vm, reason);
+    }
+    
+    void touch(VM& vm, const FireDetail& detail)
     {
         if (state() == ClearWatchpoint)
             startWatching();
         else
-            fireAll();
+            fireAll(vm, detail);
     }
     
-    void invalidate()
+    void touch(VM& vm, const char* reason)
+    {
+        touch(vm, StringFireDetail(reason));
+    }
+    
+    void invalidate(VM& vm, const FireDetail& detail)
     {
         if (state() == IsWatched)
-            fireAll();
+            fireAll(vm, detail);
         m_state = IsInvalidated;
     }
-
+    
+    void invalidate(VM& vm, const char* reason)
+    {
+        invalidate(vm, StringFireDetail(reason));
+    }
+    
+    bool isBeingWatched() const
+    {
+        return m_setIsNotEmpty;
+    }
+    
     int8_t* addressOfState() { return &m_state; }
+    static ptrdiff_t offsetOfState() { return OBJECT_OFFSETOF(WatchpointSet, m_state); }
     int8_t* addressOfSetIsNotEmpty() { return &m_setIsNotEmpty; }
     
-    JS_EXPORT_PRIVATE void fireAllSlow(); // Call only if you've checked isWatched.
+    JS_EXPORT_PRIVATE void fireAllSlow(VM&, const FireDetail&); // Call only if you've checked isWatched.
+    JS_EXPORT_PRIVATE void fireAllSlow(VM&, const char* reason); // Ditto.
     
 private:
-    void fireAllWatchpoints();
+    void fireAllWatchpoints(VM&, const FireDetail&);
     
     friend class InlineWatchpointSet;
 
@@ -174,18 +248,34 @@ public:
         freeFat();
     }
     
+    // Fast way of getting the state, which only works from the main thread.
+    WatchpointState stateOnJSThread() const
+    {
+        uintptr_t data = m_data;
+        if (isFat(data))
+            return fat(data)->stateOnJSThread();
+        return decodeState(data);
+    }
+
+    // It is safe to call this from another thread. It may return a prior state,
+    // but that should be fine since you should only perform actions based on the
+    // state if you also add a watchpoint.
+    WatchpointState state() const
+    {
+        WTF::loadLoadFence();
+        uintptr_t data = m_data;
+        WTF::loadLoadFence();
+        if (isFat(data))
+            return fat(data)->state();
+        return decodeState(data);
+    }
+    
     // It is safe to call this from another thread.  It may return false
     // even if the set actually had been invalidated, but that ought to happen
     // only in the case of races, and should be rare.
     bool hasBeenInvalidated() const
     {
-        WTF::loadLoadFence();
-        uintptr_t data = m_data;
-        if (isFat(data)) {
-            WTF::loadLoadFence();
-            return fat(data)->hasBeenInvalidated();
-        }
-        return decodeState(data) == IsInvalidated;
+        return state() == IsInvalidated;
     }
     
     // Like hasBeenInvalidated(), may be called from another thread.
@@ -206,10 +296,10 @@ public:
         m_data = encodeState(IsWatched);
     }
     
-    void fireAll()
+    void fireAll(VM& vm, const FireDetail& detail)
     {
         if (isFat()) {
-            fat()->fireAll();
+            fat()->fireAll(vm, detail);
             return;
         }
         if (decodeState(m_data) == ClearWatchpoint)
@@ -218,19 +308,77 @@ public:
         WTF::storeStoreFence();
     }
     
-    void touch()
+    void invalidate(VM& vm, const FireDetail& detail)
+    {
+        if (isFat())
+            fat()->invalidate(vm, detail);
+        else
+            m_data = encodeState(IsInvalidated);
+    }
+    
+    JS_EXPORT_PRIVATE void fireAll(VM&, const char* reason);
+    
+    void touch(VM& vm, const FireDetail& detail)
     {
         if (isFat()) {
-            fat()->touch();
+            fat()->touch(vm, detail);
             return;
         }
-        if (decodeState(m_data) == ClearWatchpoint)
+        uintptr_t data = m_data;
+        if (decodeState(data) == IsInvalidated)
+            return;
+        WTF::storeStoreFence();
+        if (decodeState(data) == ClearWatchpoint)
             m_data = encodeState(IsWatched);
         else
             m_data = encodeState(IsInvalidated);
         WTF::storeStoreFence();
     }
     
+    void touch(VM& vm, const char* reason)
+    {
+        touch(vm, StringFireDetail(reason));
+    }
+
+    // Note that for any watchpoint that is visible from the DFG, it would be incorrect to write code like:
+    //
+    // if (w.isBeingWatched())
+    //     w.fireAll()
+    //
+    // Concurrently to this, the DFG could do:
+    //
+    // if (w.isStillValid())
+    //     perform optimizations;
+    // if (!w.isStillValid())
+    //     retry compilation;
+    //
+    // Note that the DFG algorithm is widespread, and sound, because fireAll() and invalidate() will leave
+    // the watchpoint in a !isStillValid() state. Hence, if fireAll() or invalidate() interleaved between
+    // the first isStillValid() check and the second one, then it would simply cause the DFG to retry
+    // compilation later.
+    //
+    // But, if you change some piece of state that the DFG might optimize for, but invalidate the
+    // watchpoint by doing:
+    //
+    // if (w.isBeingWatched())
+    //     w.fireAll()
+    //
+    // then the DFG would never know that you invalidated state between the two checks.
+    //
+    // There are two ways to work around this:
+    //
+    // - Call fireAll() without a isBeingWatched() check. Then, the DFG will know that the watchpoint has
+    //   been invalidated when it does its second check.
+    //
+    // - Do not expose the watchpoint set to the DFG directly, and have your own way of validating whether
+    //   the assumptions that the DFG thread used are still valid when the DFG code is installed.
+    bool isBeingWatched() const
+    {
+        if (isFat())
+            return fat()->isBeingWatched();
+        return false;
+    }
+    
 private:
     static const uintptr_t IsThinFlag        = 1;
     static const uintptr_t StateMask         = 6;
@@ -247,7 +395,7 @@ private:
     
     static uintptr_t encodeState(WatchpointState state)
     {
-        return (state << StateShift) | IsThinFlag;
+        return (static_cast<uintptr_t>(state) << StateShift) | IsThinFlag;
     }
     
     bool isThin() const { return isThin(m_data); }
@@ -284,6 +432,3 @@ private:
 };
 
 } // namespace JSC
-
-#endif // Watchpoint_h
-
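
InlineWatchpointSet's thin form packs the entire set into one pointer-sized word: bit 0 is the thin flag and the state occupies the bits selected by StateMask, as encodeState()/decodeState() above show. A compilable sketch of that packing; the shift value is inferred from StateMask = 6 and is an assumption:

    #include <cassert>
    #include <cstdint>

    enum State : unsigned { Clear = 0, Watched = 1, Invalidated = 2 };

    constexpr uintptr_t isThinFlag = 1; // bit 0: set means "no fat set allocated"
    constexpr uintptr_t stateMask = 6;  // bits 1-2 hold the watchpoint state
    constexpr uintptr_t stateShift = 1; // inferred from the mask above

    constexpr uintptr_t encodeState(State s)
    {
        return (static_cast<uintptr_t>(s) << stateShift) | isThinFlag;
    }
    constexpr State decodeState(uintptr_t data)
    {
        return static_cast<State>((data & stateMask) >> stateShift);
    }
    constexpr bool isThin(uintptr_t data) { return data & isThinFlag; }

    int main()
    {
        uintptr_t data = encodeState(Clear);
        assert(isThin(data) && decodeState(data) == Clear);

        data = encodeState(Watched);
        assert(decodeState(data) == Watched);

        // A real ("fat") WatchpointSet* is at least 2-byte aligned, so its low
        // bit is clear and the two representations can never collide.
        assert(!isThin(reinterpret_cast<uintptr_t>(&data)));
    }
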
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
index cd4490f59..4b211b7b6 100644
--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
  * Copyright (C) 2008 Cameron Zwarich 
  * Copyright (C) 2012 Igalia, S.L.
  *
@@ -12,7 +12,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -31,17 +31,30 @@
 #include "config.h"
 #include "BytecodeGenerator.h"
 
+#include "ArithProfile.h"
+#include "BuiltinExecutables.h"
+#include "BytecodeGeneratorification.h"
+#include "BytecodeLivenessAnalysis.h"
+#include "DefinePropertyAttributes.h"
 #include "Interpreter.h"
-#include "JSActivation.h"
+#include "JSCInlines.h"
 #include "JSFunction.h"
-#include "JSNameScope.h"
+#include "JSGeneratorFunction.h"
+#include "JSLexicalEnvironment.h"
+#include "JSTemplateRegistryKey.h"
 #include "LowLevelInterpreter.h"
-#include "Operations.h"
 #include "Options.h"
 #include "StackAlignment.h"
 #include "StrongInlines.h"
 #include "UnlinkedCodeBlock.h"
+#include "UnlinkedEvalCodeBlock.h"
+#include "UnlinkedFunctionCodeBlock.h"
 #include "UnlinkedInstructionStream.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+#include "UnlinkedProgramCodeBlock.h"
+#include <wtf/BitVector.h>
+#include <wtf/CommaPrinter.h>
+#include <wtf/SmallPtrSet.h>
 #include <wtf/StdLibExtras.h>
 #include <wtf/text/WTFString.h>
 
@@ -55,21 +68,65 @@ void Label::setLocation(unsigned location)
     
     unsigned size = m_unresolvedJumps.size();
     for (unsigned i = 0; i < size; ++i)
-        m_generator->m_instructions[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first;
+        m_generator.instructions()[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first;
+}
+
+void Variable::dump(PrintStream& out) const
+{
+    out.print(
+        "{ident = ", m_ident,
+        ", offset = ", m_offset,
+        ", local = ", RawPointer(m_local),
+        ", attributes = ", m_attributes,
+        ", kind = ", m_kind,
+        ", symbolTableConstantIndex = ", m_symbolTableConstantIndex,
+        ", isLexicallyScoped = ", m_isLexicallyScoped, "}");
 }
 
 ParserError BytecodeGenerator::generate()
 {
-    SamplingRegion samplingRegion("Bytecode Generation");
-    
     m_codeBlock->setThisRegister(m_thisRegister.virtualRegister());
 
-    m_scopeNode->emitBytecode(*this);
+    emitLogShadowChickenPrologueIfNecessary();
+    
+    // If we have declared a variable named "arguments" and we are using arguments then we should
+    // perform that assignment now.
+    if (m_needToInitializeArguments)
+        initializeVariable(variable(propertyNames().arguments), m_argumentsRegister);
+
+    if (m_restParameter)
+        m_restParameter->emit(*this);
+
+    {
+        RefPtr<RegisterID> temp = newTemporary();
+        RefPtr<RegisterID> globalScope;
+        for (auto functionPair : m_functionsToInitialize) {
+            FunctionMetadataNode* metadata = functionPair.first;
+            FunctionVariableType functionType = functionPair.second;
+            emitNewFunction(temp.get(), metadata);
+            if (functionType == NormalFunctionVariable)
+                initializeVariable(variable(metadata->ident()), temp.get());
+            else if (functionType == GlobalFunctionVariable) {
+                if (!globalScope) {
+                    // We know this will resolve to the global object because our parser/global initialization code 
+                    // doesn't allow let/const/class variables to have the same names as functions.
+                    RefPtr<RegisterID> globalObjectScope = emitResolveScope(nullptr, Variable(metadata->ident()));
+                    globalScope = newBlockScopeVariable(); 
+                    emitMove(globalScope.get(), globalObjectScope.get());
+                }
+                emitPutToScope(globalScope.get(), Variable(metadata->ident()), temp.get(), ThrowIfNotFound, InitializationMode::NotInitialization);
+            } else
+                RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+    
+    bool callingClassConstructor = constructorKind() != ConstructorKind::None && !isConstructor();
+    if (!callingClassConstructor)
+        m_scopeNode->emitBytecode(*this);
 
     m_staticPropertyAnalyzer.kill();
 
-    for (unsigned i = 0; i < m_tryRanges.size(); ++i) {
-        TryRange& range = m_tryRanges[i];
+    for (auto& range : m_tryRanges) {
         int start = range.start->bind();
         int end = range.end->bind();
         
@@ -98,15 +155,15 @@ ParserError BytecodeGenerator::generate()
         if (end <= start)
             continue;
         
-        ASSERT(range.tryData->targetScopeDepth != UINT_MAX);
-        UnlinkedHandlerInfo info = {
-            static_cast<uint32_t>(start), static_cast<uint32_t>(end),
-            static_cast<uint32_t>(range.tryData->target->bind()),
-            range.tryData->targetScopeDepth
-        };
+        UnlinkedHandlerInfo info(static_cast<uint32_t>(start), static_cast<uint32_t>(end),
+            static_cast<uint32_t>(range.tryData->target->bind()), range.tryData->handlerType);
         m_codeBlock->addExceptionHandler(info);
     }
     
+
+    if (isGeneratorOrAsyncFunctionBodyParseMode(m_codeBlock->parseMode()))
+        performGeneratorification(m_codeBlock.get(), m_instructions, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex);
+
     m_codeBlock->setInstructions(std::make_unique<UnlinkedInstructionStream>(m_instructions));
 
     m_codeBlock->shrinkToFit();
@@ -116,517 +173,1078 @@ ParserError BytecodeGenerator::generate()
     return ParserError(ParserError::ErrorNone);
 }
 
-bool BytecodeGenerator::addVar(
-    const Identifier& ident, ConstantMode constantMode, WatchMode watchMode, RegisterID*& r0)
-{
-    ASSERT(static_cast(m_codeBlock->m_numVars) == m_calleeRegisters.size());
-    
-    ConcurrentJITLocker locker(symbolTable().m_lock);
-    int index = virtualRegisterForLocal(m_calleeRegisters.size()).offset();
-    SymbolTableEntry newEntry(index, constantMode == IsConstant ? ReadOnly : 0);
-    SymbolTable::Map::AddResult result = symbolTable().add(locker, ident.impl(), newEntry);
-
-    if (!result.isNewEntry) {
-        r0 = ®isterFor(result.iterator->value.getIndex());
-        return false;
-    }
-    
-    if (watchMode == IsWatchable) {
-        while (m_watchableVariables.size() < static_cast(m_codeBlock->m_numVars))
-            m_watchableVariables.append(Identifier());
-        m_watchableVariables.append(ident);
-    }
-    
-    r0 = addVar();
-    
-    ASSERT(watchMode == NotWatchable || static_cast(m_codeBlock->m_numVars) == m_watchableVariables.size());
-    
-    return true;
-}
-
-void BytecodeGenerator::preserveLastVar()
-{
-    if ((m_firstConstantIndex = m_calleeRegisters.size()) != 0)
-        m_lastVar = &m_calleeRegisters.last();
-}
-
-BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
-    : m_shouldEmitDebugHooks(debuggerMode == DebuggerOn)
-    , m_shouldEmitProfileHooks(profilerMode == ProfilerOn)
-    , m_symbolTable(0)
+BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, const VariableEnvironment* parentScopeTDZVariables)
+    : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
     , m_scopeNode(programNode)
     , m_codeBlock(vm, codeBlock)
     , m_thisRegister(CallFrame::thisArgumentOffset())
-    , m_emptyValueRegister(0)
-    , m_globalObjectRegister(0)
-    , m_finallyDepth(0)
-    , m_localScopeDepth(0)
     , m_codeType(GlobalCode)
-    , m_nextConstantOffset(0)
-    , m_globalConstantIndex(0)
-    , m_hasCreatedActivation(true)
-    , m_firstLazyFunction(0)
-    , m_lastLazyFunction(0)
-    , m_staticPropertyAnalyzer(&m_instructions)
     , m_vm(&vm)
-    , m_lastOpcodeID(op_end)
-#ifndef NDEBUG
-    , m_lastOpcodePosition(0)
-#endif
-    , m_usesExceptions(false)
-    , m_expressionTooDeep(false)
+    , m_needsToUpdateArrowFunctionContext(programNode->usesArrowFunction() || programNode->usesEval())
 {
-    if (m_shouldEmitDebugHooks)
-        m_codeBlock->setNeedsFullScopeChain(true);
+    ASSERT_UNUSED(parentScopeTDZVariables, !parentScopeTDZVariables->size());
+
+    for (auto& constantRegister : m_linkTimeConstantRegisters)
+        constantRegister = nullptr;
+
+    allocateCalleeSaveSpace();
 
     m_codeBlock->setNumParameters(1); // Allocate space for "this"
 
-    emitOpcode(op_enter);
+    emitEnter();
 
-    const VarStack& varStack = programNode->varStack();
-    const FunctionStack& functionStack = programNode->functionStack();
+    allocateAndEmitScope();
 
-    for (size_t i = 0; i < functionStack.size(); ++i) {
-        FunctionBodyNode* function = functionStack[i];
-        UnlinkedFunctionExecutable* unlinkedFunction = makeFunction(function);
-        codeBlock->addFunctionDeclaration(*m_vm, function->ident(), unlinkedFunction);
-    }
+    emitWatchdog();
+
+    const FunctionStack& functionStack = programNode->functionStack();
 
-    for (size_t i = 0; i < varStack.size(); ++i)
-        codeBlock->addVariableDeclaration(varStack[i].first, !!(varStack[i].second & DeclarationStacks::IsConstant));
+    for (auto* function : functionStack)
+        m_functionsToInitialize.append(std::make_pair(function, GlobalFunctionVariable));
 
+    if (Options::validateBytecode()) {
+        for (auto& entry : programNode->varDeclarations())
+            RELEASE_ASSERT(entry.value.isVar());
+    }
+    codeBlock->setVariableDeclarations(programNode->varDeclarations());
+    codeBlock->setLexicalDeclarations(programNode->lexicalVariables());
+    // Even though this program may have lexical variables that go under TDZ, when linking the get_from_scope/put_to_scope
+    // operations we emit, we will have ResolveTypes that implicitly do TDZ checks. Therefore, we don't need
+    // additional TDZ checks on top of those. This is why we can omit pushing programNode->lexicalVariables()
+    // to the TDZ stack.
+    
+    if (needsToUpdateArrowFunctionContext()) {
+        initializeArrowFunctionContextScopeIfNeeded();
+        emitPutThisToArrowFunctionContextScope();
+    }
 }
 
-BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode)
-    : m_shouldEmitDebugHooks(debuggerMode == DebuggerOn)
-    , m_shouldEmitProfileHooks(profilerMode == ProfilerOn)
-    , m_symbolTable(codeBlock->symbolTable())
-    , m_scopeNode(functionBody)
+BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, const VariableEnvironment* parentScopeTDZVariables)
+    : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+    , m_scopeNode(functionNode)
     , m_codeBlock(vm, codeBlock)
-    , m_activationRegister(0)
-    , m_emptyValueRegister(0)
-    , m_globalObjectRegister(0)
-    , m_finallyDepth(0)
-    , m_localScopeDepth(0)
     , m_codeType(FunctionCode)
-    , m_nextConstantOffset(0)
-    , m_globalConstantIndex(0)
-    , m_hasCreatedActivation(false)
-    , m_firstLazyFunction(0)
-    , m_lastLazyFunction(0)
-    , m_staticPropertyAnalyzer(&m_instructions)
     , m_vm(&vm)
-    , m_lastOpcodeID(op_end)
-#ifndef NDEBUG
-    , m_lastOpcodePosition(0)
-#endif
-    , m_usesExceptions(false)
-    , m_expressionTooDeep(false)
+    , m_isBuiltinFunction(codeBlock->isBuiltinFunction())
+    , m_usesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode())
+    // FIXME: We should be able to have tail call elimination with the profiler
+    // enabled. This is currently not possible because the profiler expects
+    // op_will_call / op_did_call pairs before and after a call, which are not
+    // compatible with tail calls (we have no way of emitting op_did_call).
+    // https://bugs.webkit.org/show_bug.cgi?id=148819
+    , m_inTailPosition(Options::useTailCalls() && !isConstructor() && constructorKind() == ConstructorKind::None && isStrictMode())
+    , m_needsToUpdateArrowFunctionContext(functionNode->usesArrowFunction() || functionNode->usesEval())
+    , m_derivedContextType(codeBlock->derivedContextType())
 {
-    if (m_shouldEmitDebugHooks)
-        m_codeBlock->setNeedsFullScopeChain(true);
-
-    m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode());
-    Vector<Identifier> boundParameterProperties;
-    FunctionParameters& parameters = *functionBody->parameters();
-    for (size_t i = 0; i < parameters.size(); i++) {
-        auto pattern = parameters.at(i);
-        if (pattern->isBindingNode())
-            continue;
-        pattern->collectBoundIdentifiers(boundParameterProperties);
-        continue;
+    for (auto& constantRegister : m_linkTimeConstantRegisters)
+        constantRegister = nullptr;
+
+    if (m_isBuiltinFunction)
+        m_shouldEmitDebugHooks = false;
+
+    allocateCalleeSaveSpace();
+    
+    SymbolTable* functionSymbolTable = SymbolTable::create(*m_vm);
+    functionSymbolTable->setUsesNonStrictEval(m_usesNonStrictEval);
+    int symbolTableConstantIndex = 0;
+
+    FunctionParameters& parameters = *functionNode->parameters(); 
+    // http://www.ecma-international.org/ecma-262/6.0/index.html#sec-functiondeclarationinstantiation
+    // This implements IsSimpleParameterList in the Ecma 2015 spec.
+    // If IsSimpleParameterList is false, we will create a strict-mode like arguments object.
+    // IsSimpleParameterList is false if the argument list contains any default parameter values,
+    // a rest parameter, or any destructuring patterns.
+    // If we do have default parameters, destructuring parameters, or a rest parameter, our parameters will be allocated in a different scope.
+    bool isSimpleParameterList = parameters.isSimpleParameterList();
+
+    SourceParseMode parseMode = codeBlock->parseMode();
+
+    bool containsArrowOrEvalButNotInArrowBlock = ((functionNode->usesArrowFunction() && functionNode->doAnyInnerArrowFunctionsUseAnyFeature()) || functionNode->usesEval()) && !m_codeBlock->isArrowFunction();
+    bool shouldCaptureSomeOfTheThings = m_shouldEmitDebugHooks || functionNode->needsActivation() || containsArrowOrEvalButNotInArrowBlock;
+
+    bool shouldCaptureAllOfTheThings = m_shouldEmitDebugHooks || codeBlock->usesEval();
+    bool needsArguments = ((functionNode->usesArguments() && !codeBlock->isArrowFunction()) || codeBlock->usesEval() || (functionNode->usesArrowFunction() && !codeBlock->isArrowFunction() && isArgumentsUsedInInnerArrowFunction()));
+
+    if (isGeneratorOrAsyncFunctionBodyParseMode(parseMode)) {
+        // Generator and AsyncFunction never provide "arguments". The "arguments" reference will be resolved in an upper generator function scope.
+        needsArguments = false;
+
+        // Generator and AsyncFunction use the var scope to save and resume their variables, so the lexical scope is always instantiated.
+        shouldCaptureSomeOfTheThings = true;
     }
-    m_symbolTable->setParameterCountIncludingThis(functionBody->parameters()->size() + 1);
 
-    emitOpcode(op_enter);
-    if (m_codeBlock->needsFullScopeChain()) {
-        m_activationRegister = addVar();
-        emitInitLazyRegister(m_activationRegister);
-        m_codeBlock->setActivationRegister(m_activationRegister->virtualRegister());
+    if (isGeneratorOrAsyncFunctionWrapperParseMode(parseMode) && needsArguments) {
+        // A generator does not provide "arguments". Instead, the wrapping GeneratorFunction provides "arguments".
+        // This is because the arguments of a generator should be evaluated before starting it.
+        // To work around this, we evaluate these arguments as arguments of the wrapping generator function and reference them from the generator.
+        //
+        //    function *gen(a, b = hello())
+        //    {
+        //        return {
+        //            @generatorNext: function (@generator, @generatorState, @generatorValue, @generatorResumeMode, @generatorFrame)
+        //            {
+        //                arguments;  // This `arguments` should refer to the gen's arguments.
+        //                ...
+        //            }
+        //        }
+        //    }
+        shouldCaptureSomeOfTheThings = true;
     }
 
-    m_symbolTable->setCaptureStart(virtualRegisterForLocal(m_codeBlock->m_numVars).offset());
+    if (shouldCaptureAllOfTheThings)
+        functionNode->varDeclarations().markAllVariablesAsCaptured();
+    
+    auto captures = [&] (UniquedStringImpl* uid) -> bool {
+        if (!shouldCaptureSomeOfTheThings)
+            return false;
+        if (needsArguments && uid == propertyNames().arguments.impl()) {
+            // Actually, we only need to capture the arguments object when we "need full activation"
+            // because of name scopes. But historically we did it this way, so for now we just preserve
+            // the old behavior.
+            // FIXME: https://bugs.webkit.org/show_bug.cgi?id=143072
+            return true;
+        }
+        return functionNode->captures(uid);
+    };
+    auto varKind = [&] (UniquedStringImpl* uid) -> VarKind {
+        return captures(uid) ? VarKind::Scope : VarKind::Stack;
+    };
 
-    if (functionBody->usesArguments() || codeBlock->usesEval()) { // May reify arguments object.
-        RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code.
-        RegisterID* argumentsRegister = addVar(propertyNames().arguments, IsVariable, NotWatchable); // Can be changed by assigning to 'arguments'.
+    m_calleeRegister.setIndex(CallFrameSlot::callee);
 
-        // We can save a little space by hard-coding the knowledge that the two
-        // 'arguments' values are stored in consecutive registers, and storing
-        // only the index of the assignable one.
-        codeBlock->setArgumentsRegister(argumentsRegister->virtualRegister());
-        ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->virtualRegister() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister()));
+    initializeParameters(parameters);
+    ASSERT(!(isSimpleParameterList && m_restParameter));
 
-        emitInitLazyRegister(argumentsRegister);
-        emitInitLazyRegister(unmodifiedArgumentsRegister);
-        
-        if (shouldTearOffArgumentsEagerly()) {
-            emitOpcode(op_create_arguments);
-            instructions().append(argumentsRegister->index());
-        }
+    emitEnter();
+
+    if (isGeneratorOrAsyncFunctionBodyParseMode(parseMode))
+        m_generatorRegister = &m_parameters[1];
+
+    allocateAndEmitScope();
+
+    emitWatchdog();
+    
+    if (functionNameIsInScope(functionNode->ident(), functionNode->functionMode())) {
+        ASSERT(parseMode != SourceParseMode::GeneratorBodyMode);
+        ASSERT(!isAsyncFunctionBodyParseMode(parseMode));
+        bool isDynamicScope = functionNameScopeIsDynamic(codeBlock->usesEval(), codeBlock->isStrictMode());
+        bool isFunctionNameCaptured = captures(functionNode->ident().impl());
+        bool markAsCaptured = isDynamicScope || isFunctionNameCaptured;
+        emitPushFunctionNameScope(functionNode->ident(), &m_calleeRegister, markAsCaptured);
     }
 
-    bool shouldCaptureAllTheThings = m_shouldEmitDebugHooks || codeBlock->usesEval();
+    if (shouldCaptureSomeOfTheThings)
+        m_lexicalEnvironmentRegister = addVar();
+
+    if (shouldCaptureSomeOfTheThings || vm.typeProfiler())
+        symbolTableConstantIndex = addConstantValue(functionSymbolTable)->index();
 
+    // We can allocate the "var" environment if we don't have default parameter expressions. If we have
+    // default parameter expressions, we have to hold off on allocating the "var" environment because
+    // the parent scope of the "var" environment is the parameter environment.
+    if (isSimpleParameterList)
+        initializeVarLexicalEnvironment(symbolTableConstantIndex, functionSymbolTable, shouldCaptureSomeOfTheThings);
+
+    // Figure out some interesting facts about our arguments.
     bool capturesAnyArgumentByName = false;
-    Vector<RegisterID*> capturedArguments;
-    if (functionBody->hasCapturedVariables() || shouldCaptureAllTheThings) {
-        FunctionParameters& parameters = *functionBody->parameters();
-        capturedArguments.resize(parameters.size());
+    if (functionNode->hasCapturedVariables()) {
+        FunctionParameters& parameters = *functionNode->parameters();
         for (size_t i = 0; i < parameters.size(); ++i) {
-            capturedArguments[i] = 0;
-            auto pattern = parameters.at(i);
+            auto pattern = parameters.at(i).first;
             if (!pattern->isBindingNode())
                 continue;
             const Identifier& ident = static_cast<BindingNode*>(pattern)->boundProperty();
-            if (!functionBody->captures(ident) && !shouldCaptureAllTheThings)
-                continue;
-            capturesAnyArgumentByName = true;
-            capturedArguments[i] = addVar();
+            capturesAnyArgumentByName |= captures(ident.impl());
         }
     }
+    
+    if (capturesAnyArgumentByName)
+        ASSERT(m_lexicalEnvironmentRegister);
 
-    if (capturesAnyArgumentByName && !shouldTearOffArgumentsEagerly()) {
-        size_t parameterCount = m_symbolTable->parameterCount();
-        auto slowArguments = std::make_unique<SlowArgument[]>(parameterCount);
-        for (size_t i = 0; i < parameterCount; ++i) {
-            if (!capturedArguments[i]) {
-                ASSERT(slowArguments[i].status == SlowArgument::Normal);
-                slowArguments[i].index = CallFrame::argumentOffset(i);
+    // Need to know what our functions are called. Parameters have some goofy behaviors when it
+    // comes to functions of the same name.
+    for (FunctionMetadataNode* function : functionNode->functionStack())
+        m_functions.add(function->ident().impl());
+    
+    if (needsArguments) {
+        // Create the arguments object now. We may put the arguments object into the activation if
+        // it is captured. Either way, we create two arguments object variables: one is our
+        // private variable that is immutable, and another that is the user-visible variable. The
+        // immutable one is only used here, or during formal parameter resolutions if we opt for
+        // DirectArguments.
+        
+        m_argumentsRegister = addVar();
+        m_argumentsRegister->ref();
+    }
+    
+    if (needsArguments && !codeBlock->isStrictMode() && isSimpleParameterList) {
+        // If we captured any formal parameter by name, then we use ScopedArguments. Otherwise we
+        // use DirectArguments. With ScopedArguments, we lift all of our arguments into the
+        // activation.
+        
+        if (capturesAnyArgumentByName) {
+            functionSymbolTable->setArgumentsLength(vm, parameters.size());
+            
+            // For each parameter, we have two possibilities:
+            // Either it's a binding node with no function overlap, in which case it gets a name
+            // in the symbol table - or it just gets space reserved in the symbol table. Either
+            // way we lift the value into the scope.
+            for (unsigned i = 0; i < parameters.size(); ++i) {
+                ScopeOffset offset = functionSymbolTable->takeNextScopeOffset(NoLockingNecessary);
+                functionSymbolTable->setArgumentOffset(vm, i, offset);
+                if (UniquedStringImpl* name = visibleNameForParameter(parameters.at(i).first)) {
+                    VarOffset varOffset(offset);
+                    SymbolTableEntry entry(varOffset);
+                    // Stores to these variables via the ScopedArguments object will not do
+                    // notifyWrite(), since that would be cumbersome. Also, watching formal
+                    // parameters when "arguments" is in play is unlikely to be super profitable.
+                    // So, we just disable it.
+                    entry.disableWatching(*m_vm);
+                    functionSymbolTable->set(NoLockingNecessary, name, entry);
+                }
+                emitOpcode(op_put_to_scope);
+                instructions().append(m_lexicalEnvironmentRegister->index());
+                instructions().append(UINT_MAX);
+                instructions().append(virtualRegisterForArgument(1 + i).offset());
+                instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand());
+                instructions().append(symbolTableConstantIndex);
+                instructions().append(offset.offset());
+            }
+            
+            // This creates a scoped arguments object and copies the overflow arguments into the
+            // scope. It's the equivalent of calling ScopedArguments::createByCopying().
+            emitOpcode(op_create_scoped_arguments);
+            instructions().append(m_argumentsRegister->index());
+            instructions().append(m_lexicalEnvironmentRegister->index());
+        } else {
+            // We're going to put all parameters into the DirectArguments object. First ensure
+            // that the symbol table knows that this is happening.
+            for (unsigned i = 0; i < parameters.size(); ++i) {
+                if (UniquedStringImpl* name = visibleNameForParameter(parameters.at(i).first))
+                    functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(DirectArgumentsOffset(i))));
+            }
+            
+            emitOpcode(op_create_direct_arguments);
+            instructions().append(m_argumentsRegister->index());
+        }
+    } else if (isSimpleParameterList) {
+        // Create the formal parameters the normal way. Any of them could be captured, or not. If
+        // captured, lift them into the scope. We cannot do this if we have default parameter expressions
+        // because when default parameter expressions exist, they belong in their own lexical environment
+        // separate from the "var" lexical environment.
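+        // For example (illustrative JS): with a default parameter expression, the
+        // parameter environment does not see the body's "var" bindings:
+        //
+        //   var x = 1;
+        //   function f(a = x) { var x = 2; return a; }
+        //   f(); // 1 -- "a = x" resolves x in the outer scope, not the body's var x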
+        for (unsigned i = 0; i < parameters.size(); ++i) {
+            UniquedStringImpl* name = visibleNameForParameter(parameters.at(i).first);
+            if (!name)
+                continue;
+            
+            if (!captures(name)) {
+                // This is the easy case - just tell the symbol table about the argument. It will
+                // be accessed directly.
+                functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(virtualRegisterForArgument(1 + i))));
                 continue;
             }
-            slowArguments[i].status = SlowArgument::Captured;
-            slowArguments[i].index = capturedArguments[i]->index();
+            
+            ScopeOffset offset = functionSymbolTable->takeNextScopeOffset(NoLockingNecessary);
+            const Identifier& ident =
+                static_cast<const BindingNode*>(parameters.at(i).first)->boundProperty();
+            functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(offset)));
+            
+            emitOpcode(op_put_to_scope);
+            instructions().append(m_lexicalEnvironmentRegister->index());
+            instructions().append(addConstant(ident));
+            instructions().append(virtualRegisterForArgument(1 + i).offset());
+            instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand());
+            instructions().append(symbolTableConstantIndex);
+            instructions().append(offset.offset());
         }
-        m_symbolTable->setSlowArguments(std::move(slowArguments));
     }
-
-    RegisterID* calleeRegister = resolveCallee(functionBody); // May push to the scope chain and/or add a captured var.
-
-    const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack();
-    const DeclarationStacks::VarStack& varStack = functionBody->varStack();
-
-    // Captured variables and functions go first so that activations don't have
-    // to step over the non-captured locals to mark them.
-    m_hasCreatedActivation = false;
-    if (functionBody->hasCapturedVariables()) {
-        for (size_t i = 0; i < functionStack.size(); ++i) {
-            FunctionBodyNode* function = functionStack[i];
-            const Identifier& ident = function->ident();
-            if (functionBody->captures(ident)) {
-                if (!m_hasCreatedActivation) {
-                    m_hasCreatedActivation = true;
-                    emitOpcode(op_create_activation);
-                    instructions().append(m_activationRegister->index());
-                }
-                m_functions.add(ident.impl());
-                emitNewFunction(addVar(ident, IsVariable, IsWatchable), IsCaptured, function);
+    
+    if (needsArguments && (codeBlock->isStrictMode() || !isSimpleParameterList)) {
+        // Allocate a cloned arguments object.
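+        // Unlike the aliased flavors above, a cloned arguments object is a snapshot
+        // (illustrative JS):
+        //
+        //   "use strict";
+        //   function f(x) { arguments[0] = 2; return x; }
+        //   f(1); // 1 -- no aliasing in strict mode or with non-simple parameters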
+        emitOpcode(op_create_cloned_arguments);
+        instructions().append(m_argumentsRegister->index());
+    }
+    
+    // There are some variables that need to be preinitialized to something other than Undefined:
+    //
+    // - "arguments": unless it's used as a function or parameter, this should refer to the
+    //   arguments object.
+    //
+    // - functions: these always override everything else.
+    //
+    // The most logical way to do all of this is to initialize none of the variables until now,
+    // and then initialize them in BytecodeGenerator::generate() in such an order that the rules
+    // for how these things override each other end up holding. We would initialize "arguments" first, 
+    // then all arguments, then the functions.
+    //
+    // But some arguments are already initialized by default, since if they aren't captured and we
+    // don't have "arguments" then we just point the symbol table at the stack slot of those
+    // arguments. We end up initializing the rest of the arguments that have an uncomplicated
+    // binding (i.e. don't involve destructuring) above when figuring out how to lay them out,
+    // because that's just the simplest thing. This means that when we initialize them, we have to
+    // watch out for the things that override arguments (namely, functions).
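+    //
+    // As an illustrative JS example of "functions override everything else":
+    //
+    //   function f() { function arguments() {} return typeof arguments; }
+    //   f(); // "function" -- the function declaration wins over the arguments object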
+    
+    // This is our final act of weirdness. "arguments" is overridden by everything except the
+    // callee. We add it to the symbol table if it's not already there and it's not an argument.
+    bool shouldCreateArgumentsVariableInParameterScope = false;
+    if (needsArguments) {
+        // If "arguments" is overridden by a function or destructuring parameter name, then it's
+        // OK for us to call createVariable() because it won't change anything. It's also OK for
+        // us to then tell BytecodeGenerator::generate() to write to it because it will do so
+        // before it initializes functions and destructuring parameters. But if "arguments" is
+        // overridden by a "simple" function parameter, then we have to bail: createVariable()
+        // would assert and BytecodeGenerator::generate() would write the "arguments" after the
+        // argument value had already been properly initialized.
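+        //
+        // Two observable cases this logic accounts for (illustrative JS, not from
+        // this patch):
+        //
+        //   function f(arguments) { return arguments; }
+        //   f(1); // 1 -- a simple parameter named "arguments" wins, so we must bail
+        //
+        //   function outer() { return (() => arguments[0])(); }
+        //   outer(7); // 7 -- arrow functions load "arguments" from the enclosing scope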
+        
+        bool haveParameterNamedArguments = false;
+        for (unsigned i = 0; i < parameters.size(); ++i) {
+            UniquedStringImpl* name = visibleNameForParameter(parameters.at(i).first);
+            if (name == propertyNames().arguments.impl()) {
+                haveParameterNamedArguments = true;
+                break;
             }
         }
-        for (size_t i = 0; i < varStack.size(); ++i) {
-            const Identifier& ident = varStack[i].first;
-            if (functionBody->captures(ident))
-                addVar(ident, (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, IsWatchable);
+
+        bool shouldCreateArgumentsVariable = !haveParameterNamedArguments
+            && !SourceParseModeSet(SourceParseMode::ArrowFunctionMode, SourceParseMode::AsyncArrowFunctionMode).contains(m_codeBlock->parseMode());
+        shouldCreateArgumentsVariableInParameterScope = shouldCreateArgumentsVariable && !isSimpleParameterList;
+        // Do not create the arguments variable for arrow functions; the value will be loaded from the parent scope.
+        if (shouldCreateArgumentsVariable && !shouldCreateArgumentsVariableInParameterScope) {
+            createVariable(
+                propertyNames().arguments, varKind(propertyNames().arguments.impl()), functionSymbolTable);
+
+            m_needToInitializeArguments = true;
         }
     }
-    bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks;
-    if (!canLazilyCreateFunctions && !m_hasCreatedActivation) {
-        m_hasCreatedActivation = true;
-        emitOpcode(op_create_activation);
-        instructions().append(m_activationRegister->index());
+
+    for (FunctionMetadataNode* function : functionNode->functionStack()) {
+        const Identifier& ident = function->ident();
+        createVariable(ident, varKind(ident.impl()), functionSymbolTable);
+        m_functionsToInitialize.append(std::make_pair(function, NormalFunctionVariable));
+    }
+    for (auto& entry : functionNode->varDeclarations()) {
+        ASSERT(!entry.value.isLet() && !entry.value.isConst());
+        if (!entry.value.isVar()) // This is either a parameter or callee.
+            continue;
+        if (shouldCreateArgumentsVariableInParameterScope && entry.key.get() == propertyNames().arguments.impl())
+            continue;
+        createVariable(Identifier::fromUid(m_vm, entry.key.get()), varKind(entry.key.get()), functionSymbolTable, IgnoreExisting);
     }
 
-    m_symbolTable->setCaptureEnd(virtualRegisterForLocal(codeBlock->m_numVars).offset());
 
-    m_firstLazyFunction = codeBlock->m_numVars;
-    for (size_t i = 0; i < functionStack.size(); ++i) {
-        FunctionBodyNode* function = functionStack[i];
-        const Identifier& ident = function->ident();
-        if (!functionBody->captures(ident)) {
-            m_functions.add(ident.impl());
-            RefPtr<RegisterID> reg = addVar(ident, IsVariable, NotWatchable);
-            // Don't lazily create functions that override the name 'arguments'
-            // as this would complicate lazy instantiation of actual arguments.
-            if (!canLazilyCreateFunctions || ident == propertyNames().arguments)
-                emitNewFunction(reg.get(), NotCaptured, function);
+    m_newTargetRegister = addVar();
+    switch (parseMode) {
+    case SourceParseMode::GeneratorWrapperFunctionMode: {
+        m_generatorRegister = addVar();
+
+        // FIXME: Emit to_this only when Generator uses it.
+        // https://bugs.webkit.org/show_bug.cgi?id=151586
+        m_codeBlock->addPropertyAccessInstruction(instructions().size());
+        emitOpcode(op_to_this);
+        instructions().append(kill(&m_thisRegister));
+        instructions().append(0);
+        instructions().append(0);
+
+        emitMove(m_generatorRegister, &m_calleeRegister);
+        emitCreateThis(m_generatorRegister);
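+        // Note (JS semantics, for orientation): calling a generator function only
+        // allocates the generator object; the body does not run until .next():
+        //
+        //   function* g() { throw 0; }
+        //   const it = g(); // no throw yet; it.next() would run the body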
+        break;
+    }
+
+    case SourceParseMode::AsyncArrowFunctionMode:
+    case SourceParseMode::AsyncMethodMode:
+    case SourceParseMode::AsyncFunctionMode: {
+        ASSERT(!isConstructor());
+        ASSERT(constructorKind() == ConstructorKind::None);
+        m_generatorRegister = addVar();
+        m_promiseCapabilityRegister = addVar();
+
+        if (parseMode != SourceParseMode::AsyncArrowFunctionMode) {
+            // FIXME: Emit to_this only when AsyncFunctionBody uses it.
+            // https://bugs.webkit.org/show_bug.cgi?id=151586
+            m_codeBlock->addPropertyAccessInstruction(instructions().size());
+            emitOpcode(op_to_this);
+            instructions().append(kill(&m_thisRegister));
+            instructions().append(0);
+            instructions().append(0);
+        }
+
+        emitNewObject(m_generatorRegister);
+
+        // let promiseCapability be @newPromiseCapability(@Promise)
+        auto varNewPromiseCapability = variable(propertyNames().builtinNames().newPromiseCapabilityPrivateName());
+        RefPtr<RegisterID> scope = newTemporary();
+        moveToDestinationIfNeeded(scope.get(), emitResolveScope(scope.get(), varNewPromiseCapability));
+        RefPtr<RegisterID> newPromiseCapability = emitGetFromScope(newTemporary(), scope.get(), varNewPromiseCapability, ThrowIfNotFound);
+
+        CallArguments args(*this, nullptr, 1);
+        emitLoad(args.thisRegister(), jsUndefined());
+
+        auto varPromiseConstructor = variable(propertyNames().builtinNames().PromisePrivateName());
+        moveToDestinationIfNeeded(scope.get(), emitResolveScope(scope.get(), varPromiseConstructor));
+        emitGetFromScope(args.argumentRegister(0), scope.get(), varPromiseConstructor, ThrowIfNotFound);
+
+        // JSTextPosition(int _line, int _offset, int _lineStartOffset)
+        JSTextPosition divot(m_scopeNode->firstLine(), m_scopeNode->startOffset(), m_scopeNode->lineStartOffset());
+        emitCall(promiseCapabilityRegister(), newPromiseCapability.get(), NoExpectedFunction, args, divot, divot, divot, DebuggableCall::No);
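+        // For orientation (JS semantics): the promise a caller observes is created
+        // from a capability like the one set up here:
+        //
+        //   async function f() { return 1; }
+        //   f() instanceof Promise; // true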
+        break;
+    }
+
+    case SourceParseMode::AsyncFunctionBodyMode:
+    case SourceParseMode::AsyncArrowFunctionBodyMode:
+    case SourceParseMode::GeneratorBodyMode: {
+        // |this| is already filled correctly before here.
+        emitLoad(m_newTargetRegister, jsUndefined());
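+        // For orientation (JS semantics): these body modes never observe a real
+        // new.target, e.g.:
+        //
+        //   function* g() { yield new.target; }
+        //   g().next().value; // undefined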
+        break;
+    }
+
+    default: {
+        if (SourceParseMode::ArrowFunctionMode != parseMode) {
+            if (isConstructor()) {
+                emitMove(m_newTargetRegister, &m_thisRegister);
+                if (constructorKind() == ConstructorKind::Extends) {
+                    Ref