Diffstat (limited to 'Source/JavaScriptCore/b3')
-rw-r--r--  Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp  49
-rw-r--r--  Source/JavaScriptCore/b3/B3ArgumentRegValue.h  63
-rw-r--r--  Source/JavaScriptCore/b3/B3BasicBlock.cpp  202
-rw-r--r--  Source/JavaScriptCore/b3/B3BasicBlock.h  200
-rw-r--r--  Source/JavaScriptCore/b3/B3BasicBlockInlines.h  98
-rw-r--r--  Source/JavaScriptCore/b3/B3BasicBlockUtils.h  150
-rw-r--r--  Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp  134
-rw-r--r--  Source/JavaScriptCore/b3/B3BlockInsertionSet.h  93
-rw-r--r--  Source/JavaScriptCore/b3/B3BlockWorklist.h  57
-rw-r--r--  Source/JavaScriptCore/b3/B3BottomProvider.h  57
-rw-r--r--  Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp  67
-rw-r--r--  Source/JavaScriptCore/b3/B3BreakCriticalEdges.h  38
-rw-r--r--  Source/JavaScriptCore/b3/B3CCallValue.cpp  45
-rw-r--r--  Source/JavaScriptCore/b3/B3CCallValue.h  68
-rw-r--r--  Source/JavaScriptCore/b3/B3CFG.h  76
-rw-r--r--  Source/JavaScriptCore/b3/B3CaseCollection.cpp  48
-rw-r--r--  Source/JavaScriptCore/b3/B3CaseCollection.h  116
-rw-r--r--  Source/JavaScriptCore/b3/B3CaseCollectionInlines.h  53
-rw-r--r--  Source/JavaScriptCore/b3/B3CheckSpecial.cpp  248
-rw-r--r--  Source/JavaScriptCore/b3/B3CheckSpecial.h  165
-rw-r--r--  Source/JavaScriptCore/b3/B3CheckValue.cpp  69
-rw-r--r--  Source/JavaScriptCore/b3/B3CheckValue.h  68
-rw-r--r--  Source/JavaScriptCore/b3/B3Common.cpp  76
-rw-r--r--  Source/JavaScriptCore/b3/B3Common.h  175
-rw-r--r--  Source/JavaScriptCore/b3/B3Commutativity.cpp  52
-rw-r--r--  Source/JavaScriptCore/b3/B3Commutativity.h  46
-rw-r--r--  Source/JavaScriptCore/b3/B3Compilation.cpp  55
-rw-r--r--  Source/JavaScriptCore/b3/B3Compilation.h  67
-rw-r--r--  Source/JavaScriptCore/b3/B3Compile.cpp  57
-rw-r--r--  Source/JavaScriptCore/b3/B3Compile.h  52
-rw-r--r--  Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h  139
-rw-r--r--  Source/JavaScriptCore/b3/B3Const32Value.cpp  289
-rw-r--r--  Source/JavaScriptCore/b3/B3Const32Value.h  97
-rw-r--r--  Source/JavaScriptCore/b3/B3Const64Value.cpp  289
-rw-r--r--  Source/JavaScriptCore/b3/B3Const64Value.h  97
-rw-r--r--  Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp  207
-rw-r--r--  Source/JavaScriptCore/b3/B3ConstDoubleValue.h  86
-rw-r--r--  Source/JavaScriptCore/b3/B3ConstFloatValue.cpp  188
-rw-r--r--  Source/JavaScriptCore/b3/B3ConstFloatValue.h  84
-rw-r--r--  Source/JavaScriptCore/b3/B3ConstPtrValue.h  69
-rw-r--r--  Source/JavaScriptCore/b3/B3ConstrainedValue.cpp  43
-rw-r--r--  Source/JavaScriptCore/b3/B3ConstrainedValue.h  68
-rw-r--r--  Source/JavaScriptCore/b3/B3DataSection.cpp  52
-rw-r--r--  Source/JavaScriptCore/b3/B3DataSection.h  51
-rw-r--r--  Source/JavaScriptCore/b3/B3Dominators.h  50
-rw-r--r--  Source/JavaScriptCore/b3/B3DuplicateTails.cpp  162
-rw-r--r--  Source/JavaScriptCore/b3/B3DuplicateTails.h  42
-rw-r--r--  Source/JavaScriptCore/b3/B3Effects.cpp  132
-rw-r--r--  Source/JavaScriptCore/b3/B3Effects.h  121
-rw-r--r--  Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.cpp  703
-rw-r--r--  Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.h  40
-rw-r--r--  Source/JavaScriptCore/b3/B3FenceValue.cpp  57
-rw-r--r--  Source/JavaScriptCore/b3/B3FenceValue.h  89
-rw-r--r--  Source/JavaScriptCore/b3/B3FixSSA.cpp  270
-rw-r--r--  Source/JavaScriptCore/b3/B3FixSSA.h  48
-rw-r--r--  Source/JavaScriptCore/b3/B3FoldPathConstants.cpp  275
-rw-r--r--  Source/JavaScriptCore/b3/B3FoldPathConstants.h  40
-rw-r--r--  Source/JavaScriptCore/b3/B3FrequencyClass.cpp  53
-rw-r--r--  Source/JavaScriptCore/b3/B3FrequencyClass.h  62
-rw-r--r--  Source/JavaScriptCore/b3/B3FrequentedBlock.h  40
-rw-r--r--  Source/JavaScriptCore/b3/B3Generate.cpp  127
-rw-r--r--  Source/JavaScriptCore/b3/B3Generate.h  55
-rw-r--r--  Source/JavaScriptCore/b3/B3GenericFrequentedBlock.h  85
-rw-r--r--  Source/JavaScriptCore/b3/B3HeapRange.cpp  49
-rw-r--r--  Source/JavaScriptCore/b3/B3HeapRange.h  110
-rw-r--r--  Source/JavaScriptCore/b3/B3InferSwitches.cpp  337
-rw-r--r--  Source/JavaScriptCore/b3/B3InferSwitches.h  40
-rw-r--r--  Source/JavaScriptCore/b3/B3InsertionSet.cpp  71
-rw-r--r--  Source/JavaScriptCore/b3/B3InsertionSet.h  86
-rw-r--r--  Source/JavaScriptCore/b3/B3InsertionSetInlines.h  43
-rw-r--r--  Source/JavaScriptCore/b3/B3Kind.cpp  51
-rw-r--r--  Source/JavaScriptCore/b3/B3Kind.h  236
-rw-r--r--  Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp  93
-rw-r--r--  Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h  40
-rw-r--r--  Source/JavaScriptCore/b3/B3LowerMacros.cpp  500
-rw-r--r--  Source/JavaScriptCore/b3/B3LowerMacros.h  41
-rw-r--r--  Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.cpp  205
-rw-r--r--  Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.h  40
-rw-r--r--  Source/JavaScriptCore/b3/B3LowerToAir.cpp  2899
-rw-r--r--  Source/JavaScriptCore/b3/B3LowerToAir.h  41
-rw-r--r--  Source/JavaScriptCore/b3/B3MathExtras.cpp  124
-rw-r--r--  Source/JavaScriptCore/b3/B3MathExtras.h  44
-rw-r--r--  Source/JavaScriptCore/b3/B3MemoryValue.cpp  74
-rw-r--r--  Source/JavaScriptCore/b3/B3MemoryValue.h  154
-rw-r--r--  Source/JavaScriptCore/b3/B3MoveConstants.cpp  363
-rw-r--r--  Source/JavaScriptCore/b3/B3MoveConstants.h  40
-rw-r--r--  Source/JavaScriptCore/b3/B3OpaqueByproduct.h  48
-rw-r--r--  Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp  60
-rw-r--r--  Source/JavaScriptCore/b3/B3OpaqueByproducts.h  55
-rw-r--r--  Source/JavaScriptCore/b3/B3Opcode.cpp  323
-rw-r--r--  Source/JavaScriptCore/b3/B3Opcode.h  314
-rw-r--r--  Source/JavaScriptCore/b3/B3Origin.cpp  40
-rw-r--r--  Source/JavaScriptCore/b3/B3Origin.h  60
-rw-r--r--  Source/JavaScriptCore/b3/B3OriginDump.cpp  46
-rw-r--r--  Source/JavaScriptCore/b3/B3OriginDump.h  53
-rw-r--r--  Source/JavaScriptCore/b3/B3PCToOriginMap.h  69
-rw-r--r--  Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp  175
-rw-r--r--  Source/JavaScriptCore/b3/B3PatchpointSpecial.h  67
-rw-r--r--  Source/JavaScriptCore/b3/B3PatchpointValue.cpp  61
-rw-r--r--  Source/JavaScriptCore/b3/B3PatchpointValue.h  77
-rw-r--r--  Source/JavaScriptCore/b3/B3PhaseScope.cpp  62
-rw-r--r--  Source/JavaScriptCore/b3/B3PhaseScope.h  53
-rw-r--r--  Source/JavaScriptCore/b3/B3PhiChildren.cpp  56
-rw-r--r--  Source/JavaScriptCore/b3/B3PhiChildren.h  177
-rw-r--r--  Source/JavaScriptCore/b3/B3Procedure.cpp  362
-rw-r--r--  Source/JavaScriptCore/b3/B3Procedure.h  259
-rw-r--r--  Source/JavaScriptCore/b3/B3ProcedureInlines.h  43
-rw-r--r--  Source/JavaScriptCore/b3/B3PureCSE.cpp  95
-rw-r--r--  Source/JavaScriptCore/b3/B3PureCSE.h  61
-rw-r--r--  Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp  496
-rw-r--r--  Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h  40
-rw-r--r--  Source/JavaScriptCore/b3/B3ReduceStrength.cpp  2518
-rw-r--r--  Source/JavaScriptCore/b3/B3ReduceStrength.h  46
-rw-r--r--  Source/JavaScriptCore/b3/B3SSACalculator.cpp  150
-rw-r--r--  Source/JavaScriptCore/b3/B3SSACalculator.h  167
-rw-r--r--  Source/JavaScriptCore/b3/B3SlotBaseValue.cpp  51
-rw-r--r--  Source/JavaScriptCore/b3/B3SlotBaseValue.h  63
-rw-r--r--  Source/JavaScriptCore/b3/B3SparseCollection.h  142
-rw-r--r--  Source/JavaScriptCore/b3/B3StackSlot.cpp  55
-rw-r--r--  Source/JavaScriptCore/b3/B3StackSlot.h  105
-rw-r--r--  Source/JavaScriptCore/b3/B3StackmapGenerationParams.cpp  99
-rw-r--r--  Source/JavaScriptCore/b3/B3StackmapGenerationParams.h  127
-rw-r--r--  Source/JavaScriptCore/b3/B3StackmapSpecial.cpp  304
-rw-r--r--  Source/JavaScriptCore/b3/B3StackmapSpecial.h  89
-rw-r--r--  Source/JavaScriptCore/b3/B3StackmapValue.cpp  95
-rw-r--r--  Source/JavaScriptCore/b3/B3StackmapValue.h  308
-rw-r--r--  Source/JavaScriptCore/b3/B3SuccessorCollection.h  142
-rw-r--r--  Source/JavaScriptCore/b3/B3SwitchCase.cpp  42
-rw-r--r--  Source/JavaScriptCore/b3/B3SwitchCase.h  63
-rw-r--r--  Source/JavaScriptCore/b3/B3SwitchValue.cpp  124
-rw-r--r--  Source/JavaScriptCore/b3/B3SwitchValue.h  85
-rw-r--r--  Source/JavaScriptCore/b3/B3TimingScope.cpp  55
-rw-r--r--  Source/JavaScriptCore/b3/B3TimingScope.h  47
-rw-r--r--  Source/JavaScriptCore/b3/B3Type.cpp  61
-rw-r--r--  Source/JavaScriptCore/b3/B3Type.h  94
-rw-r--r--  Source/JavaScriptCore/b3/B3TypeMap.h  108
-rw-r--r--  Source/JavaScriptCore/b3/B3UpsilonValue.cpp  55
-rw-r--r--  Source/JavaScriptCore/b3/B3UpsilonValue.h  72
-rw-r--r--  Source/JavaScriptCore/b3/B3UseCounts.cpp  63
-rw-r--r--  Source/JavaScriptCore/b3/B3UseCounts.h  56
-rw-r--r--  Source/JavaScriptCore/b3/B3Validate.cpp  595
-rw-r--r--  Source/JavaScriptCore/b3/B3Validate.h  38
-rw-r--r--  Source/JavaScriptCore/b3/B3Value.cpp  870
-rw-r--r--  Source/JavaScriptCore/b3/B3Value.h  515
-rw-r--r--  Source/JavaScriptCore/b3/B3ValueInlines.h  247
-rw-r--r--  Source/JavaScriptCore/b3/B3ValueKey.cpp  122
-rw-r--r--  Source/JavaScriptCore/b3/B3ValueKey.h  199
-rw-r--r--  Source/JavaScriptCore/b3/B3ValueKeyInlines.h  67
-rw-r--r--  Source/JavaScriptCore/b3/B3ValueRep.cpp  202
-rw-r--r--  Source/JavaScriptCore/b3/B3ValueRep.h  288
-rw-r--r--  Source/JavaScriptCore/b3/B3Variable.cpp  56
-rw-r--r--  Source/JavaScriptCore/b3/B3Variable.h  89
-rw-r--r--  Source/JavaScriptCore/b3/B3VariableValue.cpp  66
-rw-r--r--  Source/JavaScriptCore/b3/B3VariableValue.h  63
-rw-r--r--  Source/JavaScriptCore/b3/B3WasmAddressValue.cpp  56
-rw-r--r--  Source/JavaScriptCore/b3/B3WasmAddressValue.h  58
-rw-r--r--  Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp  56
-rw-r--r--  Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h  68
-rw-r--r--  Source/JavaScriptCore/b3/air/AirAllocateStack.cpp  308
-rw-r--r--  Source/JavaScriptCore/b3/air/AirAllocateStack.h  43
-rw-r--r--  Source/JavaScriptCore/b3/air/AirArg.cpp  350
-rw-r--r--  Source/JavaScriptCore/b3/air/AirArg.h  1383
-rw-r--r--  Source/JavaScriptCore/b3/air/AirArgInlines.h  194
-rw-r--r--  Source/JavaScriptCore/b3/air/AirBasicBlock.cpp  87
-rw-r--r--  Source/JavaScriptCore/b3/air/AirBasicBlock.h  172
-rw-r--r--  Source/JavaScriptCore/b3/air/AirBlockWorklist.h  52
-rw-r--r--  Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp  167
-rw-r--r--  Source/JavaScriptCore/b3/air/AirCCallSpecial.h  84
-rw-r--r--  Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp  127
-rw-r--r--  Source/JavaScriptCore/b3/air/AirCCallingConvention.h  51
-rw-r--r--  Source/JavaScriptCore/b3/air/AirCode.cpp  229
-rw-r--r--  Source/JavaScriptCore/b3/air/AirCode.h  321
-rw-r--r--  Source/JavaScriptCore/b3/air/AirCustom.cpp  195
-rw-r--r--  Source/JavaScriptCore/b3/air/AirCustom.h  328
-rw-r--r--  Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp  245
-rw-r--r--  Source/JavaScriptCore/b3/air/AirDumpAsJS.h  43
-rw-r--r--  Source/JavaScriptCore/b3/air/AirEliminateDeadCode.cpp  153
-rw-r--r--  Source/JavaScriptCore/b3/air/AirEliminateDeadCode.h  43
-rw-r--r--  Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp  543
-rw-r--r--  Source/JavaScriptCore/b3/air/AirEmitShuffle.h  116
-rw-r--r--  Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp  569
-rw-r--r--  Source/JavaScriptCore/b3/air/AirFixObviousSpills.h  41
-rw-r--r--  Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.cpp  239
-rw-r--r--  Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.h  46
-rw-r--r--  Source/JavaScriptCore/b3/air/AirFrequentedBlock.h  40
-rw-r--r--  Source/JavaScriptCore/b3/air/AirGenerate.cpp  292
-rw-r--r--  Source/JavaScriptCore/b3/air/AirGenerate.h  48
-rw-r--r--  Source/JavaScriptCore/b3/air/AirGenerated.cpp  33
-rw-r--r--  Source/JavaScriptCore/b3/air/AirGenerationContext.h  59
-rw-r--r--  Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp  78
-rw-r--r--  Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h  46
-rw-r--r--  Source/JavaScriptCore/b3/air/AirInsertionSet.cpp  51
-rw-r--r--  Source/JavaScriptCore/b3/air/AirInsertionSet.h  85
-rw-r--r--  Source/JavaScriptCore/b3/air/AirInst.cpp  72
-rw-r--r--  Source/JavaScriptCore/b3/air/AirInst.h  207
-rw-r--r--  Source/JavaScriptCore/b3/air/AirInstInlines.h  282
-rw-r--r--  Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp  1656
-rw-r--r--  Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h  40
-rw-r--r--  Source/JavaScriptCore/b3/air/AirKind.cpp  49
-rw-r--r--  Source/JavaScriptCore/b3/air/AirKind.h  97
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLiveness.h  392
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLogRegisterPressure.cpp  103
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLogRegisterPressure.h  39
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp  250
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h  41
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp  114
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h  41
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLowerMacros.cpp  108
-rw-r--r--  Source/JavaScriptCore/b3/air/AirLowerMacros.h  41
-rw-r--r--  Source/JavaScriptCore/b3/air/AirOpcode.opcodes  943
-rw-r--r--  Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.cpp  194
-rw-r--r--  Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.h  49
-rw-r--r--  Source/JavaScriptCore/b3/air/AirPadInterference.cpp  88
-rw-r--r--  Source/JavaScriptCore/b3/air/AirPadInterference.h  48
-rw-r--r--  Source/JavaScriptCore/b3/air/AirPhaseScope.cpp  60
-rw-r--r--  Source/JavaScriptCore/b3/air/AirPhaseScope.h  53
-rw-r--r--  Source/JavaScriptCore/b3/air/AirReportUsedRegisters.cpp  96
-rw-r--r--  Source/JavaScriptCore/b3/air/AirReportUsedRegisters.h  41
-rw-r--r--  Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp  171
-rw-r--r--  Source/JavaScriptCore/b3/air/AirSimplifyCFG.h  40
-rw-r--r--  Source/JavaScriptCore/b3/air/AirSpecial.cpp  89
-rw-r--r--  Source/JavaScriptCore/b3/air/AirSpecial.h  140
-rw-r--r--  Source/JavaScriptCore/b3/air/AirSpillEverything.cpp  190
-rw-r--r--  Source/JavaScriptCore/b3/air/AirSpillEverything.h  49
-rw-r--r--  Source/JavaScriptCore/b3/air/AirStackSlot.cpp  74
-rw-r--r--  Source/JavaScriptCore/b3/air/AirStackSlot.h  133
-rw-r--r--  Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp  52
-rw-r--r--  Source/JavaScriptCore/b3/air/AirStackSlotKind.h  63
-rw-r--r--  Source/JavaScriptCore/b3/air/AirTmp.cpp  55
-rw-r--r--  Source/JavaScriptCore/b3/air/AirTmp.h  298
-rw-r--r--  Source/JavaScriptCore/b3/air/AirTmpInlines.h  97
-rw-r--r--  Source/JavaScriptCore/b3/air/AirTmpWidth.cpp  183
-rw-r--r--  Source/JavaScriptCore/b3/air/AirTmpWidth.h  114
-rw-r--r--  Source/JavaScriptCore/b3/air/AirUseCounts.h  118
-rw-r--r--  Source/JavaScriptCore/b3/air/AirValidate.cpp  159
-rw-r--r--  Source/JavaScriptCore/b3/air/AirValidate.h  38
-rw-r--r--  Source/JavaScriptCore/b3/air/opcode_generator.rb  1228
-rw-r--r--  Source/JavaScriptCore/b3/air/testair.cpp  1964
-rw-r--r--  Source/JavaScriptCore/b3/testb3.cpp  15923
239 files changed, 57971 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp b/Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp
new file mode 100644
index 000000000..594d0d69b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ArgumentRegValue.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3ArgumentRegValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+ArgumentRegValue::~ArgumentRegValue()
+{
+}
+
+void ArgumentRegValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma, m_reg);
+}
+
+Value* ArgumentRegValue::cloneImpl() const
+{
+ return new ArgumentRegValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ArgumentRegValue.h b/Source/JavaScriptCore/b3/B3ArgumentRegValue.h
new file mode 100644
index 000000000..55b365fc2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ArgumentRegValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include "Reg.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ArgumentRegValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == ArgumentReg; }
+
+ ~ArgumentRegValue();
+
+ Reg argumentReg() const { return m_reg; }
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ ArgumentRegValue(Origin origin, Reg reg)
+ : Value(CheckedOpcode, ArgumentReg, reg.isGPR() ? pointerType() : Double, origin)
+ , m_reg(reg)
+ {
+ ASSERT(reg.isSet());
+ }
+
+ Reg m_reg;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
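For context (not part of the patch): ArgumentRegValue is how a B3 procedure reads an incoming argument that arrives in a register. Below is a minimal sketch in the style of testb3.cpp; Procedure::addBlock(), Origin(), and GPRInfo::argumentGPR0 come from surrounding JSC code and are assumed here, while appendNew() and appendNewControlValue() are declared in B3BasicBlock.h later in this diff.

    // Sketch: a procedure that returns its first pointer-sized register argument unchanged.
    Procedure proc;
    BasicBlock* root = proc.addBlock();
    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
    root->appendNewControlValue(proc, Return, Origin(), argument);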
diff --git a/Source/JavaScriptCore/b3/B3BasicBlock.cpp b/Source/JavaScriptCore/b3/B3BasicBlock.cpp
new file mode 100644
index 000000000..63a4e58d1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BasicBlock.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3BasicBlock.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BasicBlockUtils.h"
+#include "B3Procedure.h"
+#include "B3ValueInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+const char* const BasicBlock::dumpPrefix = "#";
+
+BasicBlock::BasicBlock(unsigned index, double frequency)
+ : m_index(index)
+ , m_frequency(frequency)
+{
+}
+
+BasicBlock::~BasicBlock()
+{
+}
+
+void BasicBlock::append(Value* value)
+{
+ m_values.append(value);
+ value->owner = this;
+}
+
+void BasicBlock::appendNonTerminal(Value* value)
+{
+ m_values.append(m_values.last());
+ m_values[m_values.size() - 2] = value;
+ value->owner = this;
+}
+
+void BasicBlock::removeLast(Procedure& proc)
+{
+ ASSERT(!m_values.isEmpty());
+ proc.deleteValue(m_values.takeLast());
+}
+
+void BasicBlock::replaceLast(Procedure& proc, Value* value)
+{
+ removeLast(proc);
+ append(value);
+}
+
+Value* BasicBlock::appendIntConstant(Procedure& proc, Origin origin, Type type, int64_t value)
+{
+ Value* result = proc.addIntConstant(origin, type, value);
+ append(result);
+ return result;
+}
+
+Value* BasicBlock::appendIntConstant(Procedure& proc, Value* likeValue, int64_t value)
+{
+ return appendIntConstant(proc, likeValue->origin(), likeValue->type(), value);
+}
+
+Value* BasicBlock::appendBoolConstant(Procedure& proc, Origin origin, bool value)
+{
+ return appendIntConstant(proc, origin, Int32, value ? 1 : 0);
+}
+
+void BasicBlock::clearSuccessors()
+{
+ m_successors.clear();
+}
+
+void BasicBlock::appendSuccessor(FrequentedBlock target)
+{
+ m_successors.append(target);
+}
+
+void BasicBlock::setSuccessors(FrequentedBlock target)
+{
+ m_successors.resize(1);
+ m_successors[0] = target;
+}
+
+void BasicBlock::setSuccessors(FrequentedBlock taken, FrequentedBlock notTaken)
+{
+ m_successors.resize(2);
+ m_successors[0] = taken;
+ m_successors[1] = notTaken;
+}
+
+bool BasicBlock::replaceSuccessor(BasicBlock* from, BasicBlock* to)
+{
+ bool result = false;
+ for (BasicBlock*& successor : successorBlocks()) {
+ if (successor == from) {
+ successor = to;
+ result = true;
+
+ // Keep looping because a successor may be mentioned multiple times, like in a Switch.
+ }
+ }
+ return result;
+}
+
+bool BasicBlock::addPredecessor(BasicBlock* block)
+{
+ return B3::addPredecessor(this, block);
+}
+
+bool BasicBlock::removePredecessor(BasicBlock* block)
+{
+ return B3::removePredecessor(this, block);
+}
+
+bool BasicBlock::replacePredecessor(BasicBlock* from, BasicBlock* to)
+{
+ return B3::replacePredecessor(this, from, to);
+}
+
+void BasicBlock::updatePredecessorsAfter()
+{
+ B3::updatePredecessorsAfter(this);
+}
+
+void BasicBlock::dump(PrintStream& out) const
+{
+ out.print(dumpPrefix, m_index);
+}
+
+void BasicBlock::deepDump(const Procedure& proc, PrintStream& out) const
+{
+ out.print("BB", *this, ": ; frequency = ", m_frequency, "\n");
+ if (predecessors().size())
+ out.print(" Predecessors: ", pointerListDump(predecessors()), "\n");
+ for (Value* value : *this)
+ out.print(" ", B3::deepDump(proc, value), "\n");
+ if (!successors().isEmpty()) {
+ out.print(" Successors: ");
+ if (size())
+ last()->dumpSuccessors(this, out);
+ else
+ out.print(listDump(successors()));
+ out.print("\n");
+ }
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin)
+{
+ RELEASE_ASSERT(opcode == Oops || opcode == Return);
+ clearSuccessors();
+ return appendNew<Value>(proc, opcode, origin);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, Value* value)
+{
+ RELEASE_ASSERT(opcode == Return);
+ clearSuccessors();
+ return appendNew<Value>(proc, opcode, origin, value);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, const FrequentedBlock& target)
+{
+ RELEASE_ASSERT(opcode == Jump);
+ setSuccessors(target);
+ return appendNew<Value>(proc, opcode, origin);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, Value* predicate, const FrequentedBlock& taken, const FrequentedBlock& notTaken)
+{
+ RELEASE_ASSERT(opcode == Branch);
+ setSuccessors(taken, notTaken);
+ return appendNew<Value>(proc, opcode, origin, predicate);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BasicBlock.h b/Source/JavaScriptCore/b3/B3BasicBlock.h
new file mode 100644
index 000000000..11f466835
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BasicBlock.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequentedBlock.h"
+#include "B3Opcode.h"
+#include "B3Origin.h"
+#include "B3SuccessorCollection.h"
+#include "B3Type.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BlockInsertionSet;
+class InsertionSet;
+class Procedure;
+class Value;
+
+class BasicBlock {
+ WTF_MAKE_NONCOPYABLE(BasicBlock);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ typedef Vector<Value*> ValueList;
+ typedef Vector<BasicBlock*, 2> PredecessorList;
+ typedef Vector<FrequentedBlock, 2> SuccessorList;
+
+ static const char* const dumpPrefix;
+
+ ~BasicBlock();
+
+ unsigned index() const { return m_index; }
+
+ ValueList::iterator begin() { return m_values.begin(); }
+ ValueList::iterator end() { return m_values.end(); }
+ ValueList::const_iterator begin() const { return m_values.begin(); }
+ ValueList::const_iterator end() const { return m_values.end(); }
+
+ size_t size() const { return m_values.size(); }
+ Value* at(size_t index) const { return m_values[index]; }
+ Value*& at(size_t index) { return m_values[index]; }
+
+ Value* last() const { return m_values.last(); }
+ Value*& last() { return m_values.last(); }
+
+ const ValueList& values() const { return m_values; }
+ ValueList& values() { return m_values; }
+
+ JS_EXPORT_PRIVATE void append(Value*);
+ JS_EXPORT_PRIVATE void appendNonTerminal(Value*);
+ JS_EXPORT_PRIVATE void replaceLast(Procedure&, Value*);
+
+ template<typename ValueType, typename... Arguments>
+ ValueType* appendNew(Procedure&, Arguments...);
+ template<typename ValueType, typename... Arguments>
+ ValueType* appendNewNonTerminal(Procedure&, Arguments...);
+
+ JS_EXPORT_PRIVATE Value* appendIntConstant(Procedure&, Origin, Type, int64_t value);
+ Value* appendIntConstant(Procedure&, Value* likeValue, int64_t value);
+ Value* appendBoolConstant(Procedure&, Origin, bool);
+
+ void removeLast(Procedure&);
+
+ template<typename ValueType, typename... Arguments>
+ ValueType* replaceLastWithNew(Procedure&, Arguments...);
+
+ unsigned numSuccessors() const { return m_successors.size(); }
+ const FrequentedBlock& successor(unsigned index) const { return m_successors[index]; }
+ FrequentedBlock& successor(unsigned index) { return m_successors[index]; }
+ const SuccessorList& successors() const { return m_successors; }
+ SuccessorList& successors() { return m_successors; }
+
+ void clearSuccessors();
+ JS_EXPORT_PRIVATE void appendSuccessor(FrequentedBlock);
+ JS_EXPORT_PRIVATE void setSuccessors(FrequentedBlock);
+ JS_EXPORT_PRIVATE void setSuccessors(FrequentedBlock, FrequentedBlock);
+
+ BasicBlock* successorBlock(unsigned index) const { return successor(index).block(); }
+ BasicBlock*& successorBlock(unsigned index) { return successor(index).block(); }
+ SuccessorCollection<BasicBlock, SuccessorList> successorBlocks()
+ {
+ return SuccessorCollection<BasicBlock, SuccessorList>(successors());
+ }
+ SuccessorCollection<const BasicBlock, const SuccessorList> successorBlocks() const
+ {
+ return SuccessorCollection<const BasicBlock, const SuccessorList>(successors());
+ }
+
+ bool replaceSuccessor(BasicBlock* from, BasicBlock* to);
+
+ // This is only valid for Jump and Branch.
+ const FrequentedBlock& taken() const;
+ FrequentedBlock& taken();
+ // This is only valid for Branch.
+ const FrequentedBlock& notTaken() const;
+ FrequentedBlock& notTaken();
+ // This is only valid for Branch and Switch.
+ const FrequentedBlock& fallThrough() const;
+ FrequentedBlock& fallThrough();
+
+ unsigned numPredecessors() const { return m_predecessors.size(); }
+ BasicBlock* predecessor(unsigned index) const { return m_predecessors[index]; }
+ BasicBlock*& predecessor(unsigned index) { return m_predecessors[index]; }
+ const PredecessorList& predecessors() const { return m_predecessors; }
+ PredecessorList& predecessors() { return m_predecessors; }
+ bool containsPredecessor(BasicBlock* block) { return m_predecessors.contains(block); }
+
+ bool addPredecessor(BasicBlock*);
+ bool removePredecessor(BasicBlock*);
+ bool replacePredecessor(BasicBlock* from, BasicBlock* to);
+
+ // Update predecessors starting with the successors of this block.
+ void updatePredecessorsAfter();
+
+ double frequency() const { return m_frequency; }
+
+ void dump(PrintStream&) const;
+ void deepDump(const Procedure&, PrintStream&) const;
+
+ // These are deprecated methods for compatibility with the old ControlValue class. Don't use them
+ // in new code.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159440
+
+ // Use this for Oops.
+ JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin);
+ // Use this for Return.
+ JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, Value*);
+ // Use this for Jump.
+ JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, const FrequentedBlock&);
+ // Use this for Branch.
+ JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, Value*, const FrequentedBlock&, const FrequentedBlock&);
+
+private:
+ friend class BlockInsertionSet;
+ friend class InsertionSet;
+ friend class Procedure;
+
+ // Instantiate via Procedure.
+ BasicBlock(unsigned index, double frequency);
+
+ unsigned m_index;
+ ValueList m_values;
+ PredecessorList m_predecessors;
+ SuccessorList m_successors;
+ double m_frequency;
+};
+
+class DeepBasicBlockDump {
+public:
+ DeepBasicBlockDump(const Procedure& proc, const BasicBlock* block)
+ : m_proc(proc)
+ , m_block(block)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_block)
+ m_block->deepDump(m_proc, out);
+ else
+ out.print("<null>");
+ }
+
+private:
+ const Procedure& m_proc;
+ const BasicBlock* m_block;
+};
+
+inline DeepBasicBlockDump deepDump(const Procedure& proc, const BasicBlock* block)
+{
+ return DeepBasicBlockDump(proc, block);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3BasicBlockInlines.h b/Source/JavaScriptCore/b3/B3BasicBlockInlines.h
new file mode 100644
index 000000000..26c2df41b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BasicBlockInlines.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3ProcedureInlines.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::appendNew(Procedure& procedure, Arguments... arguments)
+{
+ ValueType* result = procedure.add<ValueType>(arguments...);
+ append(result);
+ return result;
+}
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::appendNewNonTerminal(Procedure& procedure, Arguments... arguments)
+{
+ ValueType* result = procedure.add<ValueType>(arguments...);
+ appendNonTerminal(result);
+ return result;
+}
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::replaceLastWithNew(Procedure& procedure, Arguments... arguments)
+{
+ ValueType* result = procedure.add<ValueType>(arguments...);
+ replaceLast(procedure, result);
+ return result;
+}
+
+inline const FrequentedBlock& BasicBlock::taken() const
+{
+ ASSERT(last()->opcode() == Jump || last()->opcode() == Branch);
+ return m_successors[0];
+}
+
+inline FrequentedBlock& BasicBlock::taken()
+{
+ ASSERT(last()->opcode() == Jump || last()->opcode() == Branch);
+ return m_successors[0];
+}
+
+inline const FrequentedBlock& BasicBlock::notTaken() const
+{
+ ASSERT(last()->opcode() == Branch);
+ return m_successors[1];
+}
+
+inline FrequentedBlock& BasicBlock::notTaken()
+{
+ ASSERT(last()->opcode() == Branch);
+ return m_successors[1];
+}
+
+inline const FrequentedBlock& BasicBlock::fallThrough() const
+{
+ ASSERT(last()->opcode() == Branch || last()->opcode() == Switch);
+ return m_successors.last();
+}
+
+inline FrequentedBlock& BasicBlock::fallThrough()
+{
+ ASSERT(last()->opcode() == Branch || last()->opcode() == Switch);
+ return m_successors.last();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
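For context (not part of the patch): replaceLastWithNew() together with the taken()/notTaken() accessors above supports rewriting a terminator in place. A sketch of folding a Branch whose condition is known to be true into a Jump; proc and block are assumed names, and the dropped edge's predecessor list would still need fixing (for example via removePredecessor()).

    FrequentedBlock taken = block->taken();   // capture before the Branch is destroyed
    Origin origin = block->last()->origin();
    block->replaceLastWithNew<Value>(proc, Jump, origin);
    block->setSuccessors(taken);              // shrink from two successors to one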
diff --git a/Source/JavaScriptCore/b3/B3BasicBlockUtils.h b/Source/JavaScriptCore/b3/B3BasicBlockUtils.h
new file mode 100644
index 000000000..e5998c864
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BasicBlockUtils.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/IndexSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+template<typename BasicBlock>
+bool addPredecessor(BasicBlock* block, BasicBlock* predecessor)
+{
+ auto& predecessors = block->predecessors();
+
+ if (predecessors.contains(predecessor))
+ return false;
+
+ predecessors.append(predecessor);
+ return true;
+}
+
+template<typename BasicBlock>
+bool removePredecessor(BasicBlock* block, BasicBlock* predecessor)
+{
+ auto& predecessors = block->predecessors();
+ for (unsigned i = 0; i < predecessors.size(); ++i) {
+ if (predecessors[i] == predecessor) {
+ predecessors[i--] = predecessors.last();
+ predecessors.removeLast();
+ ASSERT(!predecessors.contains(predecessor));
+ return true;
+ }
+ }
+ return false;
+}
+
+template<typename BasicBlock>
+bool replacePredecessor(BasicBlock* block, BasicBlock* from, BasicBlock* to)
+{
+ bool changed = false;
+ // We do it this way because 'to' may already be a predecessor of 'block'.
+ changed |= removePredecessor(block, from);
+ changed |= addPredecessor(block, to);
+ return changed;
+}
+
+template<typename BasicBlock>
+void updatePredecessorsAfter(BasicBlock* root)
+{
+ Vector<BasicBlock*, 16> worklist;
+ worklist.append(root);
+ while (!worklist.isEmpty()) {
+ BasicBlock* block = worklist.takeLast();
+ for (BasicBlock* successor : block->successorBlocks()) {
+ if (addPredecessor(successor, block))
+ worklist.append(successor);
+ }
+ }
+}
+
+template<typename BasicBlock>
+void clearPredecessors(Vector<std::unique_ptr<BasicBlock>>& blocks)
+{
+ for (auto& block : blocks) {
+ if (block)
+ block->predecessors().resize(0);
+ }
+}
+
+template<typename BasicBlock>
+void recomputePredecessors(Vector<std::unique_ptr<BasicBlock>>& blocks)
+{
+ clearPredecessors(blocks);
+ updatePredecessorsAfter(blocks[0].get());
+}
+
+template<typename BasicBlock>
+bool isBlockDead(BasicBlock* block)
+{
+ if (!block)
+ return false;
+ if (!block->index())
+ return false;
+ return block->predecessors().isEmpty();
+}
+
+template<typename BasicBlock>
+Vector<BasicBlock*> blocksInPreOrder(BasicBlock* root)
+{
+ Vector<BasicBlock*> result;
+ GraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> worklist;
+ worklist.push(root);
+ while (BasicBlock* block = worklist.pop()) {
+ result.append(block);
+ for (BasicBlock* successor : block->successorBlocks())
+ worklist.push(successor);
+ }
+ return result;
+}
+
+template<typename BasicBlock>
+Vector<BasicBlock*> blocksInPostOrder(BasicBlock* root)
+{
+ Vector<BasicBlock*> result;
+ PostOrderGraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> worklist;
+ worklist.push(root);
+ while (GraphNodeWithOrder<BasicBlock*> item = worklist.pop()) {
+ switch (item.order) {
+ case GraphVisitOrder::Pre:
+ worklist.pushPost(item.node);
+ for (BasicBlock* successor : item.node->successorBlocks())
+ worklist.push(successor);
+ break;
+ case GraphVisitOrder::Post:
+ result.append(item.node);
+ break;
+ }
+ }
+ return result;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
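For context (not part of the patch): these templates supply the standard CFG traversal orders and predecessor maintenance used throughout B3. A sketch of a typical use, assuming proc.at(0) returns the entry block:

    // Reverse post-order is the usual iteration order for a forward dataflow analysis.
    Vector<BasicBlock*> postOrder = blocksInPostOrder(proc.at(0));
    for (unsigned i = postOrder.size(); i--;) {
        BasicBlock* block = postOrder[i];
        // ... visit 'block'; every predecessor not reached through a back edge was visited first ...
    }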
diff --git a/Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp b/Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp
new file mode 100644
index 000000000..76a166820
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BlockInsertionSet.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3BlockInsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3InsertionSet.h"
+#include "B3ProcedureInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 {
+
+BlockInsertionSet::BlockInsertionSet(Procedure &proc)
+ : m_proc(proc)
+{
+}
+
+BlockInsertionSet::~BlockInsertionSet() { }
+
+void BlockInsertionSet::insert(BlockInsertion&& insertion)
+{
+ m_insertions.append(WTFMove(insertion));
+}
+
+BasicBlock* BlockInsertionSet::insert(unsigned index, double frequency)
+{
+ std::unique_ptr<BasicBlock> block(new BasicBlock(UINT_MAX, frequency));
+ BasicBlock* result = block.get();
+ insert(BlockInsertion(index, WTFMove(block)));
+ return result;
+}
+
+BasicBlock* BlockInsertionSet::insertBefore(BasicBlock* before, double frequency)
+{
+ return insert(before->index(), frequency == frequency ? frequency : before->frequency());
+}
+
+BasicBlock* BlockInsertionSet::insertAfter(BasicBlock* after, double frequency)
+{
+ return insert(after->index() + 1, frequency == frequency ? frequency : after->frequency());
+}
+
+BasicBlock* BlockInsertionSet::splitForward(
+ BasicBlock* block, unsigned& valueIndex, InsertionSet* insertionSet, double frequency)
+{
+ Value* value = block->at(valueIndex);
+
+ // Create a new block that will go just before 'block', and make it contain everything prior
+ // to 'valueIndex'.
+ BasicBlock* result = insertBefore(block, frequency);
+ result->m_values.resize(valueIndex + 1);
+ for (unsigned i = valueIndex; i--;)
+ result->m_values[i] = block->m_values[i];
+
+ // Make the new block jump to 'block'.
+ result->m_values[valueIndex] = m_proc.add<Value>(Jump, value->origin());
+ result->setSuccessors(FrequentedBlock(block));
+
+ // If we had inserted things into 'block' before this, execute those insertions now.
+ if (insertionSet)
+ insertionSet->execute(result);
+
+ // Remove everything prior to 'valueIndex' from 'block', since those things are now in the
+ // new block.
+ block->m_values.remove(0, valueIndex);
+
+ // This is being used in a forward loop over 'block'. Update the index of the loop so that
+ // it can continue to the next block.
+ valueIndex = 0;
+
+ // Fixup the predecessors of 'block'. They now must jump to the new block.
+ result->predecessors() = WTFMove(block->predecessors());
+ block->addPredecessor(result);
+ for (BasicBlock* predecessor : result->predecessors())
+ predecessor->replaceSuccessor(block, result);
+
+ return result;
+}
+
+bool BlockInsertionSet::execute()
+{
+ if (m_insertions.isEmpty())
+ return false;
+
+ // We allow insertions to be given to us in any order. So, we need to sort them before
+ // running WTF::executeInsertions. We strongly prefer a stable sort and we want it to be
+ // fast, so we use bubble sort.
+ bubbleSort(m_insertions.begin(), m_insertions.end());
+
+ executeInsertions(m_proc.m_blocks, m_insertions);
+
+ // Prune out empty entries. This isn't strictly necessary but it's
+ // healthy to keep the block list from growing.
+ m_proc.m_blocks.removeAllMatching(
+ [&] (std::unique_ptr<BasicBlock>& blockPtr) -> bool {
+ return !blockPtr;
+ });
+
+ // Make sure that the blocks know their new indices.
+ for (unsigned i = 0; i < m_proc.m_blocks.size(); ++i)
+ m_proc.m_blocks[i]->m_index = i;
+
+ return true;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3BlockInsertionSet.h b/Source/JavaScriptCore/b3/B3BlockInsertionSet.h
new file mode 100644
index 000000000..b316f646c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BlockInsertionSet.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class InsertionSet;
+
+typedef WTF::Insertion<std::unique_ptr<BasicBlock>> BlockInsertion;
+
+class BlockInsertionSet {
+public:
+ BlockInsertionSet(Procedure&);
+ ~BlockInsertionSet();
+
+ void insert(BlockInsertion&&);
+
+ // Insert a new block at a given index.
+ BasicBlock* insert(unsigned index, double frequency = PNaN);
+
+ // Inserts a new block before the given block. Usually you will not pass the frequency
+ // argument. Passing PNaN causes us to just use the frequency of the 'before' block. That's
+ // usually what you want.
+ BasicBlock* insertBefore(BasicBlock* before, double frequency = PNaN);
+
+ // Inserts a new block after the given block.
+ BasicBlock* insertAfter(BasicBlock* after, double frequency = PNaN);
+
+ // A helper to split a block when forward iterating over it. It creates a new block to hold
+ // everything before the instruction at valueIndex. The current block is left with
+ // everything at and after valueIndex. If the optional InsertionSet is provided, it will get
+ // executed on the newly created block - this makes sense if you had previously inserted
+ // things into the original block, since the newly created block will be indexed identically
+ // to how this block was indexed for all values prior to valueIndex. After this runs, it sets
+ // valueIndex to zero. This allows you to use this method for things like:
+ //
+ // for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+ // Value* value = block->at(valueIndex);
+ // if (value->opcode() == Foo) {
+ // BasicBlock* predecessor =
+ // m_blockInsertionSet.splitForward(block, valueIndex, &m_insertionSet);
+ // ... // Now you can append to predecessor, insert new blocks before 'block', and
+ // ... // you can use m_insertionSet to insert more things before 'value'.
+ // predecessor->updatePredecessorsAfter();
+ // }
+ // }
+ //
+ // Note how usually this idiom ends in a call to updatePredecessorsAfter(), which ensures
+ // that the predecessors involved in any of the new control flow that you've created are up
+ // to date.
+ BasicBlock* splitForward(
+ BasicBlock*, unsigned& valueIndex, InsertionSet* = nullptr,
+ double frequency = PNaN);
+
+ bool execute();
+
+private:
+ Procedure& m_proc;
+ Vector<BlockInsertion, 8> m_insertions;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
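For context (not part of the patch): the intended lifecycle is any number of insert*() calls while iterating over the procedure, followed by a single execute(); B3BreakCriticalEdges.cpp below is the canonical in-tree user. A condensed sketch with assumed names:

    BlockInsertionSet blockInsertionSet(proc);
    for (BasicBlock* block : proc) {
        // ... decide that 'block' needs a new predecessor pad ...
        BasicBlock* pad = blockInsertionSet.insertBefore(block);
        pad->appendNew<Value>(proc, Jump, block->at(0)->origin());
        pad->setSuccessors(FrequentedBlock(block));
        // (predecessor lists still need updating, as breakCriticalEdges() does below)
    }
    blockInsertionSet.execute(); // sorts the pending insertions and renumbers the blocks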
diff --git a/Source/JavaScriptCore/b3/B3BlockWorklist.h b/Source/JavaScriptCore/b3/B3BlockWorklist.h
new file mode 100644
index 000000000..6fa197c61
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BlockWorklist.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/IndexSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+
+typedef GraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> BlockWorklist;
+
+// When you say BlockWith<int> you should read it as "block with an int".
+template<typename T> using BlockWith = GraphNodeWith<BasicBlock*, T>;
+
+// Extended block worklist is useful for enqueueing some meta-data along with the block. It also
+// permits forcibly enqueueing things even if the block has already been seen. It's useful for
+// things like building a spanning tree, in which case T (the auxiliary payload) would be the
+// successor index.
+template<typename T> using ExtendedBlockWorklist = ExtendedGraphNodeWorklist<BasicBlock*, T, IndexSet<BasicBlock>>;
+
+typedef GraphVisitOrder VisitOrder;
+
+typedef GraphNodeWithOrder<BasicBlock*> BlockWithOrder;
+
+typedef PostOrderGraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> PostOrderBlockWorklist;
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
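For context (not part of the patch): BlockWorklist is backed by an IndexSet, so each block is pushed at most once. A sketch of a forward reachability walk, assuming proc.at(0) is the entry block:

    BlockWorklist worklist;
    worklist.push(proc.at(0));
    while (BasicBlock* block = worklist.pop()) {
        for (BasicBlock* successor : block->successorBlocks())
            worklist.push(successor); // no-op if 'successor' was already seen
    }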
diff --git a/Source/JavaScriptCore/b3/B3BottomProvider.h b/Source/JavaScriptCore/b3/B3BottomProvider.h
new file mode 100644
index 000000000..9a977f0eb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BottomProvider.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3InsertionSet.h"
+
+namespace JSC { namespace B3 {
+
+// This exists because we cannot convert values to constants in-place.
+// FIXME: https://bugs.webkit.org/show_bug.cgi?id=159119
+
+class BottomProvider {
+public:
+ BottomProvider(InsertionSet& insertionSet, size_t index)
+ : m_insertionSet(&insertionSet)
+ , m_index(index)
+ {
+ }
+
+ Value* operator()(Origin origin, Type type) const
+ {
+ return m_insertionSet->insertBottom(m_index, origin, type);
+ }
+
+private:
+ InsertionSet* m_insertionSet;
+ size_t m_index;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
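For context (not part of the patch): a BottomProvider simply forwards to InsertionSet::insertBottom(), yielding a constant of the requested type that will be placed before the value currently being rewritten. A sketch with assumed names (block, value, valueIndex) taken from a forward loop over a block:

    InsertionSet insertionSet(proc);
    BottomProvider bottomProvider(insertionSet, valueIndex);
    Value* bottom = bottomProvider(value->origin(), value->type());
    // 'bottom' is a constant inserted just before 'value' once execute() runs.
    insertionSet.execute(block);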
diff --git a/Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp b/Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp
new file mode 100644
index 000000000..abdf0ceeb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BreakCriticalEdges.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3BreakCriticalEdges.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+void breakCriticalEdges(Procedure& proc)
+{
+ BlockInsertionSet insertionSet(proc);
+
+ for (BasicBlock* block : proc) {
+ if (block->numSuccessors() <= 1)
+ continue;
+
+ for (BasicBlock*& successor : block->successorBlocks()) {
+ if (successor->numPredecessors() <= 1)
+ continue;
+
+ BasicBlock* pad =
+ insertionSet.insertBefore(successor, successor->frequency());
+ pad->appendNew<Value>(proc, Jump, successor->at(0)->origin());
+ pad->setSuccessors(FrequentedBlock(successor));
+ pad->addPredecessor(block);
+ successor->replacePredecessor(block, pad);
+ successor = pad;
+ }
+ }
+
+ insertionSet.execute();
+ proc.invalidateCFG();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3BreakCriticalEdges.h b/Source/JavaScriptCore/b3/B3BreakCriticalEdges.h
new file mode 100644
index 000000000..75c324f4e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3BreakCriticalEdges.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+void breakCriticalEdges(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CCallValue.cpp b/Source/JavaScriptCore/b3/B3CCallValue.cpp
new file mode 100644
index 000000000..518d72349
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CCallValue.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3CCallValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+CCallValue::~CCallValue()
+{
+}
+
+Value* CCallValue::cloneImpl() const
+{
+ return new CCallValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3CCallValue.h b/Source/JavaScriptCore/b3/B3CCallValue.h
new file mode 100644
index 000000000..44ec349f2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CCallValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Effects.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE CCallValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == CCall; }
+
+ ~CCallValue();
+
+ Effects effects;
+
+protected:
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ template<typename... Arguments>
+ CCallValue(Type type, Origin origin, Arguments... arguments)
+ : Value(CheckedOpcode, CCall, type, origin, arguments...)
+ , effects(Effects::forCall())
+ {
+ RELEASE_ASSERT(numChildren() >= 1);
+ }
+
+ template<typename... Arguments>
+ CCallValue(Type type, Origin origin, const Effects& effects, Arguments... arguments)
+ : Value(CheckedOpcode, CCall, type, origin, arguments...)
+ , effects(effects)
+ {
+ RELEASE_ASSERT(numChildren() >= 1);
+ }
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
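
A hedged sketch of how a CCallValue is typically built through a block: the first child is the callee address, the remaining children are the C arguments, and omitting an explicit Effects selects the conservative Effects::forCall() default. The helper name, block, and origin below are illustrative assumptions, not taken from this patch.

    // Hedged sketch: appending a C call to a block under construction.
    static double slowPath(int value) { return value * 2.0; }

    static Value* appendSlowPathCall(Procedure& proc, BasicBlock* block, Origin origin)
    {
        Value* callee = block->appendNew<ConstPtrValue>(
            proc, origin, bitwise_cast<void*>(&slowPath));
        Value* argument = block->appendNew<Const32Value>(proc, origin, 42);
        // First constructor: effects default to Effects::forCall(). Passing an
        // explicit Effects instead uses the second constructor and lets later
        // phases reason more precisely about the call.
        return block->appendNew<CCallValue>(proc, Double, origin, callee, argument);
    }
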
diff --git a/Source/JavaScriptCore/b3/B3CFG.h b/Source/JavaScriptCore/b3/B3CFG.h
new file mode 100644
index 000000000..3d1418e8a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CFG.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3Procedure.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+class CFG {
+ WTF_MAKE_NONCOPYABLE(CFG);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ typedef BasicBlock* Node;
+ typedef IndexSet<BasicBlock> Set;
+ template<typename T> using Map = IndexMap<BasicBlock, T>;
+ typedef Vector<BasicBlock*, 4> List;
+
+ CFG(Procedure& proc)
+ : m_proc(proc)
+ {
+ }
+
+ Node root() { return m_proc[0]; }
+
+ template<typename T>
+ Map<T> newMap() { return IndexMap<JSC::B3::BasicBlock, T>(m_proc.size()); }
+
+ SuccessorCollection<BasicBlock, BasicBlock::SuccessorList> successors(Node node) { return node->successorBlocks(); }
+ BasicBlock::PredecessorList& predecessors(Node node) { return node->predecessors(); }
+
+ unsigned index(Node node) const { return node->index(); }
+ Node node(unsigned index) const { return m_proc[index]; }
+ unsigned numNodes() const { return m_proc.size(); }
+
+ PointerDump<BasicBlock> dump(Node node) const { return pointerDump(node); }
+
+ void dump(PrintStream& out) const
+ {
+ m_proc.dump(out);
+ }
+
+private:
+ Procedure& m_proc;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
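
CFG above adapts a Procedure to the node/index interface that the generic dominator and worklist utilities expect. As an illustration of what that shape buys, a minimal reachability walk over any graph type with the same members (a hypothetical stand-in, not WebKit code):

    #include <vector>

    // Illustrative only: any type exposing root(), successors(Node),
    // index(Node), and numNodes() like the CFG adapter can be walked
    // generically.
    template<typename Graph>
    std::vector<bool> computeReachability(Graph& graph)
    {
        std::vector<bool> seen(graph.numNodes(), false);
        std::vector<typename Graph::Node> worklist;

        seen[graph.index(graph.root())] = true;
        worklist.push_back(graph.root());

        while (!worklist.empty()) {
            typename Graph::Node node = worklist.back();
            worklist.pop_back();
            for (typename Graph::Node successor : graph.successors(node)) {
                if (seen[graph.index(successor)])
                    continue;
                seen[graph.index(successor)] = true;
                worklist.push_back(successor);
            }
        }
        return seen;
    }
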
diff --git a/Source/JavaScriptCore/b3/B3CaseCollection.cpp b/Source/JavaScriptCore/b3/B3CaseCollection.cpp
new file mode 100644
index 000000000..5221ebab4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CaseCollection.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3CaseCollection.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 {
+
+void CaseCollection::dump(PrintStream& out) const
+{
+ CommaPrinter comma;
+ for (SwitchCase switchCase : *this)
+ out.print(comma, switchCase);
+ out.print(comma, "default->", fallThrough());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3CaseCollection.h b/Source/JavaScriptCore/b3/B3CaseCollection.h
new file mode 100644
index 000000000..c45cc641d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CaseCollection.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SwitchCase.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class SwitchValue;
+
+// NOTE: You'll always want to include B3CaseCollectionInlines.h when you use this.
+
+class CaseCollection {
+public:
+ CaseCollection()
+ {
+ }
+
+ CaseCollection(const SwitchValue* terminal, const BasicBlock* owner)
+ : m_switch(terminal)
+ , m_owner(owner)
+ {
+ }
+
+ const FrequentedBlock& fallThrough() const;
+
+ unsigned size() const;
+ SwitchCase at(unsigned index) const;
+
+ SwitchCase operator[](unsigned index) const
+ {
+ return at(index);
+ }
+
+ class iterator {
+ public:
+ iterator()
+ : m_collection(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(const CaseCollection& collection, unsigned index)
+ : m_collection(&collection)
+ , m_index(index)
+ {
+ }
+
+ SwitchCase operator*()
+ {
+ return m_collection->at(m_index);
+ }
+
+ iterator& operator++()
+ {
+ m_index++;
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ ASSERT(m_collection == other.m_collection);
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ const CaseCollection* m_collection;
+ unsigned m_index;
+ };
+
+ typedef iterator const_iterator;
+
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, size()); }
+
+ void dump(PrintStream&) const;
+
+private:
+ const SwitchValue* m_switch { nullptr };
+ const BasicBlock* m_owner { nullptr };
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
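
The collection above is just a view pairing a SwitchValue terminal with its owning block; the accessors are defined inline in B3CaseCollectionInlines.h, which is why the NOTE in this header asks you to include it. A hedged sketch of walking one, where "terminal" and "block" are assumed to be the SwitchValue and the BasicBlock it terminates:

    // Hedged sketch: iterating a switch terminal's cases. Requires
    // B3CaseCollectionInlines.h for size(), at(), and fallThrough().
    CaseCollection cases(terminal, block);
    for (SwitchCase switchCase : cases)
        dataLog("case: ", switchCase, "\n");
    dataLog("default: ", cases.fallThrough(), "\n");
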
diff --git a/Source/JavaScriptCore/b3/B3CaseCollectionInlines.h b/Source/JavaScriptCore/b3/B3CaseCollectionInlines.h
new file mode 100644
index 000000000..237a56822
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CaseCollectionInlines.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CaseCollection.h"
+#include "B3SwitchValue.h"
+#include "B3BasicBlock.h"
+
+namespace JSC { namespace B3 {
+
+inline const FrequentedBlock& CaseCollection::fallThrough() const
+{
+ return m_owner->fallThrough();
+}
+
+inline unsigned CaseCollection::size() const
+{
+ return m_switch->numCaseValues();
+}
+
+inline SwitchCase CaseCollection::at(unsigned index) const
+{
+ return SwitchCase(m_switch->caseValue(index), m_owner->successor(index));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CheckSpecial.cpp b/Source/JavaScriptCore/b3/B3CheckSpecial.cpp
new file mode 100644
index 000000000..6f7826cb5
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CheckSpecial.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3CheckSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "AirInstInlines.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+namespace {
+
+unsigned numB3Args(B3::Kind kind)
+{
+ switch (kind.opcode()) {
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ return 2;
+ case Check:
+ return 1;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
+unsigned numB3Args(Value* value)
+{
+ return numB3Args(value->kind());
+}
+
+unsigned numB3Args(Inst& inst)
+{
+ return numB3Args(inst.origin);
+}
+
+} // anonymous namespace
+
+CheckSpecial::Key::Key(const Inst& inst)
+{
+ m_kind = inst.kind;
+ m_numArgs = inst.args.size();
+ m_stackmapRole = SameAsRep;
+}
+
+void CheckSpecial::Key::dump(PrintStream& out) const
+{
+ out.print(m_kind, "(", m_numArgs, ",", m_stackmapRole, ")");
+}
+
+CheckSpecial::CheckSpecial(Air::Kind kind, unsigned numArgs, RoleMode stackmapRole)
+ : m_checkKind(kind)
+ , m_stackmapRole(stackmapRole)
+ , m_numCheckArgs(numArgs)
+{
+ ASSERT(isDefinitelyTerminal(kind.opcode));
+}
+
+CheckSpecial::CheckSpecial(const CheckSpecial::Key& key)
+ : CheckSpecial(key.kind(), key.numArgs(), key.stackmapRole())
+{
+}
+
+CheckSpecial::~CheckSpecial()
+{
+}
+
+Inst CheckSpecial::hiddenBranch(const Inst& inst) const
+{
+ Inst hiddenBranch(m_checkKind, inst.origin);
+ hiddenBranch.args.reserveInitialCapacity(m_numCheckArgs);
+ for (unsigned i = 0; i < m_numCheckArgs; ++i)
+ hiddenBranch.args.append(inst.args[i + 1]);
+ ASSERT(hiddenBranch.isTerminal());
+ return hiddenBranch;
+}
+
+void CheckSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+ Inst hidden = hiddenBranch(inst);
+ hidden.forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+ unsigned index = &arg - &hidden.args[0];
+ callback(inst.args[1 + index], role, type, width);
+ });
+
+ std::optional<unsigned> firstRecoverableIndex;
+ if (m_checkKind.opcode == BranchAdd32 || m_checkKind.opcode == BranchAdd64)
+ firstRecoverableIndex = 1;
+ forEachArgImpl(numB3Args(inst), m_numCheckArgs + 1, inst, m_stackmapRole, firstRecoverableIndex, callback);
+}
+
+bool CheckSpecial::isValid(Inst& inst)
+{
+ return hiddenBranch(inst).isValidForm()
+ && isValidImpl(numB3Args(inst), m_numCheckArgs + 1, inst)
+ && inst.args.size() - m_numCheckArgs - 1 == inst.origin->numChildren() - numB3Args(inst);
+}
+
+bool CheckSpecial::admitsStack(Inst& inst, unsigned argIndex)
+{
+ if (argIndex >= 1 && argIndex < 1 + m_numCheckArgs)
+ return hiddenBranch(inst).admitsStack(argIndex - 1);
+ return admitsStackImpl(numB3Args(inst), m_numCheckArgs + 1, inst, argIndex);
+}
+
+std::optional<unsigned> CheckSpecial::shouldTryAliasingDef(Inst& inst)
+{
+ if (std::optional<unsigned> branchDef = hiddenBranch(inst).shouldTryAliasingDef())
+ return *branchDef + 1;
+ return std::nullopt;
+}
+
+CCallHelpers::Jump CheckSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
+{
+ CCallHelpers::Jump fail = hiddenBranch(inst).generate(jit, context);
+ ASSERT(fail.isSet());
+
+ StackmapValue* value = inst.origin->as<StackmapValue>();
+ ASSERT(value);
+
+ Vector<ValueRep> reps = repsImpl(context, numB3Args(inst), m_numCheckArgs + 1, inst);
+
+ // Set aside the args that are relevant to undoing the operation. This is because we don't want to
+ // capture all of inst in the closure below.
+ Vector<Arg, 3> args;
+ for (unsigned i = 0; i < m_numCheckArgs; ++i)
+ args.append(inst.args[1 + i]);
+
+ context.latePaths.append(
+ createSharedTask<GenerationContext::LatePathFunction>(
+ [=] (CCallHelpers& jit, GenerationContext& context) {
+ fail.link(&jit);
+
+ // If necessary, undo the operation.
+ switch (m_checkKind.opcode) {
+ case BranchAdd32:
+ if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
+ || (m_numCheckArgs == 3 && args[1] == args[2])) {
+ // This is ugly, but that's fine - we won't have to do this very often.
+ ASSERT(args[1].isGPR());
+ GPRReg valueGPR = args[1].gpr();
+ GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
+ jit.pushToSave(scratchGPR);
+ jit.setCarry(scratchGPR);
+ jit.lshift32(CCallHelpers::TrustedImm32(31), scratchGPR);
+ jit.urshift32(CCallHelpers::TrustedImm32(1), valueGPR);
+ jit.or32(scratchGPR, valueGPR);
+ jit.popToRestore(scratchGPR);
+ break;
+ }
+ if (m_numCheckArgs == 4) {
+ if (args[1] == args[3])
+ Inst(Sub32, nullptr, args[2], args[3]).generate(jit, context);
+ else if (args[2] == args[3])
+ Inst(Sub32, nullptr, args[1], args[3]).generate(jit, context);
+ } else if (m_numCheckArgs == 3)
+ Inst(Sub32, nullptr, args[1], args[2]).generate(jit, context);
+ break;
+ case BranchAdd64:
+ if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
+ || (m_numCheckArgs == 3 && args[1] == args[2])) {
+ // This is ugly, but that's fine - we won't have to do this very often.
+ ASSERT(args[1].isGPR());
+ GPRReg valueGPR = args[1].gpr();
+ GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
+ jit.pushToSave(scratchGPR);
+ jit.setCarry(scratchGPR);
+ jit.lshift64(CCallHelpers::TrustedImm32(63), scratchGPR);
+ jit.urshift64(CCallHelpers::TrustedImm32(1), valueGPR);
+ jit.or64(scratchGPR, valueGPR);
+ jit.popToRestore(scratchGPR);
+ break;
+ }
+ if (m_numCheckArgs == 4) {
+ if (args[1] == args[3])
+ Inst(Sub64, nullptr, args[2], args[3]).generate(jit, context);
+ else if (args[2] == args[3])
+ Inst(Sub64, nullptr, args[1], args[3]).generate(jit, context);
+ } else if (m_numCheckArgs == 3)
+ Inst(Sub64, nullptr, args[1], args[2]).generate(jit, context);
+ break;
+ case BranchSub32:
+ Inst(Add32, nullptr, args[1], args[2]).generate(jit, context);
+ break;
+ case BranchSub64:
+ Inst(Add64, nullptr, args[1], args[2]).generate(jit, context);
+ break;
+ case BranchNeg32:
+ Inst(Neg32, nullptr, args[1]).generate(jit, context);
+ break;
+ case BranchNeg64:
+ Inst(Neg64, nullptr, args[1]).generate(jit, context);
+ break;
+ default:
+ break;
+ }
+
+ value->m_generator->run(jit, StackmapGenerationParams(value, reps, context));
+ }));
+
+ return CCallHelpers::Jump(); // As far as Air thinks, we are not a terminal.
+}
+
+void CheckSpecial::dumpImpl(PrintStream& out) const
+{
+ out.print(m_checkKind, "(", m_numCheckArgs, ",", m_stackmapRole, ")");
+}
+
+void CheckSpecial::deepDumpImpl(PrintStream& out) const
+{
+ out.print("B3::CheckValue lowered to ", m_checkKind, " with ", m_numCheckArgs, " args.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
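
The BranchAdd32 slow path above recovers the pre-overflow operand from the carry flag when the add was of the form x = x + x: the carry is exactly the bit that was shifted out of the top, so shifting right and reinserting it undoes the add. A standalone check of that identity, independent of the JIT:

    #include <cassert>
    #include <cstdint>

    // Verifies the recovery identity used in the BranchAdd32 slow path:
    // after x <- x + x wraps, the original x is ((carry << 31) | (x >> 1)).
    int main()
    {
        for (uint64_t original = 0; original < (1ull << 32); original += 0x10001) {
            uint64_t wide = original + original;
            uint32_t wrapped = static_cast<uint32_t>(wide);
            uint32_t carry = static_cast<uint32_t>(wide >> 32); // 0 or 1
            uint32_t recovered = (carry << 31) | (wrapped >> 1);
            assert(recovered == static_cast<uint32_t>(original));
        }
        return 0;
    }
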
diff --git a/Source/JavaScriptCore/b3/B3CheckSpecial.h b/Source/JavaScriptCore/b3/B3CheckSpecial.h
new file mode 100644
index 000000000..aa7f2feab
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CheckSpecial.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirKind.h"
+#include "B3StackmapSpecial.h"
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace B3 {
+
+namespace Air {
+struct Inst;
+}
+
+// We want to lower Check instructions to a branch, but then we want to route that branch to our
+// out-of-line code instead of doing anything else. For this reason, a CheckSpecial will remember
+// which branch opcode we have selected along with the number of args in the overload we want. It
+// will create an Inst with that opcode plus the appropriate args from the owning Inst whenever you
+// call any of the callbacks.
+//
+// Note that for CheckAdd, CheckSub, and CheckMul we expect that the B3 arguments are the reverse
+// of the Air arguments (Add(a, b) => Add32 b, a). Except:
+// - CheckSub(0, x), which turns into BranchNeg32 x.
+// - CheckMul(a, b), which turns into Mul32 b, a but we pass Any for a's ValueRep.
+
+class CheckSpecial : public StackmapSpecial {
+public:
+ // Support for hash consing these things.
+ class Key {
+ public:
+ Key()
+ : m_stackmapRole(SameAsRep)
+ , m_numArgs(0)
+ {
+ }
+
+ Key(Air::Kind kind, unsigned numArgs, RoleMode stackmapRole = SameAsRep)
+ : m_kind(kind)
+ , m_stackmapRole(stackmapRole)
+ , m_numArgs(numArgs)
+ {
+ }
+
+ explicit Key(const Air::Inst&);
+
+ bool operator==(const Key& other) const
+ {
+ return m_kind == other.m_kind
+ && m_numArgs == other.m_numArgs
+ && m_stackmapRole == other.m_stackmapRole;
+ }
+
+ bool operator!=(const Key& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const { return *this != Key(); }
+
+ Air::Kind kind() const { return m_kind; }
+ unsigned numArgs() const { return m_numArgs; }
+ RoleMode stackmapRole() const { return m_stackmapRole; }
+
+ void dump(PrintStream& out) const;
+
+ Key(WTF::HashTableDeletedValueType)
+ : m_stackmapRole(SameAsRep)
+ , m_numArgs(1)
+ {
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return *this == Key(WTF::HashTableDeletedValue);
+ }
+
+ unsigned hash() const
+ {
+ // Seriously, we don't need to be smart here. It just doesn't matter.
+ return m_kind.hash() + m_numArgs + m_stackmapRole;
+ }
+
+ private:
+ Air::Kind m_kind;
+ RoleMode m_stackmapRole;
+ unsigned m_numArgs;
+ };
+
+ CheckSpecial(Air::Kind, unsigned numArgs, RoleMode stackmapRole = SameAsRep);
+ CheckSpecial(const Key&);
+ ~CheckSpecial();
+
+protected:
+ // Constructs and returns the Inst representing the branch that this will use.
+ Air::Inst hiddenBranch(const Air::Inst&) const;
+
+ void forEachArg(Air::Inst&, const ScopedLambda<Air::Inst::EachArgCallback>&) override;
+ bool isValid(Air::Inst&) override;
+ bool admitsStack(Air::Inst&, unsigned argIndex) override;
+ std::optional<unsigned> shouldTryAliasingDef(Air::Inst&) override;
+
+ // NOTE: the generate method will generate the hidden branch and then register a LatePath that
+ // generates the stackmap. Super crazy dude!
+
+ CCallHelpers::Jump generate(Air::Inst&, CCallHelpers&, Air::GenerationContext&) override;
+
+ void dumpImpl(PrintStream&) const override;
+ void deepDumpImpl(PrintStream&) const override;
+
+private:
+ Air::Kind m_checkKind;
+ RoleMode m_stackmapRole;
+ unsigned m_numCheckArgs;
+};
+
+struct CheckSpecialKeyHash {
+ static unsigned hash(const CheckSpecial::Key& key) { return key.hash(); }
+ static bool equal(const CheckSpecial::Key& a, const CheckSpecial::Key& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::CheckSpecial::Key> {
+ typedef JSC::B3::CheckSpecialKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::CheckSpecial::Key> : SimpleClassHashTraits<JSC::B3::CheckSpecial::Key> {
+    // I don't want to think about this very hard, it's not worth it. I'm going to be conservative.
+ static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
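
The Key class, CheckSpecialKeyHash, and the WTF trait specializations above exist so that lowering can hash-cons CheckSpecials: a structurally identical special is looked up rather than recreated. A hedged sketch of that lookup; the cache name is made up, and the real code also hands ownership of the new special to the Air::Code that uses it, which this sketch leaves out.

    // Hedged sketch: sharing structurally identical CheckSpecials by Key.
    HashMap<CheckSpecial::Key, CheckSpecial*> m_checkSpecials;

    CheckSpecial* ensureCheckSpecial(const CheckSpecial::Key& key)
    {
        auto result = m_checkSpecials.add(key, nullptr);
        if (result.isNewEntry)
            result.iterator->value = new CheckSpecial(key);
        return result.iterator->value;
    }
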
diff --git a/Source/JavaScriptCore/b3/B3CheckValue.cpp b/Source/JavaScriptCore/b3/B3CheckValue.cpp
new file mode 100644
index 000000000..79b6c6e72
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CheckValue.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3CheckValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+CheckValue::~CheckValue()
+{
+}
+
+void CheckValue::convertToAdd()
+{
+ RELEASE_ASSERT(opcode() == CheckAdd || opcode() == CheckSub || opcode() == CheckMul);
+ m_kind = CheckAdd;
+}
+
+Value* CheckValue::cloneImpl() const
+{
+ return new CheckValue(*this);
+}
+
+// Use this form for CheckAdd, CheckSub, and CheckMul.
+CheckValue::CheckValue(Kind kind, Origin origin, Value* left, Value* right)
+ : StackmapValue(CheckedOpcode, kind, left->type(), origin)
+{
+ ASSERT(B3::isInt(type()));
+ ASSERT(left->type() == right->type());
+ ASSERT(kind == CheckAdd || kind == CheckSub || kind == CheckMul);
+ append(ConstrainedValue(left, ValueRep::WarmAny));
+ append(ConstrainedValue(right, ValueRep::WarmAny));
+}
+
+// Use this form for Check.
+CheckValue::CheckValue(Kind kind, Origin origin, Value* predicate)
+ : StackmapValue(CheckedOpcode, kind, Void, origin)
+{
+ ASSERT(kind == Check);
+ append(ConstrainedValue(predicate, ValueRep::WarmAny));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3CheckValue.h b/Source/JavaScriptCore/b3/B3CheckValue.h
new file mode 100644
index 000000000..e3d94bace
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3CheckValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackmapValue.h"
+
+namespace JSC { namespace B3 {
+
+class CheckValue : public StackmapValue {
+public:
+ static bool accepts(Kind kind)
+ {
+ switch (kind.opcode()) {
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ case Check:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ ~CheckValue();
+
+ void convertToAdd();
+
+protected:
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ // Use this form for CheckAdd, CheckSub, and CheckMul.
+ JS_EXPORT_PRIVATE CheckValue(Kind, Origin, Value* left, Value* right);
+
+ // Use this form for Check.
+ JS_EXPORT_PRIVATE CheckValue(Kind, Origin, Value* predicate);
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
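
A CheckValue combines the checked arithmetic with the stackmap that the out-of-line path needs; the generator attached through the StackmapValue base emits that path. A hedged sketch of appending a checked add, where "root", "proc", "origin", and the operand values are assumed to exist and the generator body is a placeholder:

    // Hedged sketch: a checked 32-bit add whose slow path simply traps.
    CheckValue* checkedSum = root->appendNew<CheckValue>(
        proc, CheckAdd, origin, leftValue, rightValue);
    checkedSum->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            // Runs only on overflow; params describe where the inputs live.
            jit.breakpoint();
        });
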
diff --git a/Source/JavaScriptCore/b3/B3Common.cpp b/Source/JavaScriptCore/b3/B3Common.cpp
new file mode 100644
index 000000000..60da36291
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Common.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Common.h"
+
+#if ENABLE(B3_JIT)
+
+#include "DFGCommon.h"
+#include "FTLState.h"
+#include "Options.h"
+
+namespace JSC { namespace B3 {
+
+bool shouldDumpIR(B3ComplitationMode mode)
+{
+#if ENABLE(FTL_JIT)
+ return FTL::verboseCompilationEnabled() || FTL::shouldDumpDisassembly() || shouldDumpIRAtEachPhase(mode);
+#else
+ return shouldDumpIRAtEachPhase(mode);
+#endif
+}
+
+bool shouldDumpIRAtEachPhase(B3ComplitationMode mode)
+{
+ if (mode == B3Mode)
+ return Options::dumpGraphAtEachPhase() || Options::dumpB3GraphAtEachPhase();
+ return Options::dumpGraphAtEachPhase() || Options::dumpAirGraphAtEachPhase();
+}
+
+bool shouldValidateIR()
+{
+ return DFG::validationEnabled() || shouldValidateIRAtEachPhase();
+}
+
+bool shouldValidateIRAtEachPhase()
+{
+ return Options::validateGraphAtEachPhase();
+}
+
+bool shouldSaveIRBeforePhase()
+{
+ return Options::verboseValidationFailure();
+}
+
+bool shouldMeasurePhaseTiming()
+{
+ return Options::logB3PhaseTimes();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Common.h b/Source/JavaScriptCore/b3/B3Common.h
new file mode 100644
index 000000000..41e8ee096
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Common.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "JSExportMacros.h"
+#include <wtf/Optional.h>
+
+namespace JSC { namespace B3 {
+
+inline bool is64Bit() { return sizeof(void*) == 8; }
+inline bool is32Bit() { return !is64Bit(); }
+
+enum B3ComplitationMode {
+ B3Mode,
+ AirMode
+};
+
+JS_EXPORT_PRIVATE bool shouldDumpIR(B3ComplitationMode);
+bool shouldDumpIRAtEachPhase(B3ComplitationMode);
+bool shouldValidateIR();
+bool shouldValidateIRAtEachPhase();
+bool shouldSaveIRBeforePhase();
+bool shouldMeasurePhaseTiming();
+
+template<typename BitsType, typename InputType>
+inline bool isIdentical(InputType left, InputType right)
+{
+ BitsType leftBits = bitwise_cast<BitsType>(left);
+ BitsType rightBits = bitwise_cast<BitsType>(right);
+ return leftBits == rightBits;
+}
+
+inline bool isIdentical(int32_t left, int32_t right)
+{
+ return isIdentical<int32_t>(left, right);
+}
+
+inline bool isIdentical(int64_t left, int64_t right)
+{
+ return isIdentical<int64_t>(left, right);
+}
+
+inline bool isIdentical(double left, double right)
+{
+ return isIdentical<int64_t>(left, right);
+}
+
+inline bool isIdentical(float left, float right)
+{
+ return isIdentical<int32_t>(left, right);
+}
+
+template<typename ResultType, typename InputType, typename BitsType>
+inline bool isRepresentableAsImpl(InputType originalValue)
+{
+ // Convert the original value to the desired result type.
+ ResultType result = static_cast<ResultType>(originalValue);
+
+ // Convert the converted value back to the original type. The original value is representable
+ // using the new type if such round-tripping doesn't lose bits.
+ InputType newValue = static_cast<InputType>(result);
+
+ return isIdentical<BitsType>(originalValue, newValue);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(int32_t value)
+{
+ return isRepresentableAsImpl<ResultType, int32_t, int32_t>(value);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(int64_t value)
+{
+ return isRepresentableAsImpl<ResultType, int64_t, int64_t>(value);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(double value)
+{
+ return isRepresentableAsImpl<ResultType, double, int64_t>(value);
+}
+
+template<typename IntType>
+static IntType chillDiv(IntType numerator, IntType denominator)
+{
+ if (!denominator)
+ return 0;
+ if (denominator == -1 && numerator == std::numeric_limits<IntType>::min())
+ return std::numeric_limits<IntType>::min();
+ return numerator / denominator;
+}
+
+template<typename IntType>
+static IntType chillMod(IntType numerator, IntType denominator)
+{
+ if (!denominator)
+ return 0;
+ if (denominator == -1 && numerator == std::numeric_limits<IntType>::min())
+ return 0;
+ return numerator % denominator;
+}
+
+template<typename IntType>
+static IntType chillUDiv(IntType numerator, IntType denominator)
+{
+ typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+ UnsignedIntType unsignedNumerator = static_cast<UnsignedIntType>(numerator);
+ UnsignedIntType unsignedDenominator = static_cast<UnsignedIntType>(denominator);
+ if (!unsignedDenominator)
+ return 0;
+ return unsignedNumerator / unsignedDenominator;
+}
+
+template<typename IntType>
+static IntType chillUMod(IntType numerator, IntType denominator)
+{
+ typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+ UnsignedIntType unsignedNumerator = static_cast<UnsignedIntType>(numerator);
+ UnsignedIntType unsignedDenominator = static_cast<UnsignedIntType>(denominator);
+ if (!unsignedDenominator)
+ return 0;
+ return unsignedNumerator % unsignedDenominator;
+}
+
+template<typename IntType>
+static IntType rotateRight(IntType value, int32_t shift)
+{
+ typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+ UnsignedIntType uValue = static_cast<UnsignedIntType>(value);
+ int32_t bits = sizeof(IntType) * 8;
+ int32_t mask = bits - 1;
+ shift &= mask;
+ return (uValue >> shift) | (uValue << ((bits - shift) & mask));
+}
+
+template<typename IntType>
+static IntType rotateLeft(IntType value, int32_t shift)
+{
+ typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+ UnsignedIntType uValue = static_cast<UnsignedIntType>(value);
+ int32_t bits = sizeof(IntType) * 8;
+ int32_t mask = bits - 1;
+ shift &= mask;
+ return (uValue << shift) | (uValue >> ((bits - shift) & mask));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
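
isRepresentableAs above asks whether a constant survives the round trip through a narrower type, which is the test constant folding needs before it narrows a value. A standalone illustration of the same rule, using plain equality for brevity (the real helpers compare bit patterns so that -0 and NaN are handled precisely):

    #include <cassert>
    #include <cstdint>

    // Convert to the target type, convert back, and check nothing was lost.
    template<typename Result, typename Input>
    static bool roundTrips(Input value)
    {
        return static_cast<Input>(static_cast<Result>(value)) == value;
    }

    int main()
    {
        assert(roundTrips<int32_t>(static_cast<int64_t>(42)));       // fits
        assert(!roundTrips<int32_t>(static_cast<int64_t>(1) << 40)); // truncates
        assert(roundTrips<int32_t>(1024.0));                         // exact double
        assert(!roundTrips<int32_t>(0.5));                           // fraction lost
        assert(!roundTrips<float>(0.1));                             // precision lost
        return 0;
    }
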
diff --git a/Source/JavaScriptCore/b3/B3Commutativity.cpp b/Source/JavaScriptCore/b3/B3Commutativity.cpp
new file mode 100644
index 000000000..5de43e648
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Commutativity.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Commutativity.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Commutativity commutativity)
+{
+ switch (commutativity) {
+ case Commutative:
+ out.print("Commutative");
+ return;
+ case NotCommutative:
+ out.print("NotCommutative");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Commutativity.h b/Source/JavaScriptCore/b3/B3Commutativity.h
new file mode 100644
index 000000000..bf0de7537
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Commutativity.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+enum Commutativity {
+ Commutative,
+ NotCommutative
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::B3::Commutativity);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Compilation.cpp b/Source/JavaScriptCore/b3/B3Compilation.cpp
new file mode 100644
index 000000000..9e20a6b84
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Compilation.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Compilation.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproducts.h"
+#include "CCallHelpers.h"
+
+namespace JSC { namespace B3 {
+
+Compilation::Compilation(MacroAssemblerCodeRef codeRef, std::unique_ptr<OpaqueByproducts> byproducts)
+ : m_codeRef(codeRef)
+ , m_byproducts(WTFMove(byproducts))
+{
+}
+
+Compilation::Compilation(Compilation&& other)
+ : m_codeRef(WTFMove(other.m_codeRef))
+ , m_byproducts(WTFMove(other.m_byproducts))
+{
+}
+
+Compilation::~Compilation()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Compilation.h b/Source/JavaScriptCore/b3/B3Compilation.h
new file mode 100644
index 000000000..739865256
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Compilation.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class VM;
+
+namespace B3 {
+
+class OpaqueByproducts;
+class Procedure;
+
+// This class is a way to keep the result of a B3 compilation alive
+// and runnable.
+
+class Compilation {
+ WTF_MAKE_NONCOPYABLE(Compilation);
+ WTF_MAKE_FAST_ALLOCATED;
+
+public:
+ JS_EXPORT_PRIVATE Compilation(MacroAssemblerCodeRef, std::unique_ptr<OpaqueByproducts>);
+ JS_EXPORT_PRIVATE Compilation(Compilation&&);
+ JS_EXPORT_PRIVATE ~Compilation();
+
+ MacroAssemblerCodePtr code() const { return m_codeRef.code(); }
+ MacroAssemblerCodeRef codeRef() const { return m_codeRef; }
+
+ CString disassembly() const { return m_codeRef.disassembly(); }
+
+private:
+ MacroAssemblerCodeRef m_codeRef;
+ std::unique_ptr<OpaqueByproducts> m_byproducts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Compile.cpp b/Source/JavaScriptCore/b3/B3Compile.cpp
new file mode 100644
index 000000000..980390ac0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Compile.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Compile.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Generate.h"
+#include "B3OpaqueByproducts.h"
+#include "B3Procedure.h"
+#include "B3TimingScope.h"
+#include "CCallHelpers.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace B3 {
+
+Compilation compile(VM& vm, Procedure& proc, unsigned optLevel)
+{
+ TimingScope timingScope("Compilation");
+
+ prepareForGeneration(proc, optLevel);
+
+ CCallHelpers jit(&vm);
+ generate(proc, jit);
+ LinkBuffer linkBuffer(vm, jit, nullptr);
+
+ return Compilation(FINALIZE_CODE(linkBuffer, ("B3::Compilation")), proc.releaseByproducts());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Compile.h b/Source/JavaScriptCore/b3/B3Compile.h
new file mode 100644
index 000000000..37db1608f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Compile.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Compilation.h"
+
+namespace JSC {
+
+class VM;
+
+namespace B3 {
+
+class Procedure;
+
+// This is a fool-proof API for compiling a Procedure to code and then running that code. You compile
+// a Procedure using this API by doing:
+//
+// Compilation compilation = B3::compile(vm, proc);
+//
+// Then you keep the Compilation object alive for as long as you want to be able to run the code.
+// If this API feels too high-level, you can use B3::generate() directly.
+
+JS_EXPORT_PRIVATE Compilation compile(VM&, Procedure&, unsigned optLevel = 1);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
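
Following the usage comment in B3Compile.h, a hedged end-to-end sketch: build a one-block procedure that returns a constant, compile it, and call the resulting code. The VM reference is assumed to come from the caller; this mirrors common test usage rather than quoting any file in this patch.

    // Hedged sketch of the workflow described above.
    static int32_t compileAndRunConstant(VM& vm)
    {
        Procedure proc;
        BasicBlock* root = proc.addBlock();
        root->appendNew<Value>(
            proc, Return, Origin(),
            root->appendNew<Const32Value>(proc, Origin(), 42));

        Compilation compilation = compile(vm, proc);
        auto entry = bitwise_cast<int32_t (*)()>(compilation.code().executableAddress());
        return entry(); // 42, valid for as long as "compilation" is alive
    }
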
diff --git a/Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h b/Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h
new file mode 100644
index 000000000..8c17ed669
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ComputeDivisionMagic.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This contains code taken from LLVM's APInt class. That code implements finding the magic
+ * numbers for strength-reducing division. The LLVM code on which this code is based was
+ * implemented using "Hacker's Delight", Henry S. Warren, Jr., chapter 10.
+ *
+ * ==============================================================================
+ * LLVM Release License
+ * ==============================================================================
+ * University of Illinois/NCSA
+ * Open Source License
+ *
+ * Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign.
+ * All rights reserved.
+ *
+ * Developed by:
+ *
+ * LLVM Team
+ *
+ * University of Illinois at Urbana-Champaign
+ *
+ * http://llvm.org
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal with
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimers in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the names of the LLVM Team, University of Illinois at
+ * Urbana-Champaign, nor the names of its contributors may be used to
+ * endorse or promote products derived from this Software without specific
+ * prior written permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+template<typename T>
+struct DivisionMagic {
+ T magicMultiplier;
+ unsigned shift;
+};
+
+// This contains code taken from LLVM's APInt::magic(). It's modestly adapted to our style, but
+// not completely, to make it easier to apply their changes in the future.
+template<typename T>
+DivisionMagic<T> computeDivisionMagic(T divisor)
+{
+ typedef typename std::make_unsigned<T>::type UnsignedT;
+ UnsignedT d = divisor;
+ unsigned p;
+ UnsignedT ad, anc, delta, q1, r1, q2, r2, t;
+ UnsignedT signedMin = static_cast<UnsignedT>(std::numeric_limits<T>::min());
+ DivisionMagic<T> mag;
+ unsigned bitWidth = sizeof(divisor) * 8;
+
+ // This code doesn't like to think of signedness as a type. Instead it likes to think that
+ // operations have signedness. This is how we generally do it in B3 as well. For this reason,
+ // we cast all the operated values to unsigned once, and convert back to signed at the end.
+ // Only `divisor` has signedness here.
+
+ ad = divisor < 0 ? -divisor : divisor; // Absolute value of the divisor. Because ad is unsigned, even the magnitude of the signed minimum fits without loss.
+ t = signedMin + (d >> (bitWidth - 1));
+ anc = t - 1 - (t % ad); // absolute value of nc
+ p = bitWidth - 1; // initialize p
+ q1 = signedMin / anc; // initialize q1 = 2p/abs(nc)
+ r1 = signedMin - q1*anc; // initialize r1 = rem(2p,abs(nc))
+ q2 = signedMin / ad; // initialize q2 = 2p/abs(d)
+ r2 = signedMin - q2*ad; // initialize r2 = rem(2p,abs(d))
+ do {
+ p = p + 1;
+ q1 = q1 << 1; // update q1 = 2p/abs(nc)
+ r1 = r1 << 1; // update r1 = rem(2p/abs(nc))
+ if (r1 >= anc) { // must be unsigned comparison
+ q1 = q1 + 1;
+ r1 = r1 - anc;
+ }
+ q2 = q2 << 1; // update q2 = 2p/abs(d)
+ r2 = r2 << 1; // update r2 = rem(2p/abs(d))
+ if (r2 >= ad) { // must be unsigned comparison
+ q2 = q2 + 1;
+ r2 = r2 - ad;
+ }
+ delta = ad - r2;
+ } while (q1 < delta || (q1 == delta && r1 == 0));
+
+ mag.magicMultiplier = q2 + 1;
+ if (divisor < 0)
+ mag.magicMultiplier = -mag.magicMultiplier; // resulting magic number
+ mag.shift = p - bitWidth; // resulting shift
+
+ return mag;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
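As a concrete illustration of what the {magicMultiplier, shift} pair buys, here is a hedged sketch, not part of the patch, of the standard Hacker's Delight consumption recipe that a strength-reduction pass would emit in place of a real divide. It assumes an arithmetic right shift for negative int32_t and a divisor constant other than 0, 1, and -1:

    #include "B3ComputeDivisionMagic.h"
    #include <cstdint>

    // Signed 32-bit division by a known constant, without a divide instruction.
    static int32_t divideByConstant(int32_t numerator, int32_t divisor)
    {
        auto magic = JSC::B3::computeDivisionMagic(divisor);

        // High 32 bits of the full 64-bit product numerator * magicMultiplier.
        int64_t product = static_cast<int64_t>(numerator) * static_cast<int64_t>(magic.magicMultiplier);
        int32_t high = static_cast<int32_t>(product >> 32);

        // Correction terms from Hacker's Delight, chapter 10.
        if (divisor > 0 && magic.magicMultiplier < 0)
            high += numerator;
        if (divisor < 0 && magic.magicMultiplier > 0)
            high -= numerator;

        int32_t quotient = high >> magic.shift;
        quotient += static_cast<uint32_t>(quotient) >> 31; // rounds negative results toward zero
        return quotient;
    }

    // For example, divideByConstant(22, 7) == 3 and divideByConstant(-22, 7) == -3.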
diff --git a/Source/JavaScriptCore/b3/B3Const32Value.cpp b/Source/JavaScriptCore/b3/B3Const32Value.cpp
new file mode 100644
index 000000000..49a7453a4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Const32Value.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Const32Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+Const32Value::~Const32Value()
+{
+}
+
+Value* Const32Value::negConstant(Procedure& proc) const
+{
+ return proc.add<Const32Value>(origin(), -m_value);
+}
+
+Value* Const32Value::addConstant(Procedure& proc, int32_t other) const
+{
+ return proc.add<Const32Value>(origin(), m_value + other);
+}
+
+Value* Const32Value::addConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), m_value + other->asInt32());
+}
+
+Value* Const32Value::subConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), m_value - other->asInt32());
+}
+
+Value* Const32Value::mulConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), m_value * other->asInt32());
+}
+
+Value* Const32Value::checkAddConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ CheckedInt32 result = CheckedInt32(m_value) + CheckedInt32(other->asInt32());
+ if (result.hasOverflowed())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkSubConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ CheckedInt32 result = CheckedInt32(m_value) - CheckedInt32(other->asInt32());
+ if (result.hasOverflowed())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkMulConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ CheckedInt32 result = CheckedInt32(m_value) * CheckedInt32(other->asInt32());
+ if (result.hasOverflowed())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkNegConstant(Procedure& proc) const
+{
+ if (m_value == -m_value)
+ return nullptr;
+ return negConstant(proc);
+}
+
+Value* Const32Value::divConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), chillDiv(m_value, other->asInt32()));
+}
+
+Value* Const32Value::uDivConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), chillUDiv(m_value, other->asInt32()));
+}
+
+Value* Const32Value::modConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), chillMod(m_value, other->asInt32()));
+}
+
+Value* Const32Value::uModConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), chillUMod(m_value, other->asInt32()));
+}
+
+Value* Const32Value::bitAndConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), m_value & other->asInt32());
+}
+
+Value* Const32Value::bitOrConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), m_value | other->asInt32());
+}
+
+Value* Const32Value::bitXorConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), m_value ^ other->asInt32());
+}
+
+Value* Const32Value::shlConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), m_value << (other->asInt32() & 31));
+}
+
+Value* Const32Value::sShrConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), m_value >> (other->asInt32() & 31));
+}
+
+Value* Const32Value::zShrConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), static_cast<int32_t>(static_cast<uint32_t>(m_value) >> (other->asInt32() & 31)));
+}
+
+Value* Const32Value::rotRConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), rotateRight(m_value, other->asInt32()));
+}
+
+Value* Const32Value::rotLConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const32Value>(origin(), rotateLeft(m_value, other->asInt32()));
+}
+
+Value* Const32Value::bitwiseCastConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), bitwise_cast<float>(m_value));
+}
+
+Value* Const32Value::iToDConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* Const32Value::iToFConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+TriState Const32Value::equalConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(m_value == other->asInt32());
+}
+
+TriState Const32Value::notEqualConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(m_value != other->asInt32());
+}
+
+TriState Const32Value::lessThanConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(m_value < other->asInt32());
+}
+
+TriState Const32Value::greaterThanConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(m_value > other->asInt32());
+}
+
+TriState Const32Value::lessEqualConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(m_value <= other->asInt32());
+}
+
+TriState Const32Value::greaterEqualConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(m_value >= other->asInt32());
+}
+
+TriState Const32Value::aboveConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(static_cast<uint32_t>(m_value) > static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::belowConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(static_cast<uint32_t>(m_value) < static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::aboveEqualConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(static_cast<uint32_t>(m_value) >= static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::belowEqualConstant(const Value* other) const
+{
+ if (!other->hasInt32())
+ return MixedTriState;
+ return triState(static_cast<uint32_t>(m_value) <= static_cast<uint32_t>(other->asInt32()));
+}
+
+void Const32Value::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma, m_value);
+}
+
+Value* Const32Value::cloneImpl() const
+{
+ return new Const32Value(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
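One detail worth noting about the folding hooks above: every *Constant method returns nullptr when the other operand is not an int32 constant, which is the caller's signal that no folding happened. A hypothetical caller, not part of the patch, would use them like this:

    // Try to fold an Add whose operands may or may not both be constants. The base
    // Value implementations of these hooks return nullptr, so this is safe to call on
    // any pair of values; a non-null result is a freshly added constant node.
    static JSC::B3::Value* tryFoldAdd(JSC::B3::Procedure& proc, JSC::B3::Value* left, JSC::B3::Value* right)
    {
        if (JSC::B3::Value* folded = left->addConstant(proc, right))
            return folded;
        return nullptr; // caller keeps the original Add
    }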
diff --git a/Source/JavaScriptCore/b3/B3Const32Value.h b/Source/JavaScriptCore/b3/B3Const32Value.h
new file mode 100644
index 000000000..af4d08b05
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Const32Value.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE Const32Value : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == Const32; }
+
+ ~Const32Value();
+
+ int32_t value() const { return m_value; }
+
+ Value* negConstant(Procedure&) const override;
+ Value* addConstant(Procedure&, int32_t other) const override;
+ Value* addConstant(Procedure&, const Value* other) const override;
+ Value* subConstant(Procedure&, const Value* other) const override;
+ Value* mulConstant(Procedure&, const Value* other) const override;
+ Value* checkAddConstant(Procedure&, const Value* other) const override;
+ Value* checkSubConstant(Procedure&, const Value* other) const override;
+ Value* checkMulConstant(Procedure&, const Value* other) const override;
+ Value* checkNegConstant(Procedure&) const override;
+ Value* divConstant(Procedure&, const Value* other) const override;
+ Value* uDivConstant(Procedure&, const Value* other) const override;
+ Value* modConstant(Procedure&, const Value* other) const override;
+ Value* uModConstant(Procedure&, const Value* other) const override;
+ Value* bitAndConstant(Procedure&, const Value* other) const override;
+ Value* bitOrConstant(Procedure&, const Value* other) const override;
+ Value* bitXorConstant(Procedure&, const Value* other) const override;
+ Value* shlConstant(Procedure&, const Value* other) const override;
+ Value* sShrConstant(Procedure&, const Value* other) const override;
+ Value* zShrConstant(Procedure&, const Value* other) const override;
+ Value* rotRConstant(Procedure&, const Value* other) const override;
+ Value* rotLConstant(Procedure&, const Value* other) const override;
+ Value* bitwiseCastConstant(Procedure&) const override;
+ Value* iToDConstant(Procedure&) const override;
+ Value* iToFConstant(Procedure&) const override;
+
+ TriState equalConstant(const Value* other) const override;
+ TriState notEqualConstant(const Value* other) const override;
+ TriState lessThanConstant(const Value* other) const override;
+ TriState greaterThanConstant(const Value* other) const override;
+ TriState lessEqualConstant(const Value* other) const override;
+ TriState greaterEqualConstant(const Value* other) const override;
+ TriState aboveConstant(const Value* other) const override;
+ TriState belowConstant(const Value* other) const override;
+ TriState aboveEqualConstant(const Value* other) const override;
+ TriState belowEqualConstant(const Value* other) const override;
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+ friend class Procedure;
+
+ Const32Value(Origin origin, int32_t value)
+ : Value(CheckedOpcode, Const32, Int32, origin)
+ , m_value(value)
+ {
+ }
+
+private:
+ int32_t m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Const64Value.cpp b/Source/JavaScriptCore/b3/B3Const64Value.cpp
new file mode 100644
index 000000000..4f7b86b2e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Const64Value.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Const64Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+Const64Value::~Const64Value()
+{
+}
+
+Value* Const64Value::negConstant(Procedure& proc) const
+{
+ return proc.add<Const64Value>(origin(), -m_value);
+}
+
+Value* Const64Value::addConstant(Procedure& proc, int32_t other) const
+{
+ return proc.add<Const64Value>(origin(), m_value + static_cast<int64_t>(other));
+}
+
+Value* Const64Value::addConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), m_value + other->asInt64());
+}
+
+Value* Const64Value::subConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), m_value - other->asInt64());
+}
+
+Value* Const64Value::mulConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), m_value * other->asInt64());
+}
+
+Value* Const64Value::checkAddConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ CheckedInt64 result = CheckedInt64(m_value) + CheckedInt64(other->asInt64());
+ if (result.hasOverflowed())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
+Value* Const64Value::checkSubConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ CheckedInt64 result = CheckedInt64(m_value) - CheckedInt64(other->asInt64());
+ if (result.hasOverflowed())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
+Value* Const64Value::checkMulConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ CheckedInt64 result = CheckedInt64(m_value) * CheckedInt64(other->asInt64());
+ if (result.hasOverflowed())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
+Value* Const64Value::checkNegConstant(Procedure& proc) const
+{
+ if (m_value == -m_value)
+ return nullptr;
+ return negConstant(proc);
+}
+
+Value* Const64Value::divConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), chillDiv(m_value, other->asInt64()));
+}
+
+Value* Const64Value::uDivConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), chillUDiv(m_value, other->asInt64()));
+}
+
+Value* Const64Value::modConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), chillMod(m_value, other->asInt64()));
+}
+
+Value* Const64Value::uModConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), chillUMod(m_value, other->asInt64()));
+}
+
+Value* Const64Value::bitAndConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), m_value & other->asInt64());
+}
+
+Value* Const64Value::bitOrConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), m_value | other->asInt64());
+}
+
+Value* Const64Value::bitXorConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt64())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), m_value ^ other->asInt64());
+}
+
+Value* Const64Value::shlConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), m_value << (other->asInt32() & 63));
+}
+
+Value* Const64Value::sShrConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), m_value >> (other->asInt32() & 63));
+}
+
+Value* Const64Value::zShrConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), static_cast<int64_t>(static_cast<uint64_t>(m_value) >> (other->asInt32() & 63)));
+}
+
+Value* Const64Value::rotRConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), rotateRight(m_value, other->asInt32()));
+}
+
+Value* Const64Value::rotLConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasInt32())
+ return nullptr;
+ return proc.add<Const64Value>(origin(), rotateLeft(m_value, other->asInt32()));
+}
+
+Value* Const64Value::bitwiseCastConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), bitwise_cast<double>(m_value));
+}
+
+Value* Const64Value::iToDConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* Const64Value::iToFConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+TriState Const64Value::equalConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(m_value == other->asInt64());
+}
+
+TriState Const64Value::notEqualConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(m_value != other->asInt64());
+}
+
+TriState Const64Value::lessThanConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(m_value < other->asInt64());
+}
+
+TriState Const64Value::greaterThanConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(m_value > other->asInt64());
+}
+
+TriState Const64Value::lessEqualConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(m_value <= other->asInt64());
+}
+
+TriState Const64Value::greaterEqualConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(m_value >= other->asInt64());
+}
+
+TriState Const64Value::aboveConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(static_cast<uint64_t>(m_value) > static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::belowConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(static_cast<uint64_t>(m_value) < static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::aboveEqualConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(static_cast<uint64_t>(m_value) >= static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::belowEqualConstant(const Value* other) const
+{
+ if (!other->hasInt64())
+ return MixedTriState;
+ return triState(static_cast<uint64_t>(m_value) <= static_cast<uint64_t>(other->asInt64()));
+}
+
+void Const64Value::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma, m_value);
+}
+
+Value* Const64Value::cloneImpl() const
+{
+ return new Const64Value(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Const64Value.h b/Source/JavaScriptCore/b3/B3Const64Value.h
new file mode 100644
index 000000000..3efd55847
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Const64Value.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE Const64Value : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == Const64; }
+
+ ~Const64Value();
+
+ int64_t value() const { return m_value; }
+
+ Value* negConstant(Procedure&) const override;
+ Value* addConstant(Procedure&, int32_t other) const override;
+ Value* addConstant(Procedure&, const Value* other) const override;
+ Value* subConstant(Procedure&, const Value* other) const override;
+ Value* mulConstant(Procedure&, const Value* other) const override;
+ Value* checkAddConstant(Procedure&, const Value* other) const override;
+ Value* checkSubConstant(Procedure&, const Value* other) const override;
+ Value* checkMulConstant(Procedure&, const Value* other) const override;
+ Value* checkNegConstant(Procedure&) const override;
+ Value* divConstant(Procedure&, const Value* other) const override;
+ Value* uDivConstant(Procedure&, const Value* other) const override;
+ Value* modConstant(Procedure&, const Value* other) const override;
+ Value* uModConstant(Procedure&, const Value* other) const override;
+ Value* bitAndConstant(Procedure&, const Value* other) const override;
+ Value* bitOrConstant(Procedure&, const Value* other) const override;
+ Value* bitXorConstant(Procedure&, const Value* other) const override;
+ Value* shlConstant(Procedure&, const Value* other) const override;
+ Value* sShrConstant(Procedure&, const Value* other) const override;
+ Value* zShrConstant(Procedure&, const Value* other) const override;
+ Value* rotRConstant(Procedure&, const Value* other) const override;
+ Value* rotLConstant(Procedure&, const Value* other) const override;
+ Value* bitwiseCastConstant(Procedure&) const override;
+ Value* iToDConstant(Procedure&) const override;
+ Value* iToFConstant(Procedure&) const override;
+
+ TriState equalConstant(const Value* other) const override;
+ TriState notEqualConstant(const Value* other) const override;
+ TriState lessThanConstant(const Value* other) const override;
+ TriState greaterThanConstant(const Value* other) const override;
+ TriState lessEqualConstant(const Value* other) const override;
+ TriState greaterEqualConstant(const Value* other) const override;
+ TriState aboveConstant(const Value* other) const override;
+ TriState belowConstant(const Value* other) const override;
+ TriState aboveEqualConstant(const Value* other) const override;
+ TriState belowEqualConstant(const Value* other) const override;
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+ friend class Procedure;
+
+ Const64Value(Origin origin, int64_t value)
+ : Value(CheckedOpcode, Const64, Int64, origin)
+ , m_value(value)
+ {
+ }
+
+private:
+ int64_t m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp b/Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp
new file mode 100644
index 000000000..0a7d7482c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstDoubleValue.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3ConstDoubleValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstFloatValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+ConstDoubleValue::~ConstDoubleValue()
+{
+}
+
+Value* ConstDoubleValue::negConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), -m_value);
+}
+
+Value* ConstDoubleValue::addConstant(Procedure& proc, int32_t other) const
+{
+ return proc.add<ConstDoubleValue>(origin(), m_value + static_cast<double>(other));
+}
+
+Value* ConstDoubleValue::addConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasDouble())
+ return nullptr;
+ return proc.add<ConstDoubleValue>(origin(), m_value + other->asDouble());
+}
+
+Value* ConstDoubleValue::subConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasDouble())
+ return nullptr;
+ return proc.add<ConstDoubleValue>(origin(), m_value - other->asDouble());
+}
+
+Value* ConstDoubleValue::mulConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasDouble())
+ return nullptr;
+ return proc.add<ConstDoubleValue>(origin(), m_value * other->asDouble());
+}
+
+Value* ConstDoubleValue::bitAndConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasDouble())
+ return nullptr;
+ double result = bitwise_cast<double>(bitwise_cast<uint64_t>(m_value) & bitwise_cast<uint64_t>(other->asDouble()));
+ return proc.add<ConstDoubleValue>(origin(), result);
+}
+
+Value* ConstDoubleValue::bitOrConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasDouble())
+ return nullptr;
+ double result = bitwise_cast<double>(bitwise_cast<uint64_t>(m_value) | bitwise_cast<uint64_t>(other->asDouble()));
+ return proc.add<ConstDoubleValue>(origin(), result);
+}
+
+Value* ConstDoubleValue::bitXorConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasDouble())
+ return nullptr;
+ double result = bitwise_cast<double>(bitwise_cast<uint64_t>(m_value) ^ bitwise_cast<uint64_t>(other->asDouble()));
+ return proc.add<ConstDoubleValue>(origin(), result);
+}
+
+
+Value* ConstDoubleValue::bitwiseCastConstant(Procedure& proc) const
+{
+ return proc.add<Const64Value>(origin(), bitwise_cast<int64_t>(m_value));
+}
+
+Value* ConstDoubleValue::doubleToFloatConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+Value* ConstDoubleValue::absConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), fabs(m_value));
+}
+
+Value* ConstDoubleValue::ceilConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), ceil(m_value));
+}
+
+Value* ConstDoubleValue::floorConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), floor(m_value));
+}
+
+Value* ConstDoubleValue::sqrtConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), sqrt(m_value));
+}
+
+Value* ConstDoubleValue::divConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasDouble())
+ return nullptr;
+ return proc.add<ConstDoubleValue>(origin(), m_value / other->asDouble());
+}
+
+Value* ConstDoubleValue::modConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasDouble())
+ return nullptr;
+ return proc.add<ConstDoubleValue>(origin(), fmod(m_value, other->asDouble()));
+}
+
+TriState ConstDoubleValue::equalConstant(const Value* other) const
+{
+ if (!other->hasDouble())
+ return MixedTriState;
+ return triState(m_value == other->asDouble());
+}
+
+TriState ConstDoubleValue::notEqualConstant(const Value* other) const
+{
+ if (!other->hasDouble())
+ return MixedTriState;
+ return triState(m_value != other->asDouble());
+}
+
+TriState ConstDoubleValue::lessThanConstant(const Value* other) const
+{
+ if (!other->hasDouble())
+ return MixedTriState;
+ return triState(m_value < other->asDouble());
+}
+
+TriState ConstDoubleValue::greaterThanConstant(const Value* other) const
+{
+ if (!other->hasDouble())
+ return MixedTriState;
+ return triState(m_value > other->asDouble());
+}
+
+TriState ConstDoubleValue::lessEqualConstant(const Value* other) const
+{
+ if (!other->hasDouble())
+ return MixedTriState;
+ return triState(m_value <= other->asDouble());
+}
+
+TriState ConstDoubleValue::greaterEqualConstant(const Value* other) const
+{
+ if (!other->hasDouble())
+ return MixedTriState;
+ return triState(m_value >= other->asDouble());
+}
+
+TriState ConstDoubleValue::equalOrUnorderedConstant(const Value* other) const
+{
+ if (std::isnan(m_value))
+ return TrueTriState;
+
+ if (!other->hasDouble())
+ return MixedTriState;
+ double otherValue = other->asDouble();
+ return triState(std::isunordered(m_value, otherValue) || m_value == otherValue);
+}
+
+void ConstDoubleValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma);
+ out.printf("%le", m_value);
+}
+
+Value* ConstDoubleValue::cloneImpl() const
+{
+ return new ConstDoubleValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstDoubleValue.h b/Source/JavaScriptCore/b3/B3ConstDoubleValue.h
new file mode 100644
index 000000000..fdfaddc1e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstDoubleValue.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ConstDoubleValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == ConstDouble; }
+
+ ~ConstDoubleValue();
+
+ double value() const { return m_value; }
+
+ Value* negConstant(Procedure&) const override;
+ Value* addConstant(Procedure&, int32_t other) const override;
+ Value* addConstant(Procedure&, const Value* other) const override;
+ Value* subConstant(Procedure&, const Value* other) const override;
+ Value* divConstant(Procedure&, const Value* other) const override;
+ Value* modConstant(Procedure&, const Value* other) const override;
+ Value* mulConstant(Procedure&, const Value* other) const override;
+ Value* bitAndConstant(Procedure&, const Value* other) const override;
+ Value* bitOrConstant(Procedure&, const Value* other) const override;
+ Value* bitXorConstant(Procedure&, const Value* other) const override;
+ Value* bitwiseCastConstant(Procedure&) const override;
+ Value* doubleToFloatConstant(Procedure&) const override;
+ Value* absConstant(Procedure&) const override;
+ Value* ceilConstant(Procedure&) const override;
+ Value* floorConstant(Procedure&) const override;
+ Value* sqrtConstant(Procedure&) const override;
+
+ TriState equalConstant(const Value* other) const override;
+ TriState notEqualConstant(const Value* other) const override;
+ TriState lessThanConstant(const Value* other) const override;
+ TriState greaterThanConstant(const Value* other) const override;
+ TriState lessEqualConstant(const Value* other) const override;
+ TriState greaterEqualConstant(const Value* other) const override;
+ TriState equalOrUnorderedConstant(const Value* other) const override;
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ ConstDoubleValue(Origin origin, double value)
+ : Value(CheckedOpcode, ConstDouble, Double, origin)
+ , m_value(value)
+ {
+ }
+
+ double m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstFloatValue.cpp b/Source/JavaScriptCore/b3/B3ConstFloatValue.cpp
new file mode 100644
index 000000000..76facae32
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstFloatValue.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3ConstFloatValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstDoubleValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+ConstFloatValue::~ConstFloatValue()
+{
+}
+
+Value* ConstFloatValue::negConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), -m_value);
+}
+
+Value* ConstFloatValue::addConstant(Procedure& proc, int32_t other) const
+{
+ return proc.add<ConstFloatValue>(origin(), m_value + static_cast<float>(other));
+}
+
+Value* ConstFloatValue::addConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasFloat())
+ return nullptr;
+ return proc.add<ConstFloatValue>(origin(), m_value + other->asFloat());
+}
+
+Value* ConstFloatValue::subConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasFloat())
+ return nullptr;
+ return proc.add<ConstFloatValue>(origin(), m_value - other->asFloat());
+}
+
+Value* ConstFloatValue::mulConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasFloat())
+ return nullptr;
+ return proc.add<ConstFloatValue>(origin(), m_value * other->asFloat());
+}
+
+Value* ConstFloatValue::bitAndConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasFloat())
+ return nullptr;
+ float result = bitwise_cast<float>(bitwise_cast<uint32_t>(m_value) & bitwise_cast<uint32_t>(other->asFloat()));
+ return proc.add<ConstFloatValue>(origin(), result);
+}
+
+Value* ConstFloatValue::bitOrConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasFloat())
+ return nullptr;
+ float result = bitwise_cast<float>(bitwise_cast<uint32_t>(m_value) | bitwise_cast<uint32_t>(other->asFloat()));
+ return proc.add<ConstFloatValue>(origin(), result);
+}
+
+Value* ConstFloatValue::bitXorConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasFloat())
+ return nullptr;
+ float result = bitwise_cast<float>(bitwise_cast<uint32_t>(m_value) ^ bitwise_cast<uint32_t>(other->asFloat()));
+ return proc.add<ConstFloatValue>(origin(), result);
+}
+
+Value* ConstFloatValue::bitwiseCastConstant(Procedure& proc) const
+{
+ return proc.add<Const32Value>(origin(), bitwise_cast<int32_t>(m_value));
+}
+
+Value* ConstFloatValue::floatToDoubleConstant(Procedure& proc) const
+{
+ return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* ConstFloatValue::absConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), static_cast<float>(fabs(m_value)));
+}
+
+Value* ConstFloatValue::ceilConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), ceilf(m_value));
+}
+
+Value* ConstFloatValue::floorConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), floorf(m_value));
+}
+
+Value* ConstFloatValue::sqrtConstant(Procedure& proc) const
+{
+ return proc.add<ConstFloatValue>(origin(), static_cast<float>(sqrt(m_value)));
+}
+
+Value* ConstFloatValue::divConstant(Procedure& proc, const Value* other) const
+{
+ if (!other->hasFloat())
+ return nullptr;
+ return proc.add<ConstFloatValue>(origin(), m_value / other->asFloat());
+}
+
+TriState ConstFloatValue::equalConstant(const Value* other) const
+{
+ if (!other->hasFloat())
+ return MixedTriState;
+ return triState(m_value == other->asFloat());
+}
+
+TriState ConstFloatValue::notEqualConstant(const Value* other) const
+{
+ if (!other->hasFloat())
+ return MixedTriState;
+ return triState(m_value != other->asFloat());
+}
+
+TriState ConstFloatValue::lessThanConstant(const Value* other) const
+{
+ if (!other->hasFloat())
+ return MixedTriState;
+ return triState(m_value < other->asFloat());
+}
+
+TriState ConstFloatValue::greaterThanConstant(const Value* other) const
+{
+ if (!other->hasFloat())
+ return MixedTriState;
+ return triState(m_value > other->asFloat());
+}
+
+TriState ConstFloatValue::lessEqualConstant(const Value* other) const
+{
+ if (!other->hasFloat())
+ return MixedTriState;
+ return triState(m_value <= other->asFloat());
+}
+
+TriState ConstFloatValue::greaterEqualConstant(const Value* other) const
+{
+ if (!other->hasFloat())
+ return MixedTriState;
+ return triState(m_value >= other->asFloat());
+}
+
+void ConstFloatValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma);
+ out.printf("%le", m_value);
+}
+
+Value* ConstFloatValue::cloneImpl() const
+{
+ return new ConstFloatValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstFloatValue.h b/Source/JavaScriptCore/b3/B3ConstFloatValue.h
new file mode 100644
index 000000000..185583c07
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstFloatValue.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ConstFloatValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == ConstFloat; }
+
+ ~ConstFloatValue();
+
+ float value() const { return m_value; }
+
+ Value* negConstant(Procedure&) const override;
+ Value* addConstant(Procedure&, int32_t other) const override;
+ Value* addConstant(Procedure&, const Value* other) const override;
+ Value* subConstant(Procedure&, const Value* other) const override;
+ Value* divConstant(Procedure&, const Value* other) const override;
+ Value* mulConstant(Procedure&, const Value* other) const override;
+ Value* bitAndConstant(Procedure&, const Value* other) const override;
+ Value* bitOrConstant(Procedure&, const Value* other) const override;
+ Value* bitXorConstant(Procedure&, const Value* other) const override;
+ Value* bitwiseCastConstant(Procedure&) const override;
+ Value* floatToDoubleConstant(Procedure&) const override;
+ Value* absConstant(Procedure&) const override;
+ Value* ceilConstant(Procedure&) const override;
+ Value* floorConstant(Procedure&) const override;
+ Value* sqrtConstant(Procedure&) const override;
+
+ TriState equalConstant(const Value* other) const override;
+ TriState notEqualConstant(const Value* other) const override;
+ TriState lessThanConstant(const Value* other) const override;
+ TriState greaterThanConstant(const Value* other) const override;
+ TriState lessEqualConstant(const Value* other) const override;
+ TriState greaterEqualConstant(const Value* other) const override;
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ ConstFloatValue(Origin origin, float value)
+ : Value(CheckedOpcode, ConstFloat, Float, origin)
+ , m_value(value)
+ {
+ }
+
+ float m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ConstPtrValue.h b/Source/JavaScriptCore/b3/B3ConstPtrValue.h
new file mode 100644
index 000000000..78bcba39b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstPtrValue.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Const32Value.h"
+#include "B3Const64Value.h"
+
+namespace JSC { namespace B3 {
+
+// Usually you want to use Const32Value or Const64Value directly. But this is useful for writing
+// platform-agnostic code. Note that a ConstPtrValue will behave like either a Const32Value or
+// Const64Value depending on platform.
+
+#if USE(JSVALUE64)
+typedef Const64Value ConstPtrValueBase;
+#else
+typedef Const32Value ConstPtrValueBase;
+#endif
+
+class ConstPtrValue : public ConstPtrValueBase {
+public:
+ void* value() const
+ {
+ return bitwise_cast<void*>(ConstPtrValueBase::value());
+ }
+
+private:
+ friend class Procedure;
+
+ template<typename T>
+ ConstPtrValue(Origin origin, T* pointer)
+ : ConstPtrValueBase(origin, bitwise_cast<intptr_t>(pointer))
+ {
+ }
+ template<typename T>
+ ConstPtrValue(Origin origin, T pointer)
+ : ConstPtrValueBase(origin, static_cast<intptr_t>(pointer))
+ {
+ }
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
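A small hypothetical illustration of the platform-agnostic point made in the comment above (not part of the patch): client code can hand a host pointer to the IR without branching on word size, and the node it gets back is a Const64 on JSVALUE64 targets and a Const32 otherwise:

    #include "B3ConstPtrValue.h"
    #include "B3ProcedureInlines.h"

    // Emit a pointer-sized constant; value() bitwise_casts it back from the underlying
    // integer constant regardless of the target's pointer width.
    static JSC::B3::ConstPtrValue* emitPointerConstant(JSC::B3::Procedure& proc, JSC::B3::Origin origin, void* pointer)
    {
        return proc.add<JSC::B3::ConstPtrValue>(origin, pointer);
    }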
diff --git a/Source/JavaScriptCore/b3/B3ConstrainedValue.cpp b/Source/JavaScriptCore/b3/B3ConstrainedValue.cpp
new file mode 100644
index 000000000..dd1762ff2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstrainedValue.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3ConstrainedValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+void ConstrainedValue::dump(PrintStream& out) const
+{
+ out.print(pointerDump(m_value), ":", m_rep);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3ConstrainedValue.h b/Source/JavaScriptCore/b3/B3ConstrainedValue.h
new file mode 100644
index 000000000..d2cd31fe1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ConstrainedValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueRep.h"
+
+namespace JSC { namespace B3 {
+
+class Value;
+
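+// A Value paired with the ValueRep that constrains where it must live. For illustration only (the
+// call site shown is an assumption, not part of this patch): a stackmap-style argument is
+// typically passed as something like ConstrainedValue(value, ValueRep::SomeRegister).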
+class ConstrainedValue {
+public:
+ ConstrainedValue()
+ {
+ }
+
+ ConstrainedValue(Value* value)
+ : m_value(value)
+ , m_rep(ValueRep::WarmAny)
+ {
+ }
+
+ ConstrainedValue(Value* value, const ValueRep& rep)
+ : m_value(value)
+ , m_rep(rep)
+ {
+ }
+
+ explicit operator bool() const { return m_value || m_rep; }
+
+ Value* value() const { return m_value; }
+ const ValueRep& rep() const { return m_rep; }
+
+ void dump(PrintStream& out) const;
+
+private:
+ Value* m_value;
+ ValueRep m_rep;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3DataSection.cpp b/Source/JavaScriptCore/b3/B3DataSection.cpp
new file mode 100644
index 000000000..f4e68bca2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3DataSection.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3DataSection.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+DataSection::DataSection(size_t size)
+ : m_data(fastZeroedMalloc(size))
+ , m_size(size)
+{
+}
+
+DataSection::~DataSection()
+{
+ fastFree(m_data);
+}
+
+void DataSection::dump(PrintStream& out) const
+{
+ out.print("DataSection at ", RawPointer(m_data), " with ", m_size, " bytes.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3DataSection.h b/Source/JavaScriptCore/b3/B3DataSection.h
new file mode 100644
index 000000000..0bca40ed4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3DataSection.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproduct.h"
+
+namespace JSC { namespace B3 {
+
+class DataSection : public OpaqueByproduct {
+public:
+ DataSection(size_t size);
+ virtual ~DataSection();
+
+ void* data() const { return m_data; }
+ size_t size() const { return m_size; }
+
+ void dump(PrintStream&) const override;
+
+private:
+ void* m_data;
+ size_t m_size;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Dominators.h b/Source/JavaScriptCore/b3/B3Dominators.h
new file mode 100644
index 000000000..4a9d085f1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Dominators.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CFG.h"
+#include "B3Procedure.h"
+#include <wtf/Dominators.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 {
+
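+// Dominator tree over the B3 CFG, obtained via Procedure::dominators(). For illustration: the CSE
+// phase in this patch queries it as m_dominators.dominates(match->owner, m_block) to check that a
+// matching memory value's block dominates the block being edited.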
+class Dominators : public WTF::Dominators<CFG> {
+ WTF_MAKE_NONCOPYABLE(Dominators);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ Dominators(Procedure& proc)
+ : WTF::Dominators<CFG>(proc.cfg())
+ {
+ }
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3DuplicateTails.cpp b/Source/JavaScriptCore/b3/B3DuplicateTails.cpp
new file mode 100644
index 000000000..fe94a607d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3DuplicateTails.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3DuplicateTails.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BreakCriticalEdges.h"
+#include "B3Dominators.h"
+#include "B3FixSSA.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class DuplicateTails {
+public:
+ DuplicateTails(Procedure& proc)
+ : m_proc(proc)
+ , m_insertionSet(proc)
+ , m_maxSize(Options::maxB3TailDupBlockSize())
+ , m_maxSuccessors(Options::maxB3TailDupBlockSuccessors())
+ {
+ }
+
+ void run()
+ {
+ // Breaking critical edges introduces blocks that jump to things. Those Jumps' successors
+ // become candidates for tail duplication. Prior to critical edge breaking, some of those
+ // Jumps would have been Branches, and so no tail duplication would have happened.
+ breakCriticalEdges(m_proc);
+
+ // Find blocks that would be candidates for tail duplication. They must be small enough
+        // and they must not have too many successors.
+
+ m_proc.resetValueOwners();
+
+ IndexSet<BasicBlock> candidates;
+
+ for (BasicBlock* block : m_proc) {
+ if (block->size() > m_maxSize)
+ continue;
+ if (block->numSuccessors() > m_maxSuccessors)
+ continue;
+ if (block->last()->type() != Void) // Demoting doesn't handle terminals with values.
+ continue;
+
+ candidates.add(block);
+ }
+
+ // Collect the set of values that must be de-SSA'd.
+ IndexSet<Value> valuesToDemote;
+ for (BasicBlock* block : m_proc) {
+ for (Value* value : *block) {
+ if (value->opcode() == Phi && candidates.contains(block))
+ valuesToDemote.add(value);
+ for (Value* child : value->children()) {
+ if (child->owner != block && candidates.contains(child->owner))
+ valuesToDemote.add(child);
+ }
+ }
+ }
+ demoteValues(m_proc, valuesToDemote);
+ if (verbose) {
+ dataLog("Procedure after value demotion:\n");
+ dataLog(m_proc);
+ }
+
+ for (BasicBlock* block : m_proc) {
+ if (block->last()->opcode() != Jump)
+ continue;
+
+ BasicBlock* tail = block->successorBlock(0);
+ if (!candidates.contains(tail))
+ continue;
+
+ // Don't tail duplicate a trivial self-loop, because the code below can't handle block and
+ // tail being the same block.
+ if (block == tail)
+ continue;
+
+ // We're about to change 'block'. Make sure that nobody duplicates block after this
+ // point.
+ candidates.remove(block);
+
+ if (verbose)
+ dataLog("Duplicating ", *tail, " into ", *block, "\n");
+
+ block->removeLast(m_proc);
+
+ HashMap<Value*, Value*> map;
+ for (Value* value : *tail) {
+ Value* clone = m_proc.clone(value);
+ for (Value*& child : clone->children()) {
+ if (Value* replacement = map.get(child))
+ child = replacement;
+ }
+ if (value->type() != Void)
+ map.add(value, clone);
+ block->append(clone);
+ }
+ block->successors() = tail->successors();
+ }
+
+ m_proc.resetReachability();
+ m_proc.invalidateCFG();
+ }
+
+private:
+
+ Procedure& m_proc;
+ InsertionSet m_insertionSet;
+ unsigned m_maxSize;
+ unsigned m_maxSuccessors;
+};
+
+} // anonymous namespace
+
+void duplicateTails(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "duplicateTails");
+ DuplicateTails duplicateTails(proc);
+ duplicateTails.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3DuplicateTails.h b/Source/JavaScriptCore/b3/B3DuplicateTails.h
new file mode 100644
index 000000000..443adafb6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3DuplicateTails.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Replaces jumps to tiny basic blocks with the contents of those basic blocks. Also simplifies
+// branches that are path-redundant. Does not do a fixpoint, because it does not have a good way
+// of detecting termination.
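+//
+// For illustration only (hypothetical IR): given
+//
+//     #1: ...; Jump(#2)
+//     #2: x = Load(p); Return(x)
+//
+// the Jump in #1 is removed and #2's contents are cloned into #1:
+//
+//     #1: ...; x' = Load(p); Return(x')
+//
+// after which #2 may become unreachable and is cleaned up by resetReachability().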
+
+void duplicateTails(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Effects.cpp b/Source/JavaScriptCore/b3/B3Effects.cpp
new file mode 100644
index 000000000..aeda46f83
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Effects.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Effects.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+#include <wtf/DataLog.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+// These helpers cascade in such a way that after the helper for terminal, we don't have to worry
+// about terminal again, since the terminal case considers all ways that a terminal may interfere
+// with something else. And after the exit sideways case, we don't have to worry about either
+// exitsSideways or terminal. And so on...
+
+bool interferesWithTerminal(const Effects& terminal, const Effects& other)
+{
+ if (!terminal.terminal)
+ return false;
+ return other.terminal || other.controlDependent || other.writesLocalState || other.writes || other.writesPinned;
+}
+
+bool interferesWithExitSideways(const Effects& exitsSideways, const Effects& other)
+{
+ if (!exitsSideways.exitsSideways)
+ return false;
+ return other.controlDependent || other.writes || other.writesPinned;
+}
+
+bool interferesWithWritesLocalState(const Effects& writesLocalState, const Effects& other)
+{
+ if (!writesLocalState.writesLocalState)
+ return false;
+ return other.writesLocalState || other.readsLocalState;
+}
+
+bool interferesWithWritesPinned(const Effects& writesPinned, const Effects& other)
+{
+ if (!writesPinned.writesPinned)
+ return false;
+ return other.writesPinned || other.readsPinned;
+}
+
+} // anonymous namespace
+
+bool Effects::interferes(const Effects& other) const
+{
+ return interferesWithTerminal(*this, other)
+ || interferesWithTerminal(other, *this)
+ || interferesWithExitSideways(*this, other)
+ || interferesWithExitSideways(other, *this)
+ || interferesWithWritesLocalState(*this, other)
+ || interferesWithWritesLocalState(other, *this)
+ || interferesWithWritesPinned(*this, other)
+ || interferesWithWritesPinned(other, *this)
+ || writes.overlaps(other.writes)
+ || writes.overlaps(other.reads)
+ || reads.overlaps(other.writes);
+}
+
+bool Effects::operator==(const Effects& other) const
+{
+ return terminal == other.terminal
+ && exitsSideways == other.exitsSideways
+ && controlDependent == other.controlDependent
+ && writesLocalState == other.writesLocalState
+ && readsLocalState == other.readsLocalState
+ && writesPinned == other.writesPinned
+ && readsPinned == other.readsPinned
+ && writes == other.writes
+ && reads == other.reads;
+}
+
+bool Effects::operator!=(const Effects& other) const
+{
+ return !(*this == other);
+}
+
+void Effects::dump(PrintStream& out) const
+{
+ CommaPrinter comma("|");
+ if (terminal)
+ out.print(comma, "Terminal");
+ if (exitsSideways)
+ out.print(comma, "ExitsSideways");
+ if (controlDependent)
+ out.print(comma, "ControlDependent");
+ if (writesLocalState)
+ out.print(comma, "WritesLocalState");
+ if (readsLocalState)
+ out.print(comma, "ReadsLocalState");
+ if (writesPinned)
+ out.print(comma, "WritesPinned");
+ if (readsPinned)
+ out.print(comma, "ReadsPinned");
+ if (writes)
+ out.print(comma, "Writes:", writes);
+ if (reads)
+ out.print(comma, "Reads:", reads);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Effects.h b/Source/JavaScriptCore/b3/B3Effects.h
new file mode 100644
index 000000000..7a088535b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Effects.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+struct Effects {
+ // True if this cannot continue execution in the current block.
+ bool terminal { false };
+
+    // True if this value can cause execution to terminate abruptly, and this abrupt termination is
+    // observable. An example of how this gets used is to limit the hoisting of controlDependent values.
+ // Note that if exitsSideways is set to true but reads is bottom, then B3 is free to assume that
+ // after abrupt termination of this procedure, none of the heap will be read. That's usually false,
+ // so make sure that reads corresponds to the set of things that are readable after this function
+ // terminates abruptly.
+ bool exitsSideways { false };
+
+ // True if the instruction may change semantics if hoisted above some control flow. For example,
+ // loads are usually control-dependent because we must assume that any control construct (either
+ // a terminal like Branch or anything that exits sideways, like Check) validates whether the
+ // pointer is valid. Hoisting the load above control may cause the load to trap even though it
+ // would not have otherwise trapped.
+ bool controlDependent { false };
+
+ // True if this writes to the local state. Operations that write local state don't write to anything
+ // in "memory" but they have a side-effect anyway. This is for modeling Upsilons, Sets, and Fences.
+ // This is a way of saying: even though this operation is not a terminal, does not exit sideways,
+ // and does not write to the heap, you still cannot kill this operation.
+ bool writesLocalState { false };
+
+ // True if this reads from the local state. This is only used for Phi and Get.
+ bool readsLocalState { false };
+
+ // B3 understands things about pinned registers. Therefore, it needs to know who reads them and
+ // who writes them. We don't track this on a per-register basis because that would be harder and
+ // we don't need it. Note that if you want to construct an immutable pinned register while also
+ // having other pinned registers that are mutable, then you can use ArgumentReg. Also note that
+ // nobody will stop you from making this get out-of-sync with your clobbered register sets in
+ // Patchpoint. It's recommended that you err on the side of being conservative.
+ // FIXME: Explore making these be RegisterSets. That's mainly hard because it would be awkward to
+ // reconcile with StackmapValue's support for clobbered regs.
+ // https://bugs.webkit.org/show_bug.cgi?id=163173
+ bool readsPinned { false };
+ bool writesPinned { false };
+
+ HeapRange writes;
+ HeapRange reads;
+
+ static Effects none()
+ {
+ return Effects();
+ }
+
+ static Effects forCall()
+ {
+ Effects result;
+ result.exitsSideways = true;
+ result.controlDependent = true;
+ result.writes = HeapRange::top();
+ result.reads = HeapRange::top();
+ result.readsPinned = true;
+ result.writesPinned = true;
+ return result;
+ }
+
+ static Effects forCheck()
+ {
+ Effects result;
+ result.exitsSideways = true;
+ // The program could read anything after exiting, and it's on us to declare this.
+ result.reads = HeapRange::top();
+ return result;
+ }
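+    //
+    // For illustration only (how clients typically use these is an assumption, not asserted by
+    // this file): a C call would start from forCall() and may then narrow 'reads'/'writes' when
+    // the callee is known to touch less, while a trapping check would use forCheck().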
+
+ bool mustExecute() const
+ {
+ return terminal || exitsSideways || writesLocalState || writes || writesPinned;
+ }
+
+ // Returns true if reordering instructions with these respective effects would change program
+ // behavior in an observable way.
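+    // For example (illustrative, not exhaustive): two Loads never interfere regardless of their
+    // read ranges, a Load and a Store interfere when the Store's writes overlap the Load's reads,
+    // and two values that both write local state (say, two Upsilons) always interfere.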
+ bool interferes(const Effects&) const;
+
+ JS_EXPORT_PRIVATE bool operator==(const Effects&) const;
+ JS_EXPORT_PRIVATE bool operator!=(const Effects&) const;
+
+ JS_EXPORT_PRIVATE void dump(PrintStream& out) const;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.cpp b/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.cpp
new file mode 100644
index 000000000..feaacdac4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.cpp
@@ -0,0 +1,703 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3EliminateCommonSubexpressions.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockWorklist.h"
+#include "B3Dominators.h"
+#include "B3HeapRange.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3PureCSE.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3ValueKey.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "DFGGraph.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/HashMap.h>
+#include <wtf/ListDump.h>
+#include <wtf/RangeSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+// FIXME: We could treat Patchpoints with a non-empty set of reads as a "memory value" and somehow
+// eliminate redundant ones. We would need some way of determining if two patchpoints are replaceable.
+// It doesn't seem right to use the reads set for this. We could use the generator, but that feels
+// lame because the FTL will pretty much use a unique generator for each patchpoint even when two
+// patchpoints have the same semantics as far as CSE would be concerned. We could invent something
+// like a "value ID" for patchpoints. By default, each one gets a unique value ID, but FTL could force
+// some patchpoints to share the same one as a signal that they will return the same value if executed
+// in the same heap with the same inputs.
+
+typedef Vector<MemoryValue*, 1> MemoryMatches;
+
+class MemoryValueMap {
+public:
+ MemoryValueMap() { }
+
+ void add(MemoryValue* memory)
+ {
+ Matches& matches = m_map.add(memory->lastChild(), Matches()).iterator->value;
+ if (matches.contains(memory))
+ return;
+ matches.append(memory);
+ }
+
+ template<typename Functor>
+ void removeIf(const Functor& functor)
+ {
+ m_map.removeIf(
+ [&] (HashMap<Value*, Matches>::KeyValuePairType& entry) -> bool {
+ entry.value.removeAllMatching(
+ [&] (Value* value) -> bool {
+ if (MemoryValue* memory = value->as<MemoryValue>())
+ return functor(memory);
+ return true;
+ });
+ return entry.value.isEmpty();
+ });
+ }
+
+ Matches* find(Value* ptr)
+ {
+ auto iter = m_map.find(ptr);
+ if (iter == m_map.end())
+ return nullptr;
+ return &iter->value;
+ }
+
+ template<typename Functor>
+ MemoryValue* find(Value* ptr, const Functor& functor)
+ {
+ if (Matches* matches = find(ptr)) {
+ for (Value* candidateValue : *matches) {
+ if (MemoryValue* candidateMemory = candidateValue->as<MemoryValue>()) {
+ if (functor(candidateMemory))
+ return candidateMemory;
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print("{");
+ CommaPrinter comma;
+ for (auto& entry : m_map)
+ out.print(comma, pointerDump(entry.key), "=>", pointerListDump(entry.value));
+ out.print("}");
+ }
+
+private:
+ // This uses Matches for two reasons:
+ // - It cannot be a MemoryValue* because the key is imprecise. Many MemoryValues could have the
+ // same key while being unaliased.
+ // - It can't be a MemoryMatches array because the MemoryValue*'s could be turned into Identity's.
+ HashMap<Value*, Matches> m_map;
+};
+
+struct ImpureBlockData {
+ void dump(PrintStream& out) const
+ {
+ out.print(
+ "{reads = ", reads, ", writes = ", writes, ", storesAtHead = ", storesAtHead,
+ ", memoryValuesAtTail = ", memoryValuesAtTail, "}");
+ }
+
+ RangeSet<HeapRange> reads; // This only gets used for forward store elimination.
+ RangeSet<HeapRange> writes; // This gets used for both load and store elimination.
+
+ MemoryValueMap storesAtHead;
+ MemoryValueMap memoryValuesAtTail;
+};
+
+class CSE {
+public:
+ CSE(Procedure& proc)
+ : m_proc(proc)
+ , m_dominators(proc.dominators())
+ , m_impureBlockData(proc.size())
+ , m_insertionSet(proc)
+ {
+ }
+
+ bool run()
+ {
+ if (verbose)
+ dataLog("B3 before CSE:\n", m_proc);
+
+ m_proc.resetValueOwners();
+
+ // Summarize the impure effects of each block, and the impure values available at the end of
+ // each block. This doesn't edit code yet.
+ for (BasicBlock* block : m_proc) {
+ ImpureBlockData& data = m_impureBlockData[block];
+ for (Value* value : *block) {
+ Effects effects = value->effects();
+ MemoryValue* memory = value->as<MemoryValue>();
+
+ if (memory && memory->isStore()
+ && !data.reads.overlaps(memory->range())
+ && !data.writes.overlaps(memory->range()))
+ data.storesAtHead.add(memory);
+ data.reads.add(effects.reads);
+
+ if (HeapRange writes = effects.writes)
+ clobber(data, writes);
+
+ if (memory)
+ data.memoryValuesAtTail.add(memory);
+ }
+
+ if (verbose)
+ dataLog("Block ", *block, ": ", data, "\n");
+ }
+
+ // Perform CSE. This edits code.
+ Vector<BasicBlock*> postOrder = m_proc.blocksInPostOrder();
+ for (unsigned i = postOrder.size(); i--;) {
+ m_block = postOrder[i];
+ if (verbose)
+ dataLog("Looking at ", *m_block, ":\n");
+
+ m_data = ImpureBlockData();
+ for (m_index = 0; m_index < m_block->size(); ++m_index) {
+ m_value = m_block->at(m_index);
+ process();
+ }
+ m_insertionSet.execute(m_block);
+ m_impureBlockData[m_block] = m_data;
+ }
+
+        // The previous pass might have requested that we insert code in some basic block other than
+        // the one that it was looking at. This loop inserts that code now.
+ for (BasicBlock* block : m_proc) {
+ for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+ auto iter = m_sets.find(block->at(valueIndex));
+ if (iter == m_sets.end())
+ continue;
+
+ for (Value* value : iter->value)
+ m_insertionSet.insertValue(valueIndex + 1, value);
+ }
+ m_insertionSet.execute(block);
+ }
+
+ if (verbose)
+ dataLog("B3 after CSE:\n", m_proc);
+
+ return m_changed;
+ }
+
+private:
+ void process()
+ {
+ m_value->performSubstitution();
+
+ if (m_pureCSE.process(m_value, m_dominators)) {
+ ASSERT(!m_value->effects().writes);
+ m_changed = true;
+ return;
+ }
+
+ MemoryValue* memory = m_value->as<MemoryValue>();
+ if (memory && processMemoryBeforeClobber(memory))
+ return;
+
+ if (HeapRange writes = m_value->effects().writes)
+ clobber(m_data, writes);
+
+ if (memory)
+ processMemoryAfterClobber(memory);
+ }
+
+ // Return true if we got rid of the operation. If you changed IR in this function, you have to
+ // set m_changed even if you also return true.
+ bool processMemoryBeforeClobber(MemoryValue* memory)
+ {
+ Value* value = memory->child(0);
+ Value* ptr = memory->lastChild();
+ HeapRange range = memory->range();
+ int32_t offset = memory->offset();
+
+ switch (memory->opcode()) {
+ case Store8:
+ return handleStoreBeforeClobber(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->offset() == offset
+ && ((candidate->opcode() == Store8 && candidate->child(0) == value)
+ || ((candidate->opcode() == Load8Z || candidate->opcode() == Load8S)
+ && candidate == value));
+ });
+ case Store16:
+ return handleStoreBeforeClobber(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->offset() == offset
+ && ((candidate->opcode() == Store16 && candidate->child(0) == value)
+ || ((candidate->opcode() == Load16Z || candidate->opcode() == Load16S)
+ && candidate == value));
+ });
+ case Store:
+ return handleStoreBeforeClobber(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->offset() == offset
+ && ((candidate->opcode() == Store && candidate->child(0) == value)
+ || (candidate->opcode() == Load && candidate == value));
+ });
+ default:
+ return false;
+ }
+ }
+
+ void clobber(ImpureBlockData& data, HeapRange writes)
+ {
+ data.writes.add(writes);
+
+ data.memoryValuesAtTail.removeIf(
+ [&] (MemoryValue* memory) {
+ return memory->range().overlaps(writes);
+ });
+ }
+
+ void processMemoryAfterClobber(MemoryValue* memory)
+ {
+ Value* ptr = memory->lastChild();
+ HeapRange range = memory->range();
+ int32_t offset = memory->offset();
+ Type type = memory->type();
+
+ // FIXME: Empower this to insert more casts and shifts. For example, a Load8 could match a
+ // Store and mask the result. You could even have:
+ //
+ // Store(@value, @ptr, offset = 0)
+ // Load8Z(@ptr, offset = 2)
+ //
+ // Which could be turned into something like this:
+ //
+ // Store(@value, @ptr, offset = 0)
+ // ZShr(@value, 16)
+
+ switch (memory->opcode()) {
+ case Load8Z: {
+ handleMemoryValue(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->offset() == offset
+ && (candidate->opcode() == Load8Z || candidate->opcode() == Store8);
+ },
+ [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+ if (match->opcode() == Store8) {
+ Value* mask = m_proc.add<Const32Value>(m_value->origin(), 0xff);
+ fixups.append(mask);
+ Value* zext = m_proc.add<Value>(
+ BitAnd, m_value->origin(), match->child(0), mask);
+ fixups.append(zext);
+ return zext;
+ }
+ return nullptr;
+ });
+ break;
+ }
+
+ case Load8S: {
+ handleMemoryValue(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->offset() == offset
+ && (candidate->opcode() == Load8S || candidate->opcode() == Store8);
+ },
+ [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+ if (match->opcode() == Store8) {
+ Value* sext = m_proc.add<Value>(
+ SExt8, m_value->origin(), match->child(0));
+ fixups.append(sext);
+ return sext;
+ }
+ return nullptr;
+ });
+ break;
+ }
+
+ case Load16Z: {
+ handleMemoryValue(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->offset() == offset
+ && (candidate->opcode() == Load16Z || candidate->opcode() == Store16);
+ },
+ [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+ if (match->opcode() == Store16) {
+ Value* mask = m_proc.add<Const32Value>(m_value->origin(), 0xffff);
+ fixups.append(mask);
+ Value* zext = m_proc.add<Value>(
+ BitAnd, m_value->origin(), match->child(0), mask);
+ fixups.append(zext);
+ return zext;
+ }
+ return nullptr;
+ });
+ break;
+ }
+
+ case Load16S: {
+ handleMemoryValue(
+ ptr, range, [&] (MemoryValue* candidate) -> bool {
+ return candidate->offset() == offset
+ && (candidate->opcode() == Load16S || candidate->opcode() == Store16);
+ },
+ [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+ if (match->opcode() == Store16) {
+ Value* sext = m_proc.add<Value>(
+ SExt16, m_value->origin(), match->child(0));
+ fixups.append(sext);
+ return sext;
+ }
+ return nullptr;
+ });
+ break;
+ }
+
+ case Load: {
+ handleMemoryValue(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ if (candidate->offset() != offset)
+ return false;
+
+ if (candidate->opcode() == Load && candidate->type() == type)
+ return true;
+
+ if (candidate->opcode() == Store && candidate->child(0)->type() == type)
+ return true;
+
+ return false;
+ });
+ break;
+ }
+
+ case Store8: {
+ handleStoreAfterClobber(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->opcode() == Store8
+ && candidate->offset() == offset;
+ });
+ break;
+ }
+
+ case Store16: {
+ handleStoreAfterClobber(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->opcode() == Store16
+ && candidate->offset() == offset;
+ });
+ break;
+ }
+
+ case Store: {
+ handleStoreAfterClobber(
+ ptr, range,
+ [&] (MemoryValue* candidate) -> bool {
+ return candidate->opcode() == Store
+ && candidate->offset() == offset;
+ });
+ break;
+ }
+
+ default:
+ dataLog("Bad memory value: ", deepDump(m_proc, m_value), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ template<typename Filter>
+ bool handleStoreBeforeClobber(Value* ptr, HeapRange range, const Filter& filter)
+ {
+ MemoryMatches matches = findMemoryValue(ptr, range, filter);
+ if (matches.isEmpty())
+ return false;
+
+ m_value->replaceWithNop();
+ m_changed = true;
+ return true;
+ }
+
+ template<typename Filter>
+ void handleStoreAfterClobber(Value* ptr, HeapRange range, const Filter& filter)
+ {
+ if (!m_value->traps() && findStoreAfterClobber(ptr, range, filter)) {
+ m_value->replaceWithNop();
+ m_changed = true;
+ return;
+ }
+
+ m_data.memoryValuesAtTail.add(m_value->as<MemoryValue>());
+ }
+
+ template<typename Filter>
+ bool findStoreAfterClobber(Value* ptr, HeapRange range, const Filter& filter)
+ {
+ // We can eliminate a store if every forward path hits a store to the same location before
+ // hitting any operation that observes the store. This search seems like it should be
+ // expensive, but in the overwhelming majority of cases it will almost immediately hit an
+ // operation that interferes.
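+        //
+        // For illustration (hypothetical IR): in
+        //
+        //     Store(@a, @p)   <-- m_value
+        //     Store(@b, @p)
+        //
+        // with nothing in between that reads or writes the store's range, every forward path hits
+        // the second Store first, so the first Store is dead and gets replaced with a Nop.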
+
+ if (verbose)
+ dataLog(*m_value, ": looking forward for stores to ", *ptr, "...\n");
+
+ // First search forward in this basic block.
+ // FIXME: It would be cool to get rid of this linear search. It's not super critical since
+ // we will probably bail out very quickly, but it *is* annoying.
+ for (unsigned index = m_index + 1; index < m_block->size(); ++index) {
+ Value* value = m_block->at(index);
+
+ if (MemoryValue* memoryValue = value->as<MemoryValue>()) {
+ if (memoryValue->lastChild() == ptr && filter(memoryValue))
+ return true;
+ }
+
+ Effects effects = value->effects();
+ if (effects.reads.overlaps(range) || effects.writes.overlaps(range))
+ return false;
+ }
+
+ if (!m_block->numSuccessors())
+ return false;
+
+ BlockWorklist worklist;
+ worklist.pushAll(m_block->successorBlocks());
+
+ while (BasicBlock* block = worklist.pop()) {
+ ImpureBlockData& data = m_impureBlockData[block];
+
+ MemoryValue* match = data.storesAtHead.find(ptr, filter);
+ if (match && match != m_value)
+ continue;
+
+ if (data.writes.overlaps(range) || data.reads.overlaps(range))
+ return false;
+
+ if (!block->numSuccessors())
+ return false;
+
+ worklist.pushAll(block->successorBlocks());
+ }
+
+ return true;
+ }
+
+ template<typename Filter>
+ void handleMemoryValue(Value* ptr, HeapRange range, const Filter& filter)
+ {
+ handleMemoryValue(
+ ptr, range, filter,
+ [] (MemoryValue*, Vector<Value*>&) -> Value* {
+ return nullptr;
+ });
+ }
+
+ template<typename Filter, typename Replace>
+ void handleMemoryValue(
+ Value* ptr, HeapRange range, const Filter& filter, const Replace& replace)
+ {
+ MemoryMatches matches = findMemoryValue(ptr, range, filter);
+ if (replaceMemoryValue(matches, replace))
+ return;
+ m_data.memoryValuesAtTail.add(m_value->as<MemoryValue>());
+ }
+
+ template<typename Replace>
+ bool replaceMemoryValue(const MemoryMatches& matches, const Replace& replace)
+ {
+ if (matches.isEmpty())
+ return false;
+
+ if (verbose)
+ dataLog("Eliminating ", *m_value, " due to ", pointerListDump(matches), "\n");
+
+ m_changed = true;
+
+ if (matches.size() == 1) {
+ MemoryValue* dominatingMatch = matches[0];
+ RELEASE_ASSERT(m_dominators.dominates(dominatingMatch->owner, m_block));
+
+ if (verbose)
+ dataLog(" Eliminating using ", *dominatingMatch, "\n");
+ Vector<Value*> extraValues;
+ if (Value* value = replace(dominatingMatch, extraValues)) {
+ for (Value* extraValue : extraValues)
+ m_insertionSet.insertValue(m_index, extraValue);
+ m_value->replaceWithIdentity(value);
+ } else {
+ if (dominatingMatch->isStore())
+ m_value->replaceWithIdentity(dominatingMatch->child(0));
+ else
+ m_value->replaceWithIdentity(dominatingMatch);
+ }
+ return true;
+ }
+
+ // FIXME: It would be way better if this phase just did SSA calculation directly.
+ // Right now we're relying on the fact that CSE's position in the phase order is
+ // almost right before SSA fixup.
+
+ Variable* variable = m_proc.addVariable(m_value->type());
+
+ VariableValue* get = m_insertionSet.insert<VariableValue>(
+ m_index, Get, m_value->origin(), variable);
+ if (verbose)
+ dataLog(" Inserting get of value: ", *get, "\n");
+ m_value->replaceWithIdentity(get);
+
+ for (MemoryValue* match : matches) {
+ Vector<Value*>& sets = m_sets.add(match, Vector<Value*>()).iterator->value;
+
+ Value* value = replace(match, sets);
+ if (!value) {
+ if (match->isStore())
+ value = match->child(0);
+ else
+ value = match;
+ }
+
+ Value* set = m_proc.add<VariableValue>(Set, m_value->origin(), variable, value);
+ sets.append(set);
+ }
+
+ return true;
+ }
+
+ template<typename Filter>
+ MemoryMatches findMemoryValue(Value* ptr, HeapRange range, const Filter& filter)
+ {
+ if (verbose)
+ dataLog(*m_value, ": looking backward for ", *ptr, "...\n");
+
+ if (MemoryValue* match = m_data.memoryValuesAtTail.find(ptr, filter)) {
+ if (verbose)
+ dataLog(" Found ", *match, " locally.\n");
+ return { match };
+ }
+
+ if (m_data.writes.overlaps(range)) {
+ if (verbose)
+ dataLog(" Giving up because of writes.\n");
+ return { };
+ }
+
+ BlockWorklist worklist;
+ worklist.pushAll(m_block->predecessors());
+
+ MemoryMatches matches;
+
+ while (BasicBlock* block = worklist.pop()) {
+ if (verbose)
+ dataLog(" Looking at ", *block, "\n");
+
+ ImpureBlockData& data = m_impureBlockData[block];
+
+ MemoryValue* match = data.memoryValuesAtTail.find(ptr, filter);
+ if (match && match != m_value) {
+ if (verbose)
+ dataLog(" Found match: ", *match, "\n");
+ matches.append(match);
+ continue;
+ }
+
+ if (data.writes.overlaps(range)) {
+ if (verbose)
+ dataLog(" Giving up because of writes.\n");
+ return { };
+ }
+
+ if (!block->numPredecessors()) {
+ if (verbose)
+ dataLog(" Giving up because it's live at root.\n");
+ // This essentially proves that this is live at the prologue. That means that we
+ // cannot reliably optimize this case.
+ return { };
+ }
+
+ worklist.pushAll(block->predecessors());
+ }
+
+ if (verbose)
+ dataLog(" Got matches: ", pointerListDump(matches), "\n");
+ return matches;
+ }
+
+ Procedure& m_proc;
+
+ Dominators& m_dominators;
+ PureCSE m_pureCSE;
+
+ IndexMap<BasicBlock, ImpureBlockData> m_impureBlockData;
+
+ ImpureBlockData m_data;
+
+ BasicBlock* m_block;
+ unsigned m_index;
+ Value* m_value;
+
+ HashMap<Value*, Vector<Value*>> m_sets;
+
+ InsertionSet m_insertionSet;
+
+ bool m_changed { false };
+};
+
+} // anonymous namespace
+
+bool eliminateCommonSubexpressions(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "eliminateCommonSubexpressions");
+
+ CSE cse(proc);
+ return cse.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.h b/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.h
new file mode 100644
index 000000000..ce994beb9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3EliminateCommonSubexpressions.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// This does global common subexpression elimination (CSE) over both pure values and memory accesses.
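+//
+// For illustration (hypothetical IR): two Loads of the same pointer with no intervening write to
+// the loaded range collapse into one; the later Load is replaced with an Identity of the earlier
+// one, or with a Get of a fresh Variable when several non-dominating matches have to be merged.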
+
+bool eliminateCommonSubexpressions(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3FenceValue.cpp b/Source/JavaScriptCore/b3/B3FenceValue.cpp
new file mode 100644
index 000000000..80e27928c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FenceValue.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3FenceValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+FenceValue::~FenceValue()
+{
+}
+
+Value* FenceValue::cloneImpl() const
+{
+ return new FenceValue(*this);
+}
+
+FenceValue::FenceValue(Origin origin, HeapRange read, HeapRange write)
+ : Value(CheckedOpcode, Fence, Void, origin)
+ , read(read)
+ , write(write)
+{
+}
+
+FenceValue::FenceValue(Origin origin)
+ : FenceValue(origin, HeapRange::top(), HeapRange::top())
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FenceValue.h b/Source/JavaScriptCore/b3/B3FenceValue.h
new file mode 100644
index 000000000..d147052d1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FenceValue.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE FenceValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == Fence; }
+
+ ~FenceValue();
+
+ // The read/write heaps are reflected in the effects() of this value. The compiler may change
+ // the lowering of a Fence based on the heaps. For example, if a fence does not write anything
+ // then it is understood to be a store-store fence. On x86, this may lead us to not emit any
+ // code, while on ARM we may emit a cheaper fence (dmb ishst instead of dmb ish). We will do
+ // the same optimization for load-load fences, which are expressed as a Fence that writes but
+ // does not read.
+ //
+ // This abstraction allows us to cover all of the fences on x86 and all of the standalone fences
+ // on ARM. X86 really just has one fence: mfence. This fence should be used to protect stores
+ // from being sunk below loads. WTF calls it the storeLoadFence. A classic example is the Steele
+ // barrier:
+ //
+ // o.f = v => o.f = v
+ // if (color(o) == black)
+ // log(o)
+ //
+ // We are trying to ensure that if the store to o.f occurs after the collector has started
+ // visiting o, then we will log o. Under sequential consistency, this would work. The collector
+ // would set color(o) to black just before it started visiting. But x86's illusion of sequential
+    // consistency is broken in exactly this store->load ordering case. The store to o.f may
+ // get buffered, and it may occur some time after we have loaded and checked color(o). As well,
+ // the collector's store to set color(o) to black may get buffered and it may occur some time
+ // after the collector has finished visiting o. Therefore, we need mfences. In B3 we model this
+ // as a Fence that reads and writes some heaps. Setting writes to the empty set will cause B3 to
+ // not emit any barrier on x86.
+ //
+ // On ARM there are many more fences. The Fence instruction is meant to model just two of them:
+ // dmb ish and dmb ishst. You can emit a dmb ishst by using a Fence with an empty write heap.
+ // Otherwise, you will get a dmb ish.
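+    //
+    // For illustration only (assuming a Procedure 'proc'; the construction below is a sketch, not
+    // part of this patch): proc.add<FenceValue>(origin) gives a full fence, and a store-store
+    // fence can be expressed by clearing the write heap, e.g. fence->write = HeapRange(), which
+    // lowers to no code on x86 and to dmb ishst on ARM.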
+ // FIXME: Add fenced memory accesses. https://bugs.webkit.org/show_bug.cgi?id=162349
+ // FIXME: Add a Depend operation. https://bugs.webkit.org/show_bug.cgi?id=162350
+ HeapRange read { HeapRange::top() };
+ HeapRange write { HeapRange::top() };
+
+protected:
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ JS_EXPORT_PRIVATE FenceValue(Origin origin, HeapRange read, HeapRange write);
+
+ JS_EXPORT_PRIVATE FenceValue(Origin origin);
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FixSSA.cpp b/Source/JavaScriptCore/b3/B3FixSSA.cpp
new file mode 100644
index 000000000..730c2c876
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FixSSA.cpp
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3FixSSA.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BreakCriticalEdges.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SSACalculator.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+const bool verbose = false;
+} // anonymous namespace
+
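+// Demotes the given values out of SSA form: each demoted value gets a Variable, every use of it is
+// rewritten into a Get of that Variable, and its definition is followed by a Set. Phis get their
+// own Variable, with the corresponding Upsilons turned into Sets. For illustration (hypothetical
+// IR), an Upsilon(@x, ^phi) / Phi pair over a demoted Phi becomes Set(var, @x) ... Get(var).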
+void demoteValues(Procedure& proc, const IndexSet<Value>& values)
+{
+ HashMap<Value*, Variable*> map;
+ HashMap<Value*, Variable*> phiMap;
+
+    // Create a Variable for each demoted value (and an extra one for each Phi).
+ for (Value* value : values.values(proc.values())) {
+ map.add(value, proc.addVariable(value->type()));
+
+ if (value->opcode() == Phi)
+ phiMap.add(value, proc.addVariable(value->type()));
+ }
+
+ if (verbose) {
+ dataLog("Demoting values as follows:\n");
+ dataLog(" map = ");
+ CommaPrinter comma;
+ for (auto& entry : map)
+ dataLog(comma, *entry.key, "=>", *entry.value);
+ dataLog("\n");
+ dataLog(" phiMap = ");
+ comma = CommaPrinter();
+ for (auto& entry : phiMap)
+ dataLog(comma, *entry.key, "=>", *entry.value);
+ dataLog("\n");
+ }
+
+    // Change accesses to the values into accesses to the Variables.
+ InsertionSet insertionSet(proc);
+ for (BasicBlock* block : proc) {
+ for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+ Value* value = block->at(valueIndex);
+
+ if (value->opcode() == Phi) {
+ if (Variable* variable = phiMap.get(value)) {
+ value->replaceWithIdentity(
+ insertionSet.insert<VariableValue>(
+ valueIndex, Get, value->origin(), variable));
+ }
+ } else {
+ for (Value*& child : value->children()) {
+ if (Variable* variable = map.get(child)) {
+ child = insertionSet.insert<VariableValue>(
+ valueIndex, Get, value->origin(), variable);
+ }
+ }
+
+ if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+ if (Variable* variable = phiMap.get(upsilon->phi())) {
+ insertionSet.insert<VariableValue>(
+ valueIndex, Set, upsilon->origin(), variable, upsilon->child(0));
+ value->replaceWithNop();
+ }
+ }
+ }
+
+ if (Variable* variable = map.get(value)) {
+ insertionSet.insert<VariableValue>(
+ valueIndex + 1, Set, value->origin(), variable, value);
+ }
+ }
+ insertionSet.execute(block);
+ }
+}
+
+bool fixSSA(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "fixSSA");
+
+ // Just for sanity, remove any unused variables first. It's unlikely that this code has any
+ // bugs having to do with dead variables, but it would be silly to have to fix such a bug if
+ // it did arise.
+ IndexSet<Variable> liveVariables;
+ for (Value* value : proc.values()) {
+ if (VariableValue* variableValue = value->as<VariableValue>())
+ liveVariables.add(variableValue->variable());
+ }
+
+ for (Variable* variable : proc.variables()) {
+ if (!liveVariables.contains(variable))
+ proc.deleteVariable(variable);
+ }
+
+ if (proc.variables().isEmpty())
+ return false;
+
+ // We know that we have variables to optimize, so do that now.
+ breakCriticalEdges(proc);
+
+ SSACalculator ssa(proc);
+
+ // Create an SSACalculator::Variable ("calcVar") for every variable.
+ Vector<Variable*> calcVarToVariable;
+ IndexMap<Variable, SSACalculator::Variable*> variableToCalcVar(proc.variables().size());
+
+ for (Variable* variable : proc.variables()) {
+ SSACalculator::Variable* calcVar = ssa.newVariable();
+ RELEASE_ASSERT(calcVar->index() == calcVarToVariable.size());
+ calcVarToVariable.append(variable);
+ variableToCalcVar[variable] = calcVar;
+ }
+
+ // Create Defs for all of the Sets of the variables.
+ for (BasicBlock* block : proc) {
+ for (Value* value : *block) {
+ if (value->opcode() != Set)
+ continue;
+
+ Variable* variable = value->as<VariableValue>()->variable();
+
+ if (SSACalculator::Variable* calcVar = variableToCalcVar[variable])
+ ssa.newDef(calcVar, block, value->child(0));
+ }
+ }
+
+ // Decide where Phis are to be inserted. This creates them but does not insert them.
+ ssa.computePhis(
+ [&] (SSACalculator::Variable* calcVar, BasicBlock* block) -> Value* {
+ Variable* variable = calcVarToVariable[calcVar->index()];
+ Value* phi = proc.add<Value>(Phi, variable->type(), block->at(0)->origin());
+ if (verbose) {
+ dataLog(
+ "Adding Phi for ", pointerDump(variable), " at ", *block, ": ",
+ deepDump(proc, phi), "\n");
+ }
+ return phi;
+ });
+
+ // Now perform the conversion.
+ InsertionSet insertionSet(proc);
+ IndexMap<Variable, Value*> mapping(proc.variables().size());
+ for (BasicBlock* block : proc.blocksInPreOrder()) {
+ mapping.clear();
+
+ for (unsigned index = calcVarToVariable.size(); index--;) {
+ Variable* variable = calcVarToVariable[index];
+ SSACalculator::Variable* calcVar = ssa.variable(index);
+
+ SSACalculator::Def* def = ssa.reachingDefAtHead(block, calcVar);
+ if (def)
+ mapping[variable] = def->value();
+ }
+
+ for (SSACalculator::Def* phiDef : ssa.phisForBlock(block)) {
+ Variable* variable = calcVarToVariable[phiDef->variable()->index()];
+
+ insertionSet.insertValue(0, phiDef->value());
+ mapping[variable] = phiDef->value();
+ }
+
+ for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+ Value* value = block->at(valueIndex);
+ value->performSubstitution();
+
+ switch (value->opcode()) {
+ case Get: {
+ VariableValue* variableValue = value->as<VariableValue>();
+ Variable* variable = variableValue->variable();
+
+ if (Value* replacement = mapping[variable])
+ value->replaceWithIdentity(replacement);
+ else {
+ value->replaceWithIdentity(
+ insertionSet.insertBottom(valueIndex, value));
+ }
+ break;
+ }
+
+ case Set: {
+ VariableValue* variableValue = value->as<VariableValue>();
+ Variable* variable = variableValue->variable();
+
+ mapping[variable] = value->child(0);
+ value->replaceWithNop();
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ unsigned upsilonInsertionPoint = block->size() - 1;
+ Origin upsilonOrigin = block->last()->origin();
+ for (BasicBlock* successorBlock : block->successorBlocks()) {
+ for (SSACalculator::Def* phiDef : ssa.phisForBlock(successorBlock)) {
+ Value* phi = phiDef->value();
+ SSACalculator::Variable* calcVar = phiDef->variable();
+ Variable* variable = calcVarToVariable[calcVar->index()];
+
+ Value* mappedValue = mapping[variable];
+ if (verbose) {
+ dataLog(
+ "Mapped value for ", *variable, " with successor Phi ", *phi,
+ " at end of ", *block, ": ", pointerDump(mappedValue), "\n");
+ }
+
+ if (!mappedValue)
+ mappedValue = insertionSet.insertBottom(upsilonInsertionPoint, phi);
+
+ insertionSet.insert<UpsilonValue>(
+ upsilonInsertionPoint, upsilonOrigin, mappedValue, phi);
+ }
+ }
+
+ insertionSet.execute(block);
+ }
+
+ if (verbose) {
+ dataLog("B3 after SSA conversion:\n");
+ dataLog(proc);
+ }
+
+ return true;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FixSSA.h b/Source/JavaScriptCore/b3/B3FixSSA.h
new file mode 100644
index 000000000..775c32237
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FixSSA.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include <wtf/IndexSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Turns all mentions of the given values into accesses to variables. This is meant to be used
+// from phases that don't like SSA for whatever reason.
+void demoteValues(Procedure&, const IndexSet<Value>&);
+
+// This fixes SSA for you. Use this after you have done demoteValues() and you have performed
+// whatever evil transformation you needed.
+bool fixSSA(Procedure&);
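+//
+// A typical usage sketch (illustrative; names are placeholders):
+//
+//     IndexSet<Value> valuesToDemote;
+//     // ... collect the values that the transformation cannot keep in SSA form ...
+//     demoteValues(proc, valuesToDemote);
+//     // ... do the transformation; the demoted values are now Gets/Sets of Variables ...
+//     fixSSA(proc);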
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3FoldPathConstants.cpp b/Source/JavaScriptCore/b3/B3FoldPathConstants.cpp
new file mode 100644
index 000000000..24a01340b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FoldPathConstants.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3FoldPathConstants.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class FoldPathConstants {
+public:
+ FoldPathConstants(Procedure& proc)
+ : m_proc(proc)
+ , m_insertionSet(proc)
+ {
+ }
+
+ void run()
+ {
+ bool changed = false;
+
+ if (verbose)
+ dataLog("B3 before folding path constants: \n", m_proc, "\n");
+
+ // Find all of the values that are the subject of a branch or switch. For any successor
+ // that we dominate, install a value override at that block.
+
+ HashMap<Value*, Vector<Override>> overrides;
+
+ Dominators& dominators = m_proc.dominators();
+
+ auto addOverride = [&] (
+ BasicBlock* from, Value* value, const Override& override) {
+
+ if (override.block->numPredecessors() != 1)
+ return;
+ ASSERT(override.block->predecessor(0) == from);
+
+ Vector<Override>& forValue =
+ overrides.add(value, Vector<Override>()).iterator->value;
+
+ if (!ASSERT_DISABLED) {
+ for (const Override& otherOverride : forValue)
+ ASSERT_UNUSED(otherOverride, otherOverride.block != override.block);
+ }
+
+ if (verbose)
+ dataLog("Overriding ", *value, " from ", *from, ": ", override, "\n");
+
+ forValue.append(override);
+ };
+
+ for (BasicBlock* block : m_proc) {
+ Value* branch = block->last();
+ switch (branch->opcode()) {
+ case Branch:
+ if (block->successorBlock(0) == block->successorBlock(1))
+ continue;
+ addOverride(
+ block, branch->child(0),
+ Override::nonZero(block->successorBlock(0)));
+ addOverride(
+ block, branch->child(0),
+ Override::constant(block->successorBlock(1), 0));
+ break;
+ case Switch: {
+ HashMap<BasicBlock*, unsigned> targetUses;
+ for (const SwitchCase& switchCase : branch->as<SwitchValue>()->cases(block))
+ targetUses.add(switchCase.targetBlock(), 0).iterator->value++;
+
+ for (const SwitchCase& switchCase : branch->as<SwitchValue>()->cases(block)) {
+ if (targetUses.find(switchCase.targetBlock())->value != 1)
+ continue;
+
+ addOverride(
+ block, branch->child(0),
+ Override::constant(switchCase.targetBlock(), switchCase.caseValue()));
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ // Install the constants in the override blocks. We use one-shot insertion sets because
+ // each block will get at most one thing inserted into it anyway.
+ for (auto& entry : overrides) {
+ for (Override& override : entry.value) {
+ if (!override.hasValue)
+ continue;
+ override.valueNode =
+ m_insertionSet.insertIntConstant(0, entry.key, override.value);
+ m_insertionSet.execute(override.block);
+ }
+ }
+
+ // Replace all uses of a value that has an override with that override, if appropriate.
+ // Certain instructions get special treatment.
+ auto getOverride = [&] (BasicBlock* block, Value* value) -> Override {
+ auto iter = overrides.find(value);
+ if (iter == overrides.end())
+ return Override();
+
+ Vector<Override>& forValue = iter->value;
+ Override result;
+ for (Override& override : forValue) {
+ if (dominators.dominates(override.block, block)
+ && override.isBetterThan(result))
+ result = override;
+ }
+
+ if (verbose)
+ dataLog("In block ", *block, " getting override for ", *value, ": ", result, "\n");
+
+ return result;
+ };
+
+ for (BasicBlock* block : m_proc) {
+ for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+ Value* value = block->at(valueIndex);
+
+ switch (value->opcode()) {
+ case Branch: {
+ if (getOverride(block, value->child(0)).isNonZero) {
+ value->replaceWithJump(block, block->taken());
+ changed = true;
+ }
+ break;
+ }
+
+ case Equal: {
+ if (value->child(1)->isInt(0)
+ && getOverride(block, value->child(0)).isNonZero) {
+ value->replaceWithIdentity(
+ m_insertionSet.insertIntConstant(valueIndex, value, 0));
+ }
+ break;
+ }
+
+ case NotEqual: {
+ if (value->child(1)->isInt(0)
+ && getOverride(block, value->child(0)).isNonZero) {
+ value->replaceWithIdentity(
+ m_insertionSet.insertIntConstant(valueIndex, value, 1));
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ for (Value*& child : value->children()) {
+ Override override = getOverride(block, child);
+ if (override.valueNode)
+ child = override.valueNode;
+ }
+ }
+ m_insertionSet.execute(block);
+ }
+
+ if (changed) {
+ m_proc.resetReachability();
+ m_proc.invalidateCFG();
+ }
+ }
+
+private:
+ struct Override {
+ Override()
+ {
+ }
+
+ static Override constant(BasicBlock* block, int64_t value)
+ {
+ Override result;
+ result.block = block;
+ result.hasValue = true;
+ result.value = value;
+ if (value)
+ result.isNonZero = true;
+ return result;
+ }
+
+ static Override nonZero(BasicBlock* block)
+ {
+ Override result;
+ result.block = block;
+ result.isNonZero = true;
+ return result;
+ }
+
+ bool isBetterThan(const Override& override)
+ {
+ if (hasValue && !override.hasValue)
+ return true;
+ if (isNonZero && !override.isNonZero)
+ return true;
+ return false;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print("{block = ", pointerDump(block), ", value = ");
+ if (hasValue)
+ out.print(value);
+ else
+ out.print("<none>");
+ out.print(", isNonZero = ", isNonZero);
+ if (valueNode)
+ out.print(", valueNode = ", *valueNode);
+ out.print("}");
+ }
+
+ BasicBlock* block { nullptr };
+ bool hasValue { false };
+ bool isNonZero { false };
+ int64_t value { 0 };
+ Value* valueNode { nullptr };
+ };
+
+ Procedure& m_proc;
+ InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void foldPathConstants(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "foldPathConstants");
+ FoldPathConstants foldPathConstants(proc);
+ foldPathConstants.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FoldPathConstants.h b/Source/JavaScriptCore/b3/B3FoldPathConstants.h
new file mode 100644
index 000000000..a55c770b9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FoldPathConstants.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Does very basic simplification of uses of values that were branched on by a dominating branch.
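+//
+// Illustrative example: given
+//
+//     Branch(@x, #then, #else)
+//
+// where #then has no other predecessors, a NotEqual(@x, 0) in #then folds to 1 and an
+// Equal(@x, 0) folds to 0, since @x is known to be non-zero along that edge.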
+
+void foldPathConstants(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3FrequencyClass.cpp b/Source/JavaScriptCore/b3/B3FrequencyClass.cpp
new file mode 100644
index 000000000..816850c59
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FrequencyClass.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3FrequencyClass.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, FrequencyClass frequency)
+{
+ switch (frequency) {
+ case FrequencyClass::Normal:
+ out.print("Normal");
+ return;
+ case FrequencyClass::Rare:
+ out.print("Rare");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3FrequencyClass.h b/Source/JavaScriptCore/b3/B3FrequencyClass.h
new file mode 100644
index 000000000..607c439ac
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FrequencyClass.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+enum class FrequencyClass : uint8_t {
+ // We don't have any hypothesis about the frequency of this control flow construct. This is
+ // the common case. We can still use basic block frequency in this case.
+ Normal,
+
+ // We expect that this control flow construct will be reached super rarely. It's valid to
+ // perform optimizations that punish Rare code. Note that there will be situations where you
+ // have to somehow construct a new frequency class from a merging of multiple classes. When
+ // this happens, never choose Rare; always go with Normal. This is necessary because we
+ // really do punish Rare code very badly.
+ Rare
+};
+
+inline FrequencyClass maxFrequency(FrequencyClass a, FrequencyClass b)
+{
+ if (a == FrequencyClass::Normal)
+ return FrequencyClass::Normal;
+ return b;
+}
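+
+// For example, maxFrequency(FrequencyClass::Normal, FrequencyClass::Rare) is Normal; the result
+// is Rare only when both arguments are Rare, matching the merging rule described above.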
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::FrequencyClass);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3FrequentedBlock.h b/Source/JavaScriptCore/b3/B3FrequentedBlock.h
new file mode 100644
index 000000000..9b63ff4fd
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3FrequentedBlock.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3GenericFrequentedBlock.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+
+typedef GenericFrequentedBlock<BasicBlock> FrequentedBlock;
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Generate.cpp b/Source/JavaScriptCore/b3/B3Generate.cpp
new file mode 100644
index 000000000..e328c6a9e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Generate.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Generate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerate.h"
+#include "AirInstInlines.h"
+#include "B3Common.h"
+#include "B3DuplicateTails.h"
+#include "B3EliminateCommonSubexpressions.h"
+#include "B3FixSSA.h"
+#include "B3FoldPathConstants.h"
+#include "B3InferSwitches.h"
+#include "B3LegalizeMemoryOffsets.h"
+#include "B3LowerMacros.h"
+#include "B3LowerMacrosAfterOptimizations.h"
+#include "B3LowerToAir.h"
+#include "B3MoveConstants.h"
+#include "B3Procedure.h"
+#include "B3ReduceDoubleToFloat.h"
+#include "B3ReduceStrength.h"
+#include "B3TimingScope.h"
+#include "B3Validate.h"
+#include "PCToCodeOriginMap.h"
+
+namespace JSC { namespace B3 {
+
+void prepareForGeneration(Procedure& procedure, unsigned optLevel)
+{
+ TimingScope timingScope("prepareForGeneration");
+
+ generateToAir(procedure, optLevel);
+ Air::prepareForGeneration(procedure.code());
+}
+
+void generate(Procedure& procedure, CCallHelpers& jit)
+{
+ Air::generate(procedure.code(), jit);
+}
+
+void generateToAir(Procedure& procedure, unsigned optLevel)
+{
+ TimingScope timingScope("generateToAir");
+
+ if (shouldDumpIR(B3Mode) && !shouldDumpIRAtEachPhase(B3Mode)) {
+ dataLog("Initial B3:\n");
+ dataLog(procedure);
+ }
+
+ // We don't require the incoming IR to have predecessors computed.
+ procedure.resetReachability();
+
+ if (shouldValidateIR())
+ validate(procedure);
+
+ if (optLevel >= 1) {
+ reduceDoubleToFloat(procedure);
+ reduceStrength(procedure);
+ eliminateCommonSubexpressions(procedure);
+ inferSwitches(procedure);
+ duplicateTails(procedure);
+ fixSSA(procedure);
+ foldPathConstants(procedure);
+
+ // FIXME: Add more optimizations here.
+ // https://bugs.webkit.org/show_bug.cgi?id=150507
+ }
+
+ lowerMacros(procedure);
+
+ if (optLevel >= 1) {
+ reduceStrength(procedure);
+
+ // FIXME: Add more optimizations here.
+ // https://bugs.webkit.org/show_bug.cgi?id=150507
+ }
+
+ lowerMacrosAfterOptimizations(procedure);
+ legalizeMemoryOffsets(procedure);
+ moveConstants(procedure);
+
+ // FIXME: We should run pureCSE here to clean up some platform specific changes from the previous phases.
+ // https://bugs.webkit.org/show_bug.cgi?id=164873
+
+ if (shouldValidateIR())
+ validate(procedure);
+
+ // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
+ // Note that lowerToAir() acts like a phase in this regard.
+ if (shouldDumpIR(B3Mode) && !shouldDumpIRAtEachPhase(B3Mode)) {
+ dataLog("B3 after ", procedure.lastPhaseName(), ", before generation:\n");
+ dataLog(procedure);
+ }
+
+ lowerToAir(procedure);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Generate.h b/Source/JavaScriptCore/b3/B3Generate.h
new file mode 100644
index 000000000..2ffcd0ea8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Generate.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC {
+
+class CCallHelpers;
+
+namespace B3 {
+
+class Procedure;
+namespace Air { class Code; }
+
+// This takes a B3::Procedure, optimizes it in-place, lowers it to Air, and prepares the Air for
+// generation.
+JS_EXPORT_PRIVATE void prepareForGeneration(Procedure&, unsigned optLevel = 1);
+
+// This takes a B3::Procedure that has been prepared for generation (i.e. it has been lowered to Air and
+// the Air has been prepared for generation) and generates it. This is the equivalent of calling
+// Air::generate() on the Procedure::code().
+JS_EXPORT_PRIVATE void generate(Procedure&, CCallHelpers&);
+
+// This takes a B3::Procedure, optimizes it in-place, and lowers it to Air. You can then generate
+// the Air to machine code using Air::prepareForGeneration() and Air::generate() on the Procedure's
+// code().
+void generateToAir(Procedure&, unsigned optLevel = 1);
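+
+// A rough usage sketch (illustrative; CCallHelpers construction elided):
+//
+//     Procedure proc;
+//     // ... build B3 IR into proc ...
+//     prepareForGeneration(proc); // optimizes, lowers to Air, and prepares the Air
+//     CCallHelpers jit(/* ... */);
+//     generate(proc, jit);        // emits the machine code into the assembler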
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3GenericFrequentedBlock.h b/Source/JavaScriptCore/b3/B3GenericFrequentedBlock.h
new file mode 100644
index 000000000..1c5e75cfe
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3GenericFrequentedBlock.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequencyClass.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// A frequented block is a tuple of BasicBlock* and FrequencyClass. It's usually used as a
+// successor edge.
+
+template<typename BasicBlock>
+class GenericFrequentedBlock {
+public:
+ GenericFrequentedBlock(
+ BasicBlock* block = nullptr, FrequencyClass frequency = FrequencyClass::Normal)
+ : m_block(block)
+ , m_frequency(frequency)
+ {
+ }
+
+ bool operator==(const GenericFrequentedBlock& other) const
+ {
+ return m_block == other.m_block
+ && m_frequency == other.m_frequency;
+ }
+
+ bool operator!=(const GenericFrequentedBlock& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const
+ {
+ return *this != GenericFrequentedBlock();
+ }
+
+ BasicBlock* block() const { return m_block; }
+ BasicBlock*& block() { return m_block; }
+ FrequencyClass frequency() const { return m_frequency; }
+ FrequencyClass& frequency() { return m_frequency; }
+
+ bool isRare() const { return frequency() == FrequencyClass::Rare; }
+
+ void dump(PrintStream& out) const
+ {
+ if (frequency() != FrequencyClass::Normal)
+ out.print(frequency(), ":");
+ out.print(pointerDump(m_block));
+ }
+
+private:
+ BasicBlock* m_block;
+ FrequencyClass m_frequency;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3HeapRange.cpp b/Source/JavaScriptCore/b3/B3HeapRange.cpp
new file mode 100644
index 000000000..a5768f9f6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3HeapRange.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3HeapRange.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+void HeapRange::dump(PrintStream& out) const
+{
+ if (*this == HeapRange()) {
+ out.print("Bottom");
+ return;
+ }
+ if (*this == top()) {
+ out.print("Top");
+ return;
+ }
+ out.print(m_begin, "...", m_end);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3HeapRange.h b/Source/JavaScriptCore/b3/B3HeapRange.h
new file mode 100644
index 000000000..03866bdab
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3HeapRange.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <limits.h>
+#include <wtf/MathExtras.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// Alias analysis in B3 is done by checking if two integer ranges overlap. This is powerful enough
+// to be used for TBAA-style alias analysis used by the DFG, FTL, and LLVM: you just turn each node
+// in the tree of abstract heaps into a pre/post range.
+//
+// Note that the 'begin' is inclusive, while the 'end' is exclusive. These two ranges are non-
+// overlapping:
+//
+// rangeA = 0...8
+// rangeB = 8...16
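+//
+// Correspondingly (illustrative), HeapRange(0, 8).overlaps(HeapRange(8, 16)) is false, while
+// HeapRange(0, 9).overlaps(HeapRange(8, 16)) is true.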
+
+class HeapRange {
+public:
+ typedef unsigned Type;
+
+ HeapRange()
+ : m_begin(0)
+ , m_end(0)
+ {
+ }
+
+ explicit HeapRange(unsigned value)
+ : m_begin(value)
+ , m_end(value + 1)
+ {
+ ASSERT(m_end >= m_begin);
+ }
+
+ HeapRange(unsigned begin, unsigned end)
+ : m_begin(begin)
+ , m_end(end)
+ {
+ ASSERT(m_end >= m_begin);
+ if (m_begin == m_end) {
+ // Canonicalize empty ranges.
+ m_begin = 0;
+ m_end = 0;
+ }
+ }
+
+ static HeapRange top()
+ {
+ return HeapRange(0, UINT_MAX);
+ }
+
+ bool operator==(const HeapRange& other) const
+ {
+ return m_begin == other.m_begin
+ && m_end == other.m_end;
+ }
+
+ bool operator!=(const HeapRange& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const { return m_begin != m_end; }
+
+ unsigned begin() const { return m_begin; }
+ unsigned end() const { return m_end; }
+
+ bool overlaps(const HeapRange& other) const
+ {
+ return WTF::rangesOverlap(m_begin, m_end, other.m_begin, other.m_end);
+ }
+
+ JS_EXPORT_PRIVATE void dump(PrintStream& out) const;
+
+private:
+ unsigned m_begin;
+ unsigned m_end;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3InferSwitches.cpp b/Source/JavaScriptCore/b3/B3InferSwitches.cpp
new file mode 100644
index 000000000..2f1781241
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InferSwitches.cpp
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3InferSwitches.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class InferSwitches {
+public:
+ InferSwitches(Procedure& proc)
+ : m_proc(proc)
+ , m_insertionSet(proc)
+ , m_useCounts(proc)
+ {
+ }
+
+ bool run()
+ {
+ if (verbose)
+ dataLog("B3 before inferSwitches:\n", m_proc);
+
+ bool changed = true;
+ bool everChanged = false;
+ while (changed) {
+ changed = false;
+
+ if (verbose)
+ dataLog("Performing fixpoint iteration:\n");
+
+ for (BasicBlock* block : m_proc)
+ changed |= attemptToMergeWithPredecessor(block);
+
+ everChanged |= changed;
+ }
+
+ if (everChanged) {
+ m_proc.resetReachability();
+ m_proc.invalidateCFG();
+
+ m_proc.deleteOrphans();
+
+ if (verbose)
+ dataLog("B3 after inferSwitches:\n", m_proc);
+ return true;
+ }
+
+ return false;
+ }
+
+private:
+ bool attemptToMergeWithPredecessor(BasicBlock* block)
+ {
+ // No point in considering the root block. We also don't consider blocks with multiple
+ // predecessors, but we could handle this if we made this code a bit more general and we were
+ // not afraid of code bloat.
+ if (block->numPredecessors() != 1)
+ return false;
+
+ SwitchDescription description = describe(block);
+ if (verbose)
+ dataLog("Description of primary block ", *block, ": ", description, "\n");
+ if (!description) {
+ if (verbose)
+ dataLog(" Bailing because not switch-like.\n");
+ return false;
+ }
+
+ // We know that this block behaves like a switch. But we need to verify that it doesn't also
+ // perform any effects or do expensive things. We don't want to create a switch if that will
+ // make expensive things execute unconditionally. We're very conservative about how we define
+ // "expensive".
+ for (Value* value : *block) {
+ if (value->isFree())
+ continue;
+ if (value == description.extra)
+ continue;
+ if (value == description.branch)
+ continue;
+ if (verbose)
+ dataLog(" Bailing because of ", deepDump(m_proc, value), "\n");
+ return false;
+ }
+
+ BasicBlock* predecessor = block->predecessor(0);
+ SwitchDescription predecessorDescription = describe(predecessor);
+ if (verbose)
+ dataLog(" Description of predecessor block ", *predecessor, ": ", predecessorDescription, "\n");
+ if (!predecessorDescription) {
+ if (verbose)
+ dataLog(" Bailing because not switch-like.\n");
+ return false;
+ }
+
+ // Both us and the predecessor are switch-like, but that doesn't mean that we're compatible.
+ // We may be switching on different values!
+ if (description.source != predecessorDescription.source) {
+ if (verbose)
+ dataLog(" Bailing because sources don't match.\n");
+ return false;
+ }
+
+ // We expect that we are the fall-through destination of the predecessor. This is a bit of a
+ // goofy condition. If we were not the fall-through destination then our switch is probably
+ // just totally redundant and we should be getting rid of it. But we don't handle that here,
+ // yet.
+ if (predecessorDescription.fallThrough.block() != block) {
+ if (verbose)
+ dataLog(" Bailing because fall-through of predecessor is not the primary block.\n");
+ return false;
+ }
+
+ // Make sure that there ain't no loops.
+ if (description.fallThrough.block() == block
+ || description.fallThrough.block() == predecessor) {
+ if (verbose)
+ dataLog(" Bailing because of fall-through loop.\n");
+ return false;
+ }
+ for (SwitchCase switchCase : description.cases) {
+ if (switchCase.targetBlock() == block
+ || switchCase.targetBlock() == predecessor) {
+ if (verbose)
+ dataLog(" Bailing because of loop in primary cases.\n");
+ return false;
+ }
+ }
+ for (SwitchCase switchCase : predecessorDescription.cases) {
+ if (switchCase.targetBlock() == block
+ || switchCase.targetBlock() == predecessor) {
+ if (verbose)
+ dataLog(" Bailing because of loop in predecessor cases.\n");
+ return false;
+ }
+ }
+
+ if (verbose)
+ dataLog(" Doing it!\n");
+ // We're committed to doing the thing.
+
+ // Delete the extra value from the predecessor, since that would break downstream inference
+ // on the next fixpoint iteration. We would think that this block is too expensive to merge
+ // because of the Equal or NotEqual value even though that value is dead! We know it's dead
+ // so we kill it ourselves.
+ for (Value* value : *predecessor) {
+ if (value == predecessorDescription.extra)
+ value->replaceWithNopIgnoringType();
+ }
+
+ // Insert all non-terminal values from our block into our predecessor. We definitely need to
+ // do this for constants. We must not do it for the extra value, since that would break
+ // downstream inference on the next fixpoint iteration. As a bonus, we don't do it for nops,
+ // so that we limit how big blocks get in this phase.
+ for (unsigned i = 0; i < block->size() - 1; ++i) {
+ Value* value = block->at(i);
+ if (value != description.extra && value->opcode() != Nop)
+ m_insertionSet.insertValue(predecessor->size() - 1, value);
+ }
+ m_insertionSet.execute(predecessor);
+ block->values().resize(0);
+ block->appendNew<Value>(m_proc, Oops, description.branch->origin());
+ block->removePredecessor(predecessor);
+
+ for (BasicBlock* successorBlock : description.block->successorBlocks())
+ successorBlock->replacePredecessor(block, predecessor);
+
+ block->clearSuccessors();
+
+ SwitchValue* switchValue = predecessor->replaceLastWithNew<SwitchValue>(
+ m_proc, predecessor->last()->origin(), description.source);
+ predecessor->clearSuccessors();
+ switchValue->setFallThrough(description.fallThrough);
+
+ Vector<int64_t> predecessorCases;
+ for (SwitchCase switchCase : predecessorDescription.cases) {
+ switchValue->appendCase(switchCase);
+ predecessorCases.append(switchCase.caseValue());
+ }
+ std::sort(predecessorCases.begin(), predecessorCases.end());
+ auto isPredecessorCase = [&] (int64_t value) -> bool {
+ return !!tryBinarySearch<int64_t>(
+ predecessorCases, predecessorCases.size(), value,
+ [] (int64_t* element) -> int64_t { return *element; });
+ };
+
+ for (SwitchCase switchCase : description.cases) {
+ if (!isPredecessorCase(switchCase.caseValue()))
+ switchValue->appendCase(switchCase);
+ }
+ return true;
+ }
+
+ struct SwitchDescription {
+ SwitchDescription()
+ {
+ }
+
+ explicit operator bool() { return !!block; }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(
+ "{block = ", pointerDump(block),
+ ", branch = ", pointerDump(branch),
+ ", extra = ", pointerDump(extra),
+ ", source = ", pointerDump(source),
+ ", cases = ", listDump(cases),
+ ", fallThrough = ", fallThrough, "}");
+ }
+
+ BasicBlock* block { nullptr };
+ Value* branch { nullptr };
+ Value* extra { nullptr }; // This is the Equal or NotEqual value, if applicable.
+ Value* source { nullptr };
+ Vector<SwitchCase, 1> cases;
+ FrequentedBlock fallThrough;
+ };
+
+ SwitchDescription describe(BasicBlock* block)
+ {
+ SwitchDescription result;
+ result.block = block;
+ result.branch = block->last();
+
+ switch (result.branch->opcode()) {
+ case Branch: {
+ Value* predicate = result.branch->child(0);
+ FrequentedBlock taken = result.block->taken();
+ FrequentedBlock notTaken = result.block->notTaken();
+ bool handled = false;
+ // NOTE: This uses UseCounts that we computed before any transformation. This is fine
+ // because although we may have mutated the IR, we would not have added any new
+ // predicates.
+ if (predicate->numChildren() == 2
+ && predicate->child(1)->hasInt()
+ && m_useCounts.numUses(predicate) == 1) {
+ switch (predicate->opcode()) {
+ case Equal:
+ result.source = predicate->child(0);
+ result.extra = predicate;
+ result.cases.append(SwitchCase(predicate->child(1)->asInt(), taken));
+ result.fallThrough = notTaken;
+ handled = true;
+ break;
+ case NotEqual:
+ result.source = predicate->child(0);
+ result.extra = predicate;
+ result.cases.append(SwitchCase(predicate->child(1)->asInt(), notTaken));
+ result.fallThrough = taken;
+ handled = true;
+ break;
+ default:
+ break;
+ }
+ }
+ if (handled)
+ break;
+ result.source = predicate;
+ result.cases.append(SwitchCase(0, notTaken));
+ result.fallThrough = taken;
+ break;
+ }
+
+ case Switch: {
+ SwitchValue* switchValue = result.branch->as<SwitchValue>();
+ result.source = switchValue->child(0);
+ for (SwitchCase switchCase : switchValue->cases(result.block))
+ result.cases.append(switchCase);
+ result.fallThrough = result.block->fallThrough();
+ break;
+ }
+
+ default:
+ result.block = nullptr;
+ result.branch = nullptr;
+ break;
+ }
+
+ return result;
+ }
+
+ Procedure& m_proc;
+ InsertionSet m_insertionSet;
+ UseCounts m_useCounts;
+};
+
+} // anonymous namespace
+
+bool inferSwitches(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "inferSwitches");
+ InferSwitches inferSwitches(proc);
+ return inferSwitches.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3InferSwitches.h b/Source/JavaScriptCore/b3/B3InferSwitches.h
new file mode 100644
index 000000000..d0466f840
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InferSwitches.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Fixpoints to convert chains of branches into switches.
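+//
+// Illustrative example: a chain like
+//
+//     #a: Branch(Equal(@x, 1), then: #one, else: #b)
+//     #b: Branch(Equal(@x, 2), then: #two, else: #rest)
+//
+// can be merged into a single Switch on @x in #a with cases 1 => #one and 2 => #two and
+// fall-through #rest, provided #b has no other predecessors and does nothing else expensive.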
+
+bool inferSwitches(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3InsertionSet.cpp b/Source/JavaScriptCore/b3/B3InsertionSet.cpp
new file mode 100644
index 000000000..a6e119fa2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InsertionSet.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3InsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 {
+
+Value* InsertionSet::insertIntConstant(size_t index, Origin origin, Type type, int64_t value)
+{
+ return insertValue(index, m_procedure.addIntConstant(origin, type, value));
+}
+
+Value* InsertionSet::insertIntConstant(size_t index, Value* likeValue, int64_t value)
+{
+ return insertIntConstant(index, likeValue->origin(), likeValue->type(), value);
+}
+
+Value* InsertionSet::insertBottom(size_t index, Origin origin, Type type)
+{
+ Value*& bottom = m_bottomForType[type];
+ if (!bottom)
+ bottom = insertValue(index, m_procedure.addBottom(origin, type));
+ return bottom;
+}
+
+Value* InsertionSet::insertBottom(size_t index, Value* likeValue)
+{
+ return insertBottom(index, likeValue->origin(), likeValue->type());
+}
+
+void InsertionSet::execute(BasicBlock* block)
+{
+ bubbleSort(m_insertions.begin(), m_insertions.end());
+ executeInsertions(block->m_values, m_insertions);
+ m_bottomForType = TypeMap<Value*>();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3InsertionSet.h b/Source/JavaScriptCore/b3/B3InsertionSet.h
new file mode 100644
index 000000000..1eb527287
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InsertionSet.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+#include "B3Type.h"
+#include "B3TypeMap.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Procedure;
+class Value;
+
+typedef WTF::Insertion<Value*> Insertion;
+
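+// Queues values to be inserted into a basic block at given indices; execute() splices them in.
+// As used by the phases in this patch, the indices refer to positions in the block before any of
+// the queued insertions, so a phase can keep iterating a block by its original indices while
+// scheduling insertions, call execute() once per block, and reuse the set for the next block.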
+class InsertionSet {
+public:
+ InsertionSet(Procedure& procedure)
+ : m_procedure(procedure)
+ {
+ }
+
+ bool isEmpty() const { return m_insertions.isEmpty(); }
+
+ Procedure& code() { return m_procedure; }
+
+ void appendInsertion(const Insertion& insertion)
+ {
+ m_insertions.append(insertion);
+ }
+
+ Value* insertValue(size_t index, Value* value)
+ {
+ appendInsertion(Insertion(index, value));
+ return value;
+ }
+
+ template<typename ValueType, typename... Arguments>
+ ValueType* insert(size_t index, Arguments... arguments);
+
+ Value* insertIntConstant(size_t index, Origin, Type, int64_t value);
+ Value* insertIntConstant(size_t index, Value* likeValue, int64_t value);
+
+ Value* insertBottom(size_t index, Origin, Type);
+ Value* insertBottom(size_t index, Value*);
+
+ void execute(BasicBlock*);
+
+private:
+ Procedure& m_procedure;
+ Vector<Insertion, 8> m_insertions;
+
+ TypeMap<Value*> m_bottomForType;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3InsertionSetInlines.h b/Source/JavaScriptCore/b3/B3InsertionSetInlines.h
new file mode 100644
index 000000000..c5b03df03
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3InsertionSetInlines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3InsertionSet.h"
+#include "B3ProcedureInlines.h"
+
+namespace JSC { namespace B3 {
+
+template<typename ValueType, typename... Arguments>
+ValueType* InsertionSet::insert(size_t index, Arguments... arguments)
+{
+ return static_cast<ValueType*>(insertValue(index, m_procedure.add<ValueType>(arguments...)));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Kind.cpp b/Source/JavaScriptCore/b3/B3Kind.cpp
new file mode 100644
index 000000000..147ab23c6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Kind.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Kind.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 {
+
+void Kind::dump(PrintStream& out) const
+{
+ out.print(m_opcode);
+
+ CommaPrinter comma(", ", "<");
+ if (isChill())
+ out.print(comma, "Chill");
+ if (traps())
+ out.print(comma, "Traps");
+ if (comma.didPrint())
+ out.print(">");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Kind.h b/Source/JavaScriptCore/b3/B3Kind.h
new file mode 100644
index 000000000..268c8e766
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Kind.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef B3Kind_h
+#define B3Kind_h
+
+#if ENABLE(B3_JIT)
+
+#include "B3Opcode.h"
+#include <wtf/HashTable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// A Kind is a terse summary of what a Value does. There is a fixed number of possible
+// Kinds. Kind is a tuple of Opcode (see B3Opcode.h) and some extra bits. Most opcodes don't
+// get any extra bits, and those bits must remain zero if the Kind's opcode field is set to
+// one of those opcodes. The purpose of Kind is to be like an opcode in other IRs, but to
+// be multidimensional. For example, a Load has many dimensions of customization that we may
+// eventually implement. A Load can have different alignments, alignment failure modes,
+// temporality modes, trapping modes, ordering modes, etc. It's fine to put such flags into
+// subclasses of Value, but in some cases that would be overkill, particularly since if you
+// did that for a pure value then you'd also have to thread it through ValueKey. It's much
+// easier to put it in Kind, and then your extra bit will get carried around by everyone who
+// knows how to carry around Kinds. Most importantly, putting flags into Kind allows you to
+// use them as part of B3::Value's dynamic cast facility. For example we could have a
+// trapping Load that uses a Value subclass that has a stackmap while non-trapping Loads
+// continue to use the normal MemoryValue.
+//
+// Note that any code in the compiler that transcribes IR (like a strength reduction that
+// replaces an Add with a different Add, or even with a different opcode entirely) will
+// probably drop unknown bits by default. This is definitely not correct for many bits (like
+// isChill for Div/Mod and all of the envisioned Load/Store flags), so if you add a new bit
+// you will probably have to audit the compiler to make sure that phases that transcribe
+// your opcode do the right thing with your bit.
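+//
+// For example (purely illustrative), a phase that rewrites a value into an equivalent one
+// would typically copy the whole Kind rather than just the opcode, so that flags like
+// isChill and traps survive the rewrite:
+//
+//     Value* replacement = block->appendNew<Value>(proc, original->kind(), original->origin(), newLeft, newRight);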
+
+class Kind {
+public:
+ Kind(Opcode opcode)
+ : m_opcode(opcode)
+ , m_isChill(false)
+ , m_traps(false)
+ {
+ }
+
+ Kind()
+ : Kind(Oops)
+ {
+ }
+
+ Opcode opcode() const { return m_opcode; }
+ void setOpcode(Opcode opcode) { m_opcode = opcode; }
+
+ bool hasExtraBits() const { return m_isChill || m_traps; }
+
+ // Chill bit. This applies to division-based arithmetic ops, which may trap on some
+ // platforms or exhibit bizarre behavior when passed certain inputs. The non-chill
+ // version will behave as unpredictably as it wants. For example, it's legal to
+ // constant-fold Div(x, 0) to any value or to replace it with any effectful operation.
+ // But when it's chill, that means that the semantics when it would have trapped are
+ // the JS semantics. For example, Div<Chill>(@a, @b) means:
+ //
+ // ((a | 0) / (b | 0)) | 0
+ //
+ // And Mod<Chill>(a, b) means:
+ //
+ // ((a | 0) % (b | 0)) | 0
+ //
+ // Note that Div<Chill> matches exactly how ARM handles integer division.
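+    //
+    // For example, under those semantics Div<Chill>(@x, 0) evaluates to 0, and
+    // Div<Chill>(-2147483648, -1) evaluates to -2147483648, since
+    // ((-2147483648 | 0) / (-1 | 0)) | 0 == -2147483648.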
+ bool hasIsChill() const
+ {
+ switch (m_opcode) {
+ case Div:
+ case Mod:
+ return true;
+ default:
+ return false;
+ }
+ }
+ bool isChill() const
+ {
+ return m_isChill;
+ }
+ void setIsChill(bool isChill)
+ {
+ ASSERT(hasIsChill());
+ m_isChill = isChill;
+ }
+
+ // Traps bit. This applies to memory access ops. It means that the instruction could
+ // trap as part of some check it performs, and that we mean to make this observable. This
+ // currently only applies to memory accesses (loads and stores). You don't get to find out where
+ // in the Procedure the trap happened. If you try to work it out using Origin, you'll have a bad
+ // time because the instruction selector is too sloppy with Origin().
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=162688
+ bool hasTraps() const
+ {
+ switch (m_opcode) {
+ case Load8Z:
+ case Load8S:
+ case Load16Z:
+ case Load16S:
+ case Load:
+ case Store8:
+ case Store16:
+ case Store:
+ return true;
+ default:
+ return false;
+ }
+ }
+ bool traps() const
+ {
+ return m_traps;
+ }
+ void setTraps(bool traps)
+ {
+ ASSERT(hasTraps());
+ m_traps = traps;
+ }
+
+ // Rules for adding new properties:
+ // - Put the accessors here.
+ // - hasBlah() should check if the opcode allows for your property.
+ // - blah() returns a default value if !hasBlah()
+ // - setBlah() asserts if !hasBlah()
+ // - Try not to increase the size of Kind too much. But it wouldn't be the end of the
+ // world if it bloated to 64 bits.
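+    //
+    // For instance, following those rules for a hypothetical flag 'blah' (purely
+    // illustrative; no such flag exists) would look roughly like:
+    //
+    //     bool hasBlah() const { return m_opcode == SomeOpcode; }
+    //     bool blah() const { return m_blah; }
+    //     void setBlah(bool blah) { ASSERT(hasBlah()); m_blah = blah; }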
+
+ bool operator==(const Kind& other) const
+ {
+ return m_opcode == other.m_opcode
+ && m_isChill == other.m_isChill
+ && m_traps == other.m_traps;
+ }
+
+ bool operator!=(const Kind& other) const
+ {
+ return !(*this == other);
+ }
+
+ void dump(PrintStream&) const;
+
+ unsigned hash() const
+ {
+ // It's almost certainly more important that this hash function is cheap to compute than
+        // anything else. We can live with some Kind hash collisions.
+ return m_opcode + (static_cast<unsigned>(m_isChill) << 16) + (static_cast<unsigned>(m_traps) << 7);
+ }
+
+ Kind(WTF::HashTableDeletedValueType)
+ : m_opcode(Oops)
+ , m_isChill(true)
+ , m_traps(false)
+ {
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return *this == Kind(WTF::HashTableDeletedValue);
+ }
+
+private:
+ Opcode m_opcode;
+ bool m_isChill : 1;
+ bool m_traps : 1;
+};
+
+// For every flag 'foo' you add, it's customary to create a Kind B3::foo(Kind) function that makes
+// a kind with the flag set. For example, for chill, this lets us say:
+//
+// block->appendNew<Value>(m_proc, chill(Mod), Origin(), a, b);
+//
+// I like to make the flag name fill in the sentence "Mod _____" (like "isChill" or "traps") while
+// the flag constructor fills in the phrase "_____ Mod" (like "chill" or "trapping").
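+//
+// For example, a trapping load might be constructed as follows (a hedged usage sketch; the
+// exact MemoryValue constructor arguments are assumed here):
+//
+//     block->appendNew<MemoryValue>(m_proc, trapping(Load), Int32, Origin(), pointer);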
+
+inline Kind chill(Kind kind)
+{
+ kind.setIsChill(true);
+ return kind;
+}
+
+inline Kind trapping(Kind kind)
+{
+ kind.setTraps(true);
+ return kind;
+}
+
+struct KindHash {
+ static unsigned hash(const Kind& key) { return key.hash(); }
+ static bool equal(const Kind& a, const Kind& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Kind> {
+ typedef JSC::B3::KindHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Kind> : public SimpleClassHashTraits<JSC::B3::Kind> {
+ static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
+
+#endif // B3Kind_h
+
diff --git a/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp b/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp
new file mode 100644
index 000000000..8c17ff58e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3LegalizeMemoryOffsets.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LegalizeMemoryOffsets {
+public:
+ LegalizeMemoryOffsets(Procedure& proc)
+ : m_proc(proc)
+ , m_insertionSet(proc)
+ {
+ }
+
+ void run()
+ {
+ if (!isARM64())
+ return;
+
+ for (BasicBlock* block : m_proc) {
+ for (unsigned index = 0; index < block->size(); ++index) {
+ MemoryValue* memoryValue = block->at(index)->as<MemoryValue>();
+ if (!memoryValue)
+ continue;
+
+ int32_t offset = memoryValue->offset();
+ Air::Arg::Width width = Air::Arg::widthForBytes(memoryValue->accessByteSize());
+ if (!Air::Arg::isValidAddrForm(offset, width)) {
+ Value* base = memoryValue->lastChild();
+ Value* offsetValue = m_insertionSet.insertIntConstant(index, memoryValue->origin(), pointerType(), offset);
+ Value* resolvedAddress = m_proc.add<Value>(Add, memoryValue->origin(), base, offsetValue);
+ m_insertionSet.insertValue(index, resolvedAddress);
+
+ memoryValue->lastChild() = resolvedAddress;
+ memoryValue->setOffset(0);
+ }
+ }
+ m_insertionSet.execute(block);
+ }
+ }
+
+ Procedure& m_proc;
+ InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void legalizeMemoryOffsets(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "legalizeMemoryOffsets");
+ LegalizeMemoryOffsets legalizeMemoryOffsets(proc);
+ legalizeMemoryOffsets.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h b/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h
new file mode 100644
index 000000000..c482ab230
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LegalizeMemoryOffsets.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// If the offset of a MemoryValue cannot be represented in the target instruction set,
+// compute the address explicitly.
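+//
+// For example (the offset is chosen for illustration), a Load(@base, offset = 0x12345) whose
+// offset does not fit the target's addressing forms becomes roughly:
+//
+//     @address = Add(@base, 0x12345)
+//     Load(@address, offset = 0)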
+void legalizeMemoryOffsets(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3LowerMacros.cpp b/Source/JavaScriptCore/b3/B3LowerMacros.cpp
new file mode 100644
index 000000000..68415108d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerMacros.cpp
@@ -0,0 +1,500 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3LowerMacros.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3CCallValue.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3ConstPtrValue.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PatchpointValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "CCallHelpers.h"
+#include "LinkBuffer.h"
+#include <cmath>
+#include <wtf/BitVector.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LowerMacros {
+public:
+ LowerMacros(Procedure& proc)
+ : m_proc(proc)
+ , m_blockInsertionSet(proc)
+ , m_insertionSet(proc)
+ {
+ }
+
+ bool run()
+ {
+ for (BasicBlock* block : m_proc) {
+ m_block = block;
+ processCurrentBlock();
+ }
+ m_changed |= m_blockInsertionSet.execute();
+ if (m_changed) {
+ m_proc.resetReachability();
+ m_proc.invalidateCFG();
+ }
+ return m_changed;
+ }
+
+private:
+ void processCurrentBlock()
+ {
+ for (m_index = 0; m_index < m_block->size(); ++m_index) {
+ m_value = m_block->at(m_index);
+ m_origin = m_value->origin();
+ switch (m_value->opcode()) {
+ case Mod: {
+ if (m_value->isChill()) {
+ if (isARM64()) {
+ BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+ BasicBlock* zeroDenCase = m_blockInsertionSet.insertBefore(m_block);
+ BasicBlock* normalModCase = m_blockInsertionSet.insertBefore(m_block);
+
+ before->replaceLastWithNew<Value>(m_proc, Branch, m_origin, m_value->child(1));
+ before->setSuccessors(
+ FrequentedBlock(normalModCase, FrequencyClass::Normal),
+ FrequentedBlock(zeroDenCase, FrequencyClass::Rare));
+
+ Value* divResult = normalModCase->appendNew<Value>(m_proc, chill(Div), m_origin, m_value->child(0), m_value->child(1));
+ Value* multipliedBack = normalModCase->appendNew<Value>(m_proc, Mul, m_origin, divResult, m_value->child(1));
+ Value* result = normalModCase->appendNew<Value>(m_proc, Sub, m_origin, m_value->child(0), multipliedBack);
+ UpsilonValue* normalResult = normalModCase->appendNew<UpsilonValue>(m_proc, m_origin, result);
+ normalModCase->appendNew<Value>(m_proc, Jump, m_origin);
+ normalModCase->setSuccessors(FrequentedBlock(m_block));
+
+ UpsilonValue* zeroResult = zeroDenCase->appendNew<UpsilonValue>(
+ m_proc, m_origin,
+ zeroDenCase->appendIntConstant(m_proc, m_value, 0));
+ zeroDenCase->appendNew<Value>(m_proc, Jump, m_origin);
+ zeroDenCase->setSuccessors(FrequentedBlock(m_block));
+
+ Value* phi = m_insertionSet.insert<Value>(m_index, Phi, m_value->type(), m_origin);
+ normalResult->setPhi(phi);
+ zeroResult->setPhi(phi);
+ m_value->replaceWithIdentity(phi);
+ before->updatePredecessorsAfter();
+ m_changed = true;
+ } else
+ makeDivisionChill(Mod);
+ break;
+ }
+
+ double (*fmodDouble)(double, double) = fmod;
+ if (m_value->type() == Double) {
+ Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, fmodDouble);
+ Value* result = m_insertionSet.insert<CCallValue>(m_index, Double, m_origin,
+ Effects::none(),
+ functionAddress,
+ m_value->child(0),
+ m_value->child(1));
+ m_value->replaceWithIdentity(result);
+ m_changed = true;
+ } else if (m_value->type() == Float) {
+ Value* numeratorAsDouble = m_insertionSet.insert<Value>(m_index, FloatToDouble, m_origin, m_value->child(0));
+ Value* denominatorAsDouble = m_insertionSet.insert<Value>(m_index, FloatToDouble, m_origin, m_value->child(1));
+ Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, fmodDouble);
+ Value* doubleMod = m_insertionSet.insert<CCallValue>(m_index, Double, m_origin,
+ Effects::none(),
+ functionAddress,
+ numeratorAsDouble,
+ denominatorAsDouble);
+ Value* result = m_insertionSet.insert<Value>(m_index, DoubleToFloat, m_origin, doubleMod);
+ m_value->replaceWithIdentity(result);
+ m_changed = true;
+ } else if (isARM64()) {
+ Value* divResult = m_insertionSet.insert<Value>(m_index, chill(Div), m_origin, m_value->child(0), m_value->child(1));
+ Value* multipliedBack = m_insertionSet.insert<Value>(m_index, Mul, m_origin, divResult, m_value->child(1));
+ Value* result = m_insertionSet.insert<Value>(m_index, Sub, m_origin, m_value->child(0), multipliedBack);
+ m_value->replaceWithIdentity(result);
+ m_changed = true;
+ }
+ break;
+ }
+
+ case UMod: {
+ if (isARM64()) {
+ Value* divResult = m_insertionSet.insert<Value>(m_index, UDiv, m_origin, m_value->child(0), m_value->child(1));
+ Value* multipliedBack = m_insertionSet.insert<Value>(m_index, Mul, m_origin, divResult, m_value->child(1));
+ Value* result = m_insertionSet.insert<Value>(m_index, Sub, m_origin, m_value->child(0), multipliedBack);
+ m_value->replaceWithIdentity(result);
+ m_changed = true;
+ }
+ break;
+ }
+
+ case Div: {
+ if (m_value->isChill())
+ makeDivisionChill(Div);
+ break;
+ }
+
+ case Switch: {
+ SwitchValue* switchValue = m_value->as<SwitchValue>();
+ Vector<SwitchCase> cases;
+ for (const SwitchCase& switchCase : switchValue->cases(m_block))
+ cases.append(switchCase);
+ std::sort(
+ cases.begin(), cases.end(),
+ [] (const SwitchCase& left, const SwitchCase& right) {
+ return left.caseValue() < right.caseValue();
+ });
+ FrequentedBlock fallThrough = m_block->fallThrough();
+ m_block->values().removeLast();
+ recursivelyBuildSwitch(cases, fallThrough, 0, false, cases.size(), m_block);
+ m_proc.deleteValue(switchValue);
+ m_block->updatePredecessorsAfter();
+ m_changed = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ m_insertionSet.execute(m_block);
+ }
+
+ void makeDivisionChill(Opcode nonChillOpcode)
+ {
+ ASSERT(nonChillOpcode == Div || nonChillOpcode == Mod);
+
+ // ARM supports this instruction natively.
+ if (isARM64())
+ return;
+
+ // We implement "res = Div<Chill>/Mod<Chill>(num, den)" as follows:
+ //
+ // if (den + 1 <=_unsigned 1) {
+ // if (!den) {
+ // res = 0;
+ // goto done;
+ // }
+ // if (num == -2147483648) {
+ // res = isDiv ? num : 0;
+ // goto done;
+ // }
+ // }
+        //     res = num (/ or %) den;
+ // done:
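+        //
+        // For example, with Int32 operands this gives Div<Chill>(-2147483648, -1) == -2147483648,
+        // Mod<Chill>(-2147483648, -1) == 0, and Div<Chill>(num, 0) == Mod<Chill>(num, 0) == 0.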
+ m_changed = true;
+
+ Value* num = m_value->child(0);
+ Value* den = m_value->child(1);
+
+ Value* one = m_insertionSet.insertIntConstant(m_index, m_value, 1);
+ Value* isDenOK = m_insertionSet.insert<Value>(
+ m_index, Above, m_origin,
+ m_insertionSet.insert<Value>(m_index, Add, m_origin, den, one),
+ one);
+
+ BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+
+ BasicBlock* normalDivCase = m_blockInsertionSet.insertBefore(m_block);
+ BasicBlock* shadyDenCase = m_blockInsertionSet.insertBefore(m_block);
+ BasicBlock* zeroDenCase = m_blockInsertionSet.insertBefore(m_block);
+ BasicBlock* neg1DenCase = m_blockInsertionSet.insertBefore(m_block);
+ BasicBlock* intMinCase = m_blockInsertionSet.insertBefore(m_block);
+
+ before->replaceLastWithNew<Value>(m_proc, Branch, m_origin, isDenOK);
+ before->setSuccessors(
+ FrequentedBlock(normalDivCase, FrequencyClass::Normal),
+ FrequentedBlock(shadyDenCase, FrequencyClass::Rare));
+
+ UpsilonValue* normalResult = normalDivCase->appendNew<UpsilonValue>(
+ m_proc, m_origin,
+ normalDivCase->appendNew<Value>(m_proc, nonChillOpcode, m_origin, num, den));
+ normalDivCase->appendNew<Value>(m_proc, Jump, m_origin);
+ normalDivCase->setSuccessors(FrequentedBlock(m_block));
+
+ shadyDenCase->appendNew<Value>(m_proc, Branch, m_origin, den);
+ shadyDenCase->setSuccessors(
+ FrequentedBlock(neg1DenCase, FrequencyClass::Normal),
+ FrequentedBlock(zeroDenCase, FrequencyClass::Rare));
+
+ UpsilonValue* zeroResult = zeroDenCase->appendNew<UpsilonValue>(
+ m_proc, m_origin,
+ zeroDenCase->appendIntConstant(m_proc, m_value, 0));
+ zeroDenCase->appendNew<Value>(m_proc, Jump, m_origin);
+ zeroDenCase->setSuccessors(FrequentedBlock(m_block));
+
+ int64_t badNumeratorConst = 0;
+ switch (m_value->type()) {
+ case Int32:
+ badNumeratorConst = std::numeric_limits<int32_t>::min();
+ break;
+ case Int64:
+ badNumeratorConst = std::numeric_limits<int64_t>::min();
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ badNumeratorConst = 0;
+ }
+
+ Value* badNumerator =
+ neg1DenCase->appendIntConstant(m_proc, m_value, badNumeratorConst);
+
+ neg1DenCase->appendNew<Value>(
+ m_proc, Branch, m_origin,
+ neg1DenCase->appendNew<Value>(
+ m_proc, Equal, m_origin, num, badNumerator));
+ neg1DenCase->setSuccessors(
+ FrequentedBlock(intMinCase, FrequencyClass::Rare),
+ FrequentedBlock(normalDivCase, FrequencyClass::Normal));
+
+ Value* intMinResult = nonChillOpcode == Div ? badNumerator : intMinCase->appendIntConstant(m_proc, m_value, 0);
+ UpsilonValue* intMinResultUpsilon = intMinCase->appendNew<UpsilonValue>(
+ m_proc, m_origin, intMinResult);
+ intMinCase->appendNew<Value>(m_proc, Jump, m_origin);
+ intMinCase->setSuccessors(FrequentedBlock(m_block));
+
+ Value* phi = m_insertionSet.insert<Value>(
+ m_index, Phi, m_value->type(), m_origin);
+ normalResult->setPhi(phi);
+ zeroResult->setPhi(phi);
+ intMinResultUpsilon->setPhi(phi);
+
+ m_value->replaceWithIdentity(phi);
+ before->updatePredecessorsAfter();
+ }
+
+ void recursivelyBuildSwitch(
+ const Vector<SwitchCase>& cases, FrequentedBlock fallThrough, unsigned start, bool hardStart,
+ unsigned end, BasicBlock* before)
+ {
+ Value* child = m_value->child(0);
+ Type type = child->type();
+
+ // It's a good idea to use a table-based switch in some cases: the number of cases has to be
+ // large enough and they have to be dense enough. This could probably be improved a lot. For
+ // example, we could still use a jump table in cases where the inputs are sparse so long as we
+ // shift off the uninteresting bits. On the other hand, it's not clear that this would
+ // actually be any better than what we have done here and it's not clear that it would be
+ // better than a binary switch.
+ const unsigned minCasesForTable = 7;
+ const unsigned densityLimit = 4;
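+        // For example, 7 cases covering the values 0..9 give (9 - 0 + 1) / 7 == 1 < 4, so they are
+        // dense enough for a table, whereas 7 cases spread over 0..99 give 100 / 7 == 14 and fall
+        // through to the binary switch below.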
+ if (end - start >= minCasesForTable) {
+ int64_t firstValue = cases[start].caseValue();
+ int64_t lastValue = cases[end - 1].caseValue();
+ if ((lastValue - firstValue + 1) / (end - start) < densityLimit) {
+ BasicBlock* switchBlock = m_blockInsertionSet.insertAfter(m_block);
+ Value* index = before->appendNew<Value>(
+ m_proc, Sub, m_origin, child,
+ before->appendIntConstant(m_proc, m_origin, type, firstValue));
+ before->appendNew<Value>(
+ m_proc, Branch, m_origin,
+ before->appendNew<Value>(
+ m_proc, Above, m_origin, index,
+ before->appendIntConstant(m_proc, m_origin, type, lastValue - firstValue)));
+ before->setSuccessors(fallThrough, FrequentedBlock(switchBlock));
+
+ size_t tableSize = lastValue - firstValue + 1;
+
+ if (index->type() != pointerType() && index->type() == Int32)
+ index = switchBlock->appendNew<Value>(m_proc, ZExt32, m_origin, index);
+
+ PatchpointValue* patchpoint =
+ switchBlock->appendNew<PatchpointValue>(m_proc, Void, m_origin);
+
+ // Even though this loads from the jump table, the jump table is immutable. For the
+ // purpose of alias analysis, reading something immutable is like reading nothing.
+ patchpoint->effects = Effects();
+ patchpoint->effects.terminal = true;
+
+ patchpoint->appendSomeRegister(index);
+ patchpoint->numGPScratchRegisters++;
+ // Technically, we don't have to clobber macro registers on X86_64. This is probably
+ // OK though.
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+ BitVector handledIndices;
+ for (unsigned i = start; i < end; ++i) {
+ FrequentedBlock block = cases[i].target();
+ int64_t value = cases[i].caseValue();
+ switchBlock->appendSuccessor(block);
+ size_t index = value - firstValue;
+ ASSERT(!handledIndices.get(index));
+ handledIndices.set(index);
+ }
+
+ bool hasUnhandledIndex = false;
+ for (unsigned i = 0; i < tableSize; ++i) {
+ if (!handledIndices.get(i)) {
+ hasUnhandledIndex = true;
+ break;
+ }
+ }
+
+ if (hasUnhandledIndex)
+ switchBlock->appendSuccessor(fallThrough);
+
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ MacroAssemblerCodePtr* jumpTable = static_cast<MacroAssemblerCodePtr*>(
+ params.proc().addDataSection(sizeof(MacroAssemblerCodePtr) * tableSize));
+
+ GPRReg index = params[0].gpr();
+ GPRReg scratch = params.gpScratch(0);
+
+ jit.move(CCallHelpers::TrustedImmPtr(jumpTable), scratch);
+ jit.jump(CCallHelpers::BaseIndex(scratch, index, CCallHelpers::timesPtr()));
+
+ // These labels are guaranteed to be populated before either late paths or
+ // link tasks run.
+ Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ if (hasUnhandledIndex) {
+ MacroAssemblerCodePtr fallThrough =
+ linkBuffer.locationOf(*labels.last());
+ for (unsigned i = tableSize; i--;)
+ jumpTable[i] = fallThrough;
+ }
+
+ unsigned labelIndex = 0;
+ for (unsigned tableIndex : handledIndices) {
+ jumpTable[tableIndex] =
+ linkBuffer.locationOf(*labels[labelIndex++]);
+ }
+ });
+ });
+ return;
+ }
+ }
+
+ // See comments in jit/BinarySwitch.cpp for a justification of this algorithm. The only
+ // thing we do differently is that we don't use randomness.
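+        //
+        // For example, with the 7 sorted case values {1, 2, 3, 10, 20, 30, 40} (too sparse for a
+        // table), the median index is 3, so 'before' branches on LessThan(child, 10); the left
+        // half then handles {1, 2, 3} and the right half handles {10, 20, 30, 40} with
+        // hardStart = true.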
+
+ const unsigned leafThreshold = 3;
+
+ unsigned size = end - start;
+
+ if (size <= leafThreshold) {
+ bool allConsecutive = false;
+
+ if ((hardStart || (start && cases[start - 1].caseValue() == cases[start].caseValue() - 1))
+ && end < cases.size()
+ && cases[end - 1].caseValue() == cases[end].caseValue() - 1) {
+ allConsecutive = true;
+ for (unsigned i = 0; i < size - 1; ++i) {
+ if (cases[start + i].caseValue() + 1 != cases[start + i + 1].caseValue()) {
+ allConsecutive = false;
+ break;
+ }
+ }
+ }
+
+ unsigned limit = allConsecutive ? size - 1 : size;
+
+ for (unsigned i = 0; i < limit; ++i) {
+ BasicBlock* nextCheck = m_blockInsertionSet.insertAfter(m_block);
+ before->appendNew<Value>(
+ m_proc, Branch, m_origin,
+ before->appendNew<Value>(
+ m_proc, Equal, m_origin, child,
+ before->appendIntConstant(
+ m_proc, m_origin, type,
+ cases[start + i].caseValue())));
+ before->setSuccessors(cases[start + i].target(), FrequentedBlock(nextCheck));
+
+ before = nextCheck;
+ }
+
+ before->appendNew<Value>(m_proc, Jump, m_origin);
+ if (allConsecutive)
+ before->setSuccessors(cases[end - 1].target());
+ else
+ before->setSuccessors(fallThrough);
+ return;
+ }
+
+ unsigned medianIndex = (start + end) / 2;
+
+ BasicBlock* left = m_blockInsertionSet.insertAfter(m_block);
+ BasicBlock* right = m_blockInsertionSet.insertAfter(m_block);
+
+ before->appendNew<Value>(
+ m_proc, Branch, m_origin,
+ before->appendNew<Value>(
+ m_proc, LessThan, m_origin, child,
+ before->appendIntConstant(
+ m_proc, m_origin, type,
+ cases[medianIndex].caseValue())));
+ before->setSuccessors(FrequentedBlock(left), FrequentedBlock(right));
+
+ recursivelyBuildSwitch(cases, fallThrough, start, hardStart, medianIndex, left);
+ recursivelyBuildSwitch(cases, fallThrough, medianIndex, true, end, right);
+ }
+
+ Procedure& m_proc;
+ BlockInsertionSet m_blockInsertionSet;
+ InsertionSet m_insertionSet;
+ BasicBlock* m_block;
+ unsigned m_index;
+ Value* m_value;
+ Origin m_origin;
+ bool m_changed { false };
+};
+
+bool lowerMacrosImpl(Procedure& proc)
+{
+ LowerMacros lowerMacros(proc);
+ return lowerMacros.run();
+}
+
+} // anonymous namespace
+
+bool lowerMacros(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "lowerMacros");
+ bool result = lowerMacrosImpl(proc);
+ if (shouldValidateIR())
+ RELEASE_ASSERT(!lowerMacrosImpl(proc));
+ return result;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3LowerMacros.h b/Source/JavaScriptCore/b3/B3LowerMacros.h
new file mode 100644
index 000000000..f9649e2c8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerMacros.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Lowers high-level operations that are easier to deal with once they are broken up. Currently
+// this includes Switch, chill Div/Mod, floating-point Mod, and UMod on targets without a
+// remainder instruction.
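+//
+// For example, a Switch becomes either a bounds check plus a jump-table patchpoint or a tree of
+// Branch values, and chill Div/Mod become explicit control flow around an ordinary Div
+// (see B3LowerMacros.cpp).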
+
+bool lowerMacros(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.cpp b/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.cpp
new file mode 100644
index 000000000..dbe158b5c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3LowerMacrosAfterOptimizations.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3CCallValue.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstFloatValue.h"
+#include "B3ConstPtrValue.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LowerMacros {
+public:
+ LowerMacros(Procedure& proc)
+ : m_proc(proc)
+ , m_blockInsertionSet(proc)
+ , m_insertionSet(proc)
+ {
+ }
+
+ bool run()
+ {
+ for (BasicBlock* block : m_proc) {
+ m_block = block;
+ processCurrentBlock();
+ }
+ m_changed |= m_blockInsertionSet.execute();
+ if (m_changed) {
+ m_proc.resetReachability();
+ m_proc.invalidateCFG();
+ }
+ return m_changed;
+ }
+
+private:
+ void processCurrentBlock()
+ {
+ for (m_index = 0; m_index < m_block->size(); ++m_index) {
+ m_value = m_block->at(m_index);
+ m_origin = m_value->origin();
+ switch (m_value->opcode()) {
+ case Abs: {
+ // ARM supports this instruction natively.
+ if (isARM64())
+ break;
+
+ Value* mask = nullptr;
+ if (m_value->type() == Double)
+ mask = m_insertionSet.insert<ConstDoubleValue>(m_index, m_origin, bitwise_cast<double>(~(1ll << 63)));
+ else if (m_value->type() == Float)
+ mask = m_insertionSet.insert<ConstFloatValue>(m_index, m_origin, bitwise_cast<float>(~(1 << 31)));
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+ Value* result = m_insertionSet.insert<Value>(m_index, BitAnd, m_origin, m_value->child(0), mask);
+ m_value->replaceWithIdentity(result);
+ break;
+ }
+ case Ceil: {
+ if (MacroAssembler::supportsFloatingPointRounding())
+ break;
+
+ Value* functionAddress = nullptr;
+ if (m_value->type() == Double) {
+ double (*ceilDouble)(double) = ceil;
+ functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, ceilDouble);
+ } else if (m_value->type() == Float)
+ functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, ceilf);
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+
+ Value* result = m_insertionSet.insert<CCallValue>(m_index,
+ m_value->type(),
+ m_origin,
+ Effects::none(),
+ functionAddress,
+ m_value->child(0));
+ m_value->replaceWithIdentity(result);
+ break;
+ }
+ case Floor: {
+ if (MacroAssembler::supportsFloatingPointRounding())
+ break;
+
+ Value* functionAddress = nullptr;
+ if (m_value->type() == Double) {
+ double (*floorDouble)(double) = floor;
+ functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, floorDouble);
+ } else if (m_value->type() == Float)
+ functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, floorf);
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+
+ Value* result = m_insertionSet.insert<CCallValue>(m_index,
+ m_value->type(),
+ m_origin,
+ Effects::none(),
+ functionAddress,
+ m_value->child(0));
+ m_value->replaceWithIdentity(result);
+ break;
+ }
+ case Neg: {
+ if (!isFloat(m_value->type()))
+ break;
+
+ // X86 is odd in that it requires this.
+ if (!isX86())
+ break;
+
+ Value* mask = nullptr;
+ if (m_value->type() == Double)
+ mask = m_insertionSet.insert<ConstDoubleValue>(m_index, m_origin, -0.0);
+ else {
+ RELEASE_ASSERT(m_value->type() == Float);
+ mask = m_insertionSet.insert<ConstFloatValue>(m_index, m_origin, -0.0f);
+ }
+
+ Value* result = m_insertionSet.insert<Value>(
+ m_index, BitXor, m_origin, m_value->child(0), mask);
+ m_value->replaceWithIdentity(result);
+ break;
+ }
+
+ case RotL: {
+ // ARM64 doesn't have a rotate left.
+                if (isARM64()) {
+                    Value* newShift = m_insertionSet.insert<Value>(m_index, Neg, m_value->origin(), m_value->child(1));
+                    Value* rotate = m_insertionSet.insert<Value>(m_index, RotR, m_value->origin(), m_value->child(0), newShift);
+                    m_value->replaceWithIdentity(rotate);
+                    break;
+                }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ m_insertionSet.execute(m_block);
+ }
+
+ Procedure& m_proc;
+ BlockInsertionSet m_blockInsertionSet;
+ InsertionSet m_insertionSet;
+ BasicBlock* m_block;
+ unsigned m_index;
+ Value* m_value;
+ Origin m_origin;
+ bool m_changed { false };
+};
+
+bool lowerMacrosImpl(Procedure& proc)
+{
+ LowerMacros lowerMacros(proc);
+ return lowerMacros.run();
+}
+
+} // anonymous namespace
+
+bool lowerMacrosAfterOptimizations(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "lowerMacrosAfterOptimizations");
+ bool result = lowerMacrosImpl(proc);
+ if (shouldValidateIR())
+ RELEASE_ASSERT(!lowerMacrosImpl(proc));
+ return result;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.h b/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.h
new file mode 100644
index 000000000..f7b653665
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerMacrosAfterOptimizations.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Lower certain high-level opcodes to lower-level opcodes to help code generation.
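+//
+// For example, when the target lacks a native form, Abs becomes a BitAnd with a sign-clearing
+// mask, Ceil and Floor become C calls, floating-point Neg on x86 becomes a BitXor with -0.0, and
+// RotL on ARM64 becomes RotR of a negated shift amount.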
+
+bool lowerMacrosAfterOptimizations(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3LowerToAir.cpp b/Source/JavaScriptCore/b3/B3LowerToAir.cpp
new file mode 100644
index 000000000..29a4379dc
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerToAir.cpp
@@ -0,0 +1,2899 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3LowerToAir.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirStackSlot.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BlockWorklist.h"
+#include "B3CCallValue.h"
+#include "B3CheckSpecial.h"
+#include "B3Commutativity.h"
+#include "B3Dominators.h"
+#include "B3FenceValue.h"
+#include "B3MemoryValue.h"
+#include "B3PatchpointSpecial.h"
+#include "B3PatchpointValue.h"
+#include "B3PhaseScope.h"
+#include "B3PhiChildren.h"
+#include "B3Procedure.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "B3WasmAddressValue.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+#include <wtf/ListDump.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+namespace {
+
+const bool verbose = false;
+
+class LowerToAir {
+public:
+ LowerToAir(Procedure& procedure)
+ : m_valueToTmp(procedure.values().size())
+ , m_phiToTmp(procedure.values().size())
+ , m_blockToBlock(procedure.size())
+ , m_useCounts(procedure)
+ , m_phiChildren(procedure)
+ , m_dominators(procedure.dominators())
+ , m_procedure(procedure)
+ , m_code(procedure.code())
+ {
+ }
+
+ void run()
+ {
+ for (B3::BasicBlock* block : m_procedure)
+ m_blockToBlock[block] = m_code.addBlock(block->frequency());
+
+ for (Value* value : m_procedure.values()) {
+ switch (value->opcode()) {
+ case Phi: {
+ m_phiToTmp[value] = m_code.newTmp(Arg::typeForB3Type(value->type()));
+ if (verbose)
+ dataLog("Phi tmp for ", *value, ": ", m_phiToTmp[value], "\n");
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ for (B3::StackSlot* stack : m_procedure.stackSlots())
+ m_stackToStack.add(stack, m_code.addStackSlot(stack));
+ for (Variable* variable : m_procedure.variables())
+ m_variableToTmp.add(variable, m_code.newTmp(Arg::typeForB3Type(variable->type())));
+
+ // Figure out which blocks are not rare.
+ m_fastWorklist.push(m_procedure[0]);
+ while (B3::BasicBlock* block = m_fastWorklist.pop()) {
+ for (B3::FrequentedBlock& successor : block->successors()) {
+ if (!successor.isRare())
+ m_fastWorklist.push(successor.block());
+ }
+ }
+
+ m_procedure.resetValueOwners(); // Used by crossesInterference().
+
+ // Lower defs before uses on a global level. This is a good heuristic to lock down a
+ // hoisted address expression before we duplicate it back into the loop.
+ for (B3::BasicBlock* block : m_procedure.blocksInPreOrder()) {
+ m_block = block;
+ // Reset some state.
+ m_insts.resize(0);
+
+ m_isRare = !m_fastWorklist.saw(block);
+
+ if (verbose)
+ dataLog("Lowering Block ", *block, ":\n");
+
+ // Process blocks in reverse order so we see uses before defs. That's what allows us
+ // to match patterns effectively.
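+            // For example, visiting a Load before the Add that computes its address lets the
+            // address computation be matched into the load's address Arg instead of being pinned
+            // to a Tmp of its own.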
+ for (unsigned i = block->size(); i--;) {
+ m_index = i;
+ m_value = block->at(i);
+ if (m_locked.contains(m_value))
+ continue;
+ m_insts.append(Vector<Inst>());
+ if (verbose)
+ dataLog("Lowering ", deepDump(m_procedure, m_value), ":\n");
+ lower();
+ if (verbose) {
+ for (Inst& inst : m_insts.last())
+ dataLog(" ", inst, "\n");
+ }
+ }
+
+ // Now append the instructions. m_insts contains them in reverse order, so we process
+ // it in reverse.
+ for (unsigned i = m_insts.size(); i--;) {
+ for (Inst& inst : m_insts[i])
+ m_blockToBlock[block]->appendInst(WTFMove(inst));
+ }
+
+ // Make sure that the successors are set up correctly.
+ for (B3::FrequentedBlock successor : block->successors()) {
+ m_blockToBlock[block]->successors().append(
+ Air::FrequentedBlock(m_blockToBlock[successor.block()], successor.frequency()));
+ }
+ }
+
+ Air::InsertionSet insertionSet(m_code);
+ for (Inst& inst : m_prologue)
+ insertionSet.insertInst(0, WTFMove(inst));
+ insertionSet.execute(m_code[0]);
+ }
+
+private:
+ bool shouldCopyPropagate(Value* value)
+ {
+ switch (value->opcode()) {
+ case Trunc:
+ case Identity:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ class ArgPromise {
+ WTF_MAKE_NONCOPYABLE(ArgPromise);
+ public:
+ ArgPromise() { }
+
+ ArgPromise(const Arg& arg, Value* valueToLock = nullptr)
+ : m_arg(arg)
+ , m_value(valueToLock)
+ {
+ }
+
+ void swap(ArgPromise& other)
+ {
+ std::swap(m_arg, other.m_arg);
+ std::swap(m_value, other.m_value);
+ std::swap(m_wasConsumed, other.m_wasConsumed);
+ std::swap(m_wasWrapped, other.m_wasWrapped);
+ std::swap(m_traps, other.m_traps);
+ }
+
+ ArgPromise(ArgPromise&& other)
+ {
+ swap(other);
+ }
+
+ ArgPromise& operator=(ArgPromise&& other)
+ {
+ swap(other);
+ return *this;
+ }
+
+ ~ArgPromise()
+ {
+ if (m_wasConsumed)
+ RELEASE_ASSERT(m_wasWrapped);
+ }
+
+ void setTraps(bool value)
+ {
+ m_traps = value;
+ }
+
+ static ArgPromise tmp(Value* value)
+ {
+ ArgPromise result;
+ result.m_value = value;
+ return result;
+ }
+
+ explicit operator bool() const { return m_arg || m_value; }
+
+ Arg::Kind kind() const
+ {
+ if (!m_arg && m_value)
+ return Arg::Tmp;
+ return m_arg.kind();
+ }
+
+ const Arg& peek() const
+ {
+ return m_arg;
+ }
+
+ Arg consume(LowerToAir& lower)
+ {
+ m_wasConsumed = true;
+ if (!m_arg && m_value)
+ return lower.tmp(m_value);
+ if (m_value)
+ lower.commitInternal(m_value);
+ return m_arg;
+ }
+
+ template<typename... Args>
+ Inst inst(Args&&... args)
+ {
+ Inst result(std::forward<Args>(args)...);
+ result.kind.traps |= m_traps;
+ m_wasWrapped = true;
+ return result;
+ }
+
+ private:
+        // Four forms:
+ // Everything null: invalid.
+ // Arg non-null, value null: just use the arg, nothing special.
+ // Arg null, value non-null: it's a tmp, pin it when necessary.
+ // Arg non-null, value non-null: use the arg, lock the value.
+ Arg m_arg;
+ Value* m_value { nullptr };
+ bool m_wasConsumed { false };
+ bool m_wasWrapped { false };
+ bool m_traps { false };
+ };
+
+ // Consider using tmpPromise() in cases where you aren't sure that you want to pin the value yet.
+ // Here are three canonical ways of using tmp() and tmpPromise():
+ //
+ // Idiom #1: You know that you want a tmp() and you know that it will be valid for the
+ // instruction you're emitting.
+ //
+ // append(Foo, tmp(bar));
+ //
+ // Idiom #2: You don't know if you want to use a tmp() because you haven't determined if the
+ // instruction will accept it, so you query first. Note that the call to tmp() happens only after
+ // you are sure that you will use it.
+ //
+ // if (isValidForm(Foo, Arg::Tmp))
+ // append(Foo, tmp(bar))
+ //
+ // Idiom #3: Same as Idiom #2, but using tmpPromise. Notice that this calls consume() only after
+ // it's sure it will use the tmp. That's deliberate. Also note that you're required to pass any
+ // Inst you create with consumed promises through that promise's inst() function.
+ //
+ // ArgPromise promise = tmpPromise(bar);
+ // if (isValidForm(Foo, promise.kind()))
+ // append(promise.inst(Foo, promise.consume(*this)))
+ //
+ // In both idiom #2 and idiom #3, we don't pin the value to a temporary except when we actually
+ // emit the instruction. Both tmp() and tmpPromise().consume(*this) will pin it. Pinning means
+ // that we will henceforth require that the value of 'bar' is generated as a separate
+ // instruction. We don't want to pin the value to a temporary if we might change our minds, and
+ // pass an address operand representing 'bar' to Foo instead.
+ //
+ // Because tmp() pins, the following is not an idiom you should use:
+ //
+ // Tmp tmp = this->tmp(bar);
+ // if (isValidForm(Foo, tmp.kind()))
+ // append(Foo, tmp);
+ //
+ // That's because if isValidForm() returns false, you will have already pinned the 'bar' to a
+ // temporary. You might later want to try to do something like loadPromise(), and that will fail.
+    // This arises in operations that have both Addr,Tmp and Tmp,Addr forms. The following code
+ // seems right, but will actually fail to ever match the Tmp,Addr form because by then, the right
+ // value is already pinned.
+ //
+ // auto tryThings = [this] (const Arg& left, const Arg& right) {
+ // if (isValidForm(Foo, left.kind(), right.kind()))
+ // return Inst(Foo, m_value, left, right);
+ // return Inst();
+ // };
+ // if (Inst result = tryThings(loadAddr(left), tmp(right)))
+ // return result;
+ // if (Inst result = tryThings(tmp(left), loadAddr(right))) // this never succeeds.
+ // return result;
+ // return Inst(Foo, m_value, tmp(left), tmp(right));
+ //
+ // If you imagine that loadAddr(value) is just loadPromise(value).consume(*this), then this code
+ // will run correctly - it will generate OK code - but the second form is never matched.
+ // loadAddr(right) will never succeed because it will observe that 'right' is already pinned.
+ // Of course, it's exactly because of the risky nature of such code that we don't have a
+ // loadAddr() helper and require you to balance ArgPromise's in code like this. Such code will
+ // work fine if written as:
+ //
+ // auto tryThings = [this] (ArgPromise& left, ArgPromise& right) {
+ // if (isValidForm(Foo, left.kind(), right.kind()))
+ // return left.inst(right.inst(Foo, m_value, left.consume(*this), right.consume(*this)));
+ // return Inst();
+ // };
+ // if (Inst result = tryThings(loadPromise(left), tmpPromise(right)))
+ // return result;
+ // if (Inst result = tryThings(tmpPromise(left), loadPromise(right)))
+ // return result;
+ // return Inst(Foo, m_value, tmp(left), tmp(right));
+ //
+ // Notice that we did use tmp in the fall-back case at the end, because by then, we know for sure
+ // that we want a tmp. But using tmpPromise in the tryThings() calls ensures that doing so
+ // doesn't prevent us from trying loadPromise on the same value.
+ Tmp tmp(Value* value)
+ {
+ Tmp& tmp = m_valueToTmp[value];
+ if (!tmp) {
+ while (shouldCopyPropagate(value))
+ value = value->child(0);
+
+ if (value->opcode() == FramePointer)
+ return Tmp(GPRInfo::callFrameRegister);
+
+ Tmp& realTmp = m_valueToTmp[value];
+ if (!realTmp) {
+ realTmp = m_code.newTmp(Arg::typeForB3Type(value->type()));
+ if (m_procedure.isFastConstant(value->key()))
+ m_code.addFastTmp(realTmp);
+ if (verbose)
+ dataLog("Tmp for ", *value, ": ", realTmp, "\n");
+ }
+ tmp = realTmp;
+ }
+ return tmp;
+ }
+
+ ArgPromise tmpPromise(Value* value)
+ {
+ return ArgPromise::tmp(value);
+ }
+
+ bool canBeInternal(Value* value)
+ {
+ // If one of the internal things has already been computed, then we don't want to cause
+ // it to be recomputed again.
+ if (m_valueToTmp[value])
+ return false;
+
+ // We require internals to have only one use - us. It's not clear if this should be numUses() or
+ // numUsingInstructions(). Ideally, it would be numUsingInstructions(), except that it's not clear
+ // if we'd actually do the right thing when matching over such a DAG pattern. For now, it simply
+ // doesn't matter because we don't implement patterns that would trigger this.
+ if (m_useCounts.numUses(value) != 1)
+ return false;
+
+ return true;
+ }
+
+ // If you ask canBeInternal() and then construct something from that, and you commit to emitting
+ // that code, then you must commitInternal() on that value. This is tricky, and you only need to
+ // do it if you're pattern matching by hand rather than using the patterns language. Long story
+ // short, you should avoid this by using the pattern matcher to match patterns.
+ void commitInternal(Value* value)
+ {
+ if (value)
+ m_locked.add(value);
+ }
+
+ bool crossesInterference(Value* value)
+ {
+ // If it's in a foreign block, then be conservative. We could handle this if we were
+ // willing to do heavier analysis. For example, if we had liveness, then we could label
+ // values as "crossing interference" if they interfere with anything that they are live
+ // across. But, it's not clear how useful this would be.
+ if (value->owner != m_value->owner)
+ return true;
+
+ Effects effects = value->effects();
+
+ for (unsigned i = m_index; i--;) {
+ Value* otherValue = m_block->at(i);
+ if (otherValue == value)
+ return false;
+ if (effects.interferes(otherValue->effects()))
+ return true;
+ }
+
+ ASSERT_NOT_REACHED();
+ return true;
+ }
+
+ std::optional<unsigned> scaleForShl(Value* shl, int32_t offset, std::optional<Arg::Width> width = std::nullopt)
+ {
+ if (shl->opcode() != Shl)
+ return std::nullopt;
+ if (!shl->child(1)->hasInt32())
+ return std::nullopt;
+ unsigned logScale = shl->child(1)->asInt32();
+ if (shl->type() == Int32)
+ logScale &= 31;
+ else
+ logScale &= 63;
+ // Use 64-bit math to perform the shift so that <<32 does the right thing, but then switch
+ // to signed since that's what all of our APIs want.
+ int64_t bigScale = static_cast<uint64_t>(1) << static_cast<uint64_t>(logScale);
+ if (!isRepresentableAs<int32_t>(bigScale))
+ return std::nullopt;
+ unsigned scale = static_cast<int32_t>(bigScale);
+ if (!Arg::isValidIndexForm(scale, offset, width))
+ return std::nullopt;
+ return scale;
+ }
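+
+ // For example (illustrative): given Shl(@x, $3), logScale is 3 and the scale is 8, which we
+ // return only if Arg::isValidIndexForm(8, offset, width) holds on this target; anything not
+ // encodable as an index scale makes this return nullopt.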
+
+ // This turns the given operand into an address.
+ Arg effectiveAddr(Value* address, int32_t offset, Arg::Width width)
+ {
+ ASSERT(Arg::isValidAddrForm(offset, width));
+
+ auto fallback = [&] () -> Arg {
+ return Arg::addr(tmp(address), offset);
+ };
+
+ static const unsigned lotsOfUses = 10; // This is arbitrary and we should tune it eventually.
+
+ // Only match if the address value isn't used in some large number of places.
+ if (m_useCounts.numUses(address) > lotsOfUses)
+ return fallback();
+
+ switch (address->opcode()) {
+ case Add: {
+ Value* left = address->child(0);
+ Value* right = address->child(1);
+
+ auto tryIndex = [&] (Value* index, Value* base) -> Arg {
+ std::optional<unsigned> scale = scaleForShl(index, offset, width);
+ if (!scale)
+ return Arg();
+ if (m_locked.contains(index->child(0)) || m_locked.contains(base))
+ return Arg();
+ return Arg::index(tmp(base), tmp(index->child(0)), *scale, offset);
+ };
+
+ if (Arg result = tryIndex(left, right))
+ return result;
+ if (Arg result = tryIndex(right, left))
+ return result;
+
+ if (m_locked.contains(left) || m_locked.contains(right)
+ || !Arg::isValidIndexForm(1, offset, width))
+ return fallback();
+
+ return Arg::index(tmp(left), tmp(right), 1, offset);
+ }
+
+ case Shl: {
+ Value* left = address->child(0);
+
+ // We'll never see child(1)->isInt32(0), since that would have been reduced. If the shift
+ // amount is greater than 1, then there isn't really anything smart that we could do here.
+ // We avoid using baseless indexes because their encoding isn't particularly efficient.
+ if (m_locked.contains(left) || !address->child(1)->isInt32(1)
+ || !Arg::isValidIndexForm(1, offset, width))
+ return fallback();
+
+ return Arg::index(tmp(left), tmp(left), 1, offset);
+ }
+
+ case FramePointer:
+ return Arg::addr(Tmp(GPRInfo::callFrameRegister), offset);
+
+ case SlotBase:
+ return Arg::stack(m_stackToStack.get(address->as<SlotBaseValue>()->slot()), offset);
+
+ case WasmAddress: {
+ WasmAddressValue* wasmAddress = address->as<WasmAddressValue>();
+ Value* pointer = wasmAddress->child(0);
+ ASSERT(Arg::isValidIndexForm(1, offset, width));
+ if (m_locked.contains(pointer))
+ return fallback();
+
+ // FIXME: We should support ARM64 LDR 32-bit addressing, which will
+ // allow us to fuse a Shl ptr, 2 into the address. Additionally, and
+ // perhaps more importantly, it would allow us to avoid a truncating
+ // move. See: https://bugs.webkit.org/show_bug.cgi?id=163465
+
+ return Arg::index(Tmp(wasmAddress->pinnedGPR()), tmp(pointer), 1, offset);
+ }
+
+ default:
+ return fallback();
+ }
+ }
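+
+ // For example (illustrative): effectiveAddr(Add(@base, Shl(@index, $2)), 8, Width32) can produce
+ // Arg::index(tmp(@base), tmp(@index), 4, 8), i.e. "8(%base,%index,4)" on x86-64, assuming neither
+ // child is locked and that index form is valid on the target.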
+
+ // This gives you the address of the given Load or Store. If it's not a Load or Store, then
+ // it returns Arg().
+ Arg addr(Value* memoryValue)
+ {
+ MemoryValue* value = memoryValue->as<MemoryValue>();
+ if (!value)
+ return Arg();
+
+ int32_t offset = value->offset();
+ Arg::Width width = Arg::widthForBytes(value->accessByteSize());
+
+ Arg result = effectiveAddr(value->lastChild(), offset, width);
+ ASSERT(result.isValidForm(width));
+
+ return result;
+ }
+
+ template<typename... Args>
+ Inst trappingInst(bool traps, Args&&... args)
+ {
+ Inst result(std::forward<Args>(args)...);
+ result.kind.traps |= traps;
+ return result;
+ }
+
+ template<typename... Args>
+ Inst trappingInst(Value* value, Args&&... args)
+ {
+ return trappingInst(value->traps(), std::forward<Args>(args)...);
+ }
+
+ ArgPromise loadPromiseAnyOpcode(Value* loadValue)
+ {
+ if (!canBeInternal(loadValue))
+ return Arg();
+ if (crossesInterference(loadValue))
+ return Arg();
+ ArgPromise result(addr(loadValue), loadValue);
+ if (loadValue->traps())
+ result.setTraps(true);
+ return result;
+ }
+
+ ArgPromise loadPromise(Value* loadValue, B3::Opcode loadOpcode)
+ {
+ if (loadValue->opcode() != loadOpcode)
+ return Arg();
+ return loadPromiseAnyOpcode(loadValue);
+ }
+
+ ArgPromise loadPromise(Value* loadValue)
+ {
+ return loadPromise(loadValue, Load);
+ }
+
+ Arg imm(int64_t intValue)
+ {
+ if (Arg::isValidImmForm(intValue))
+ return Arg::imm(intValue);
+ return Arg();
+ }
+
+ Arg imm(Value* value)
+ {
+ if (value->hasInt())
+ return imm(value->asInt());
+ return Arg();
+ }
+
+ Arg bitImm(Value* value)
+ {
+ if (value->hasInt()) {
+ int64_t intValue = value->asInt();
+ if (Arg::isValidBitImmForm(intValue))
+ return Arg::bitImm(intValue);
+ }
+ return Arg();
+ }
+
+ Arg bitImm64(Value* value)
+ {
+ if (value->hasInt()) {
+ int64_t intValue = value->asInt();
+ if (Arg::isValidBitImm64Form(intValue))
+ return Arg::bitImm64(intValue);
+ }
+ return Arg();
+ }
+
+ Arg immOrTmp(Value* value)
+ {
+ if (Arg result = imm(value))
+ return result;
+ return tmp(value);
+ }
+
+ // By convention, we use Oops to mean "I don't know".
+ Air::Opcode tryOpcodeForType(
+ Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type)
+ {
+ Air::Opcode opcode;
+ switch (type) {
+ case Int32:
+ opcode = opcode32;
+ break;
+ case Int64:
+ opcode = opcode64;
+ break;
+ case Float:
+ opcode = opcodeFloat;
+ break;
+ case Double:
+ opcode = opcodeDouble;
+ break;
+ default:
+ opcode = Air::Oops;
+ break;
+ }
+
+ return opcode;
+ }
+
+ Air::Opcode tryOpcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
+ {
+ return tryOpcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
+ }
+
+ Air::Opcode opcodeForType(
+ Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type)
+ {
+ Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, type);
+ RELEASE_ASSERT(opcode != Air::Oops);
+ return opcode;
+ }
+
+ Air::Opcode opcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
+ {
+ return opcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
+ }
+
+ template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble = Air::Oops, Air::Opcode opcodeFloat = Air::Oops>
+ void appendUnOp(Value* value)
+ {
+ Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, value->type());
+
+ Tmp result = tmp(m_value);
+
+ // Two operand forms like:
+ // Op a, b
+ // mean something like:
+ // b = Op a
+
+ ArgPromise addr = loadPromise(value);
+ if (isValidForm(opcode, addr.kind(), Arg::Tmp)) {
+ append(addr.inst(opcode, m_value, addr.consume(*this), result));
+ return;
+ }
+
+ if (isValidForm(opcode, Arg::Tmp, Arg::Tmp)) {
+ append(opcode, tmp(value), result);
+ return;
+ }
+
+ ASSERT(value->type() == m_value->type());
+ append(relaxedMoveForType(m_value->type()), tmp(value), result);
+ append(opcode, result);
+ }
+
+ // Call this method when doing two-operand lowering of a commutative operation. You have a choice of
+ // which incoming Value is moved into the result. This will select which one is likely to be most
+ // profitable to use as the result. Doing the right thing can have big performance consequences in tight
+ // kernels.
+ bool preferRightForResult(Value* left, Value* right)
+ {
+ // The default is to move left into result, because that's required for non-commutative instructions.
+ // The value that we want to move into result position is the one that dies here. So, if we're
+ // compiling a commutative operation and we know that actually right is the one that dies right here,
+ // then we can flip things around to help coalescing, which then kills the move instruction.
+ //
+ // But it's more complicated:
+ // - Used-once is a bad estimate of whether the variable dies here.
+ // - A child might be a candidate for coalescing with this value.
+ //
+ // Currently, we have machinery in place to recognize super obvious forms of the latter issue.
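+ //
+ // As a rough illustration: for a commutative Add32 where only 'right' dies here, we would rather
+ // emit "Move right, result; Add32 left, result" so that the register allocator can coalesce
+ // 'right' with 'result' and delete the Move.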
+
+ // We recognize when a child is a Phi that has this value as one of its children. We're very
+ // conservative about this; for example we don't even consider transitive Phi children.
+ bool leftIsPhiWithThis = m_phiChildren[left].transitivelyUses(m_value);
+ bool rightIsPhiWithThis = m_phiChildren[right].transitivelyUses(m_value);
+
+ if (leftIsPhiWithThis != rightIsPhiWithThis)
+ return rightIsPhiWithThis;
+
+ if (m_useCounts.numUsingInstructions(right) != 1)
+ return false;
+
+ if (m_useCounts.numUsingInstructions(left) != 1)
+ return true;
+
+ // The use count might be 1 if the variable is live around a loop. We can guarantee that we
+ // pick the variable that is least likely to suffer this problem if we pick the one that
+ // is closest to us in an idom walk. By convention, we slightly bias this in favor of
+ // returning true.
+
+ // We cannot prefer right if right is further away in an idom walk.
+ if (m_dominators.strictlyDominates(right->owner, left->owner))
+ return false;
+
+ return true;
+ }
+
+ template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Commutativity commutativity = NotCommutative>
+ void appendBinOp(Value* left, Value* right)
+ {
+ Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, left->type());
+
+ Tmp result = tmp(m_value);
+
+ // Three-operand forms like:
+ // Op a, b, c
+ // mean something like:
+ // c = a Op b
+
+ if (isValidForm(opcode, Arg::Imm, Arg::Tmp, Arg::Tmp)) {
+ if (commutativity == Commutative) {
+ if (imm(right)) {
+ append(opcode, imm(right), tmp(left), result);
+ return;
+ }
+ } else {
+ // A non-commutative operation could have an immediate in left.
+ if (imm(left)) {
+ append(opcode, imm(left), tmp(right), result);
+ return;
+ }
+ }
+ }
+
+ if (isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) {
+ if (commutativity == Commutative) {
+ if (Arg rightArg = bitImm(right)) {
+ append(opcode, rightArg, tmp(left), result);
+ return;
+ }
+ } else {
+ // A non-commutative operation could have an immediate in left.
+ if (Arg leftArg = bitImm(left)) {
+ append(opcode, leftArg, tmp(right), result);
+ return;
+ }
+ }
+ }
+
+ if (isValidForm(opcode, Arg::BitImm64, Arg::Tmp, Arg::Tmp)) {
+ if (commutativity == Commutative) {
+ if (Arg rightArg = bitImm64(right)) {
+ append(opcode, rightArg, tmp(left), result);
+ return;
+ }
+ } else {
+ // A non-commutative operation could have an immediate in left.
+ if (Arg leftArg = bitImm64(left)) {
+ append(opcode, leftArg, tmp(right), result);
+ return;
+ }
+ }
+ }
+
+ if (imm(right) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+ append(opcode, tmp(left), imm(right), result);
+ return;
+ }
+
+ // Note that no extant architecture has a three-operand form of binary operations that also
+ // load from memory. If such an abomination did exist, we would handle it somewhere around
+ // here.
+
+ // Two-operand forms like:
+ // Op a, b
+ // mean something like:
+ // b = b Op a
+
+ // At this point, we prefer versions of the operation that have a fused load or an immediate
+ // over three operand forms.
+
+ if (left != right) {
+ ArgPromise leftAddr = loadPromise(left);
+ if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp, Arg::Tmp)) {
+ append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), tmp(right), result));
+ return;
+ }
+
+ if (commutativity == Commutative) {
+ if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp)) {
+ append(relaxedMoveForType(m_value->type()), tmp(right), result);
+ append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), result));
+ return;
+ }
+ }
+
+ ArgPromise rightAddr = loadPromise(right);
+ if (isValidForm(opcode, Arg::Tmp, rightAddr.kind(), Arg::Tmp)) {
+ append(rightAddr.inst(opcode, m_value, tmp(left), rightAddr.consume(*this), result));
+ return;
+ }
+
+ if (commutativity == Commutative) {
+ if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp, Arg::Tmp)) {
+ append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), tmp(left), result));
+ return;
+ }
+ }
+
+ if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp)) {
+ append(relaxedMoveForType(m_value->type()), tmp(left), result);
+ append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), result));
+ return;
+ }
+ }
+
+ if (imm(right) && isValidForm(opcode, Arg::Imm, Arg::Tmp)) {
+ append(relaxedMoveForType(m_value->type()), tmp(left), result);
+ append(opcode, imm(right), result);
+ return;
+ }
+
+ if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+ append(opcode, tmp(left), tmp(right), result);
+ return;
+ }
+
+ if (commutativity == Commutative && preferRightForResult(left, right)) {
+ append(relaxedMoveForType(m_value->type()), tmp(right), result);
+ append(opcode, tmp(left), result);
+ return;
+ }
+
+ append(relaxedMoveForType(m_value->type()), tmp(left), result);
+ append(opcode, tmp(right), result);
+ }
+
+ template<Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative>
+ void appendBinOp(Value* left, Value* right)
+ {
+ appendBinOp<opcode32, opcode64, Air::Oops, Air::Oops, commutativity>(left, right);
+ }
+
+ template<Air::Opcode opcode32, Air::Opcode opcode64>
+ void appendShift(Value* value, Value* amount)
+ {
+ Air::Opcode opcode = opcodeForType(opcode32, opcode64, value->type());
+
+ if (imm(amount)) {
+ if (isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+ append(opcode, tmp(value), imm(amount), tmp(m_value));
+ return;
+ }
+ if (isValidForm(opcode, Arg::Imm, Arg::Tmp)) {
+ append(Move, tmp(value), tmp(m_value));
+ append(opcode, imm(amount), tmp(m_value));
+ return;
+ }
+ }
+
+ if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+ append(opcode, tmp(value), tmp(amount), tmp(m_value));
+ return;
+ }
+
+#if CPU(X86) || CPU(X86_64)
+ append(Move, tmp(value), tmp(m_value));
+ append(Move, tmp(amount), Tmp(X86Registers::ecx));
+ append(opcode, Tmp(X86Registers::ecx), tmp(m_value));
+#endif
+ }
+
+ template<Air::Opcode opcode32, Air::Opcode opcode64>
+ bool tryAppendStoreUnOp(Value* value)
+ {
+ Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, value->type());
+ if (opcode == Air::Oops)
+ return false;
+
+ Arg storeAddr = addr(m_value);
+ ASSERT(storeAddr);
+
+ ArgPromise loadPromise = this->loadPromise(value);
+ if (loadPromise.peek() != storeAddr)
+ return false;
+
+ if (!isValidForm(opcode, storeAddr.kind()))
+ return false;
+
+ loadPromise.consume(*this);
+ append(trappingInst(m_value, loadPromise.inst(opcode, m_value, storeAddr)));
+ return true;
+ }
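+
+ // Illustrative examples of what these store helpers match: Store(BitXor(Load(@p), $-1), @p) can
+ // become a single Not32 to memory via the unary helper above, and Store(Add(Load(@p), @x), @p)
+ // can become an Add32 to memory via the binary helper below, provided the fused load's address
+ // equals the store's address (checked with loadPromise.peek() == storeAddr).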
+
+ template<
+ Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative>
+ bool tryAppendStoreBinOp(Value* left, Value* right)
+ {
+ Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, left->type());
+ if (opcode == Air::Oops)
+ return false;
+
+ Arg storeAddr = addr(m_value);
+ ASSERT(storeAddr);
+
+ auto getLoadPromise = [&] (Value* load) -> ArgPromise {
+ switch (m_value->opcode()) {
+ case B3::Store:
+ if (load->opcode() != B3::Load)
+ return ArgPromise();
+ break;
+ case B3::Store8:
+ if (load->opcode() != B3::Load8Z && load->opcode() != B3::Load8S)
+ return ArgPromise();
+ break;
+ case B3::Store16:
+ if (load->opcode() != B3::Load16Z && load->opcode() != B3::Load16S)
+ return ArgPromise();
+ break;
+ default:
+ return ArgPromise();
+ }
+ return loadPromiseAnyOpcode(load);
+ };
+
+ ArgPromise loadPromise;
+ Value* otherValue = nullptr;
+
+ loadPromise = getLoadPromise(left);
+ if (loadPromise.peek() == storeAddr)
+ otherValue = right;
+ else if (commutativity == Commutative) {
+ loadPromise = getLoadPromise(right);
+ if (loadPromise.peek() == storeAddr)
+ otherValue = left;
+ }
+
+ if (!otherValue)
+ return false;
+
+ if (isValidForm(opcode, Arg::Imm, storeAddr.kind()) && imm(otherValue)) {
+ loadPromise.consume(*this);
+ append(trappingInst(m_value, loadPromise.inst(opcode, m_value, imm(otherValue), storeAddr)));
+ return true;
+ }
+
+ if (!isValidForm(opcode, Arg::Tmp, storeAddr.kind()))
+ return false;
+
+ loadPromise.consume(*this);
+ append(trappingInst(m_value, loadPromise.inst(opcode, m_value, tmp(otherValue), storeAddr)));
+ return true;
+ }
+
+ Inst createStore(Air::Opcode move, Value* value, const Arg& dest)
+ {
+ if (imm(value) && isValidForm(move, Arg::Imm, dest.kind()))
+ return Inst(move, m_value, imm(value), dest);
+
+ return Inst(move, m_value, tmp(value), dest);
+ }
+
+ Inst createStore(Value* value, const Arg& dest)
+ {
+ Air::Opcode moveOpcode = moveForType(value->type());
+ return createStore(moveOpcode, value, dest);
+ }
+
+ template<typename... Args>
+ void appendStore(Args&&... args)
+ {
+ append(trappingInst(m_value, createStore(std::forward<Args>(args)...)));
+ }
+
+ Air::Opcode moveForType(Type type)
+ {
+ switch (type) {
+ case Int32:
+ return Move32;
+ case Int64:
+ RELEASE_ASSERT(is64Bit());
+ return Move;
+ case Float:
+ return MoveFloat;
+ case Double:
+ return MoveDouble;
+ case Void:
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return Air::Oops;
+ }
+
+ Air::Opcode relaxedMoveForType(Type type)
+ {
+ switch (type) {
+ case Int32:
+ case Int64:
+ // For Int32, we could return Move or Move32. It's a trade-off.
+ //
+ // Move32: Using Move32 guarantees that we use the narrower move, but in cases where the
+ // register allocator can't prove that the variables involved are 32-bit, this will
+ // disable coalescing.
+ //
+ // Move: Using Move guarantees that the register allocator can coalesce normally, but in
+ // cases where it can't prove that the variables are 32-bit and it doesn't coalesce,
+ // this will force us to use a full 64-bit Move instead of the slightly cheaper
+ // 32-bit Move32.
+ //
+ // Coalescing is a lot more profitable than turning Move into Move32. So, it's better to
+ // use Move here because in cases where the register allocator cannot prove that
+ // everything is 32-bit, we still get coalescing.
+ return Move;
+ case Float:
+ // MoveFloat is always coalescable and we never convert MoveDouble to MoveFloat, so we
+ // should use MoveFloat when we know that the temporaries involved are 32-bit.
+ return MoveFloat;
+ case Double:
+ return MoveDouble;
+ case Void:
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return Air::Oops;
+ }
+
+ template<typename... Arguments>
+ void append(Air::Opcode opcode, Arguments&&... arguments)
+ {
+ m_insts.last().append(Inst(opcode, m_value, std::forward<Arguments>(arguments)...));
+ }
+
+ void append(Inst&& inst)
+ {
+ m_insts.last().append(WTFMove(inst));
+ }
+ void append(const Inst& inst)
+ {
+ m_insts.last().append(inst);
+ }
+
+ template<typename T, typename... Arguments>
+ T* ensureSpecial(T*& field, Arguments&&... arguments)
+ {
+ if (!field) {
+ field = static_cast<T*>(
+ m_code.addSpecial(std::make_unique<T>(std::forward<Arguments>(arguments)...)));
+ }
+ return field;
+ }
+
+ template<typename... Arguments>
+ CheckSpecial* ensureCheckSpecial(Arguments&&... arguments)
+ {
+ CheckSpecial::Key key(std::forward<Arguments>(arguments)...);
+ auto result = m_checkSpecials.add(key, nullptr);
+ return ensureSpecial(result.iterator->value, key);
+ }
+
+ void fillStackmap(Inst& inst, StackmapValue* stackmap, unsigned numSkipped)
+ {
+ for (unsigned i = numSkipped; i < stackmap->numChildren(); ++i) {
+ ConstrainedValue value = stackmap->constrainedChild(i);
+
+ Arg arg;
+ switch (value.rep().kind()) {
+ case ValueRep::WarmAny:
+ case ValueRep::ColdAny:
+ case ValueRep::LateColdAny:
+ if (imm(value.value()))
+ arg = imm(value.value());
+ else if (value.value()->hasInt64())
+ arg = Arg::bigImm(value.value()->asInt64());
+ else if (value.value()->hasDouble() && canBeInternal(value.value())) {
+ commitInternal(value.value());
+ arg = Arg::bigImm(bitwise_cast<int64_t>(value.value()->asDouble()));
+ } else
+ arg = tmp(value.value());
+ break;
+ case ValueRep::SomeRegister:
+ arg = tmp(value.value());
+ break;
+ case ValueRep::LateRegister:
+ case ValueRep::Register:
+ stackmap->earlyClobbered().clear(value.rep().reg());
+ arg = Tmp(value.rep().reg());
+ append(relaxedMoveForType(value.value()->type()), immOrTmp(value.value()), arg);
+ break;
+ case ValueRep::StackArgument:
+ arg = Arg::callArg(value.rep().offsetFromSP());
+ appendStore(value.value(), arg);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ inst.args.append(arg);
+ }
+ }
+
+ // Create an Inst to do the comparison specified by the given value.
+ template<typename CompareFunctor, typename TestFunctor, typename CompareDoubleFunctor, typename CompareFloatFunctor>
+ Inst createGenericCompare(
+ Value* value,
+ const CompareFunctor& compare, // Signature: (Arg::Width, Arg relCond, Arg, Arg) -> Inst
+ const TestFunctor& test, // Signature: (Arg::Width, Arg resCond, Arg, Arg) -> Inst
+ const CompareDoubleFunctor& compareDouble, // Signature: (Arg doubleCond, Arg, Arg) -> Inst
+ const CompareFloatFunctor& compareFloat, // Signature: (Arg doubleCond, Arg, Arg) -> Inst
+ bool inverted = false)
+ {
+ // NOTE: This is totally happy to match comparisons that have already been computed elsewhere
+ // since on most architectures, the cost of branching on a previously computed comparison
+ // result is almost always higher than just doing another fused compare/branch. The only time
+ // it could be worse is if we have a binary comparison and both operands are variables (not
+ // constants), and we encounter register pressure. Even in this case, duplicating the compare
+ // so that we can fuse it to the branch will be more efficient most of the time, since
+ // register pressure is not *that* common. For this reason, this algorithm will always
+ // duplicate the comparison.
+ //
+ // However, we cannot duplicate loads. The canBeInternal() on a load will assume that we
+ // already validated canBeInternal() on all of the values that got us to the load. So, even
+ // if we are sharing a value, we still need to call canBeInternal() for the purpose of
+ // tracking whether we are still in good shape to fuse loads.
+ //
+ // We could even have a chain of compare values that we fuse, and any member of the chain
+ // could be shared. Once any of them are shared, then the shared one's transitive children
+ // cannot be locked (i.e. commitInternal()). But if none of them are shared, then we want to
+ // lock all of them because that's a prerequisite to fusing the loads so that the loads don't
+ // get duplicated. For example, we might have:
+ //
+ // @tmp1 = LessThan(@a, @b)
+ // @tmp2 = Equal(@tmp1, 0)
+ // Branch(@tmp2)
+ //
+ // If either @a or @b are loads, then we want to have locked @tmp1 and @tmp2 so that they
+ // don't emit the loads a second time. But if we had another use of @tmp2, then we cannot
+ // lock @tmp1 (or @a or @b) because then we'll get into trouble when the other values that
+ // try to share @tmp1 with us try to do their lowering.
+ //
+ // There's one more wrinkle. If we don't lock an internal value, then this internal value may
+ // have already separately locked its children. So, if we're not locking a value then we need
+ // to make sure that its children aren't locked. We encapsulate this in two ways:
+ //
+ // canCommitInternal: This variable tells us if the values that we've fused so far are
+ // locked. This means that we're not sharing any of them with anyone. This permits us to fuse
+ // loads. If it's false, then we cannot fuse loads and we also need to ensure that the
+ // children of any values we try to fuse-by-sharing are not already locked. You don't have to
+ // worry about the children locking thing if you use prepareToFuse() before trying to fuse a
+ // sharable value. But, you do need to guard any load fusion by checking if canCommitInternal
+ // is true.
+ //
+ // FusionResult prepareToFuse(value): Call this when you think that you would like to fuse
+ // some value and that value is not a load. It will automatically handle the shared-or-locked
+ // issues and it will clear canCommitInternal if necessary. This will return CannotFuse
+ // (which acts like false) if the value cannot be locked and its children are locked. That's
+ // rare, but you just need to make sure that you do smart things when this happens (i.e. just
+ // use the value rather than trying to fuse it). After you call prepareToFuse(), you can
+ // still change your mind about whether you will actually fuse the value. If you do fuse it,
+ // you need to call commitFusion(value, fusionResult).
+ //
+ // commitFusion(value, fusionResult): Handles calling commitInternal(value) if fusionResult
+ // is FuseAndCommit.
+
+ bool canCommitInternal = true;
+
+ enum FusionResult {
+ CannotFuse,
+ FuseAndCommit,
+ Fuse
+ };
+ auto prepareToFuse = [&] (Value* value) -> FusionResult {
+ if (value == m_value) {
+ // It's not actually internal. It's the root value. We're good to go.
+ return Fuse;
+ }
+
+ if (canCommitInternal && canBeInternal(value)) {
+ // We are the only users of this value. This also means that the value's children
+ // could not have been locked, since we have now proved that m_value dominates value
+ // in the data flow graph. The only other way to reach value is from a user of m_value. If
+ // value's children are shared with others, then they could not have been locked
+ // because their use count is greater than 1. If they are only used from value, then
+ // in order for value's children to be locked, value would also have to be locked,
+ // and we just proved that it wasn't.
+ return FuseAndCommit;
+ }
+
+ // We're going to try to share value with others. It's possible that some other basic
+ // block had already emitted code for value and then matched over its children and then
+ // locked them, in which case we just want to use value instead of duplicating it. So, we
+ // validate the children. Note that this only arises in linear chains like:
+ //
+ // BB#1:
+ // @1 = Foo(...)
+ // @2 = Bar(@1)
+ // Jump(#2)
+ // BB#2:
+ // @3 = Baz(@2)
+ //
+ // Notice how we could start by generating code for BB#1 and then decide to lock @1 when
+ // generating code for @2, if we have some way of fusing Bar and Foo into a single
+ // instruction. This is legal, since indeed @1 only has one user. Because @2 now has a
+ // tmp (i.e. @2 is pinned), canBeInternal(@2) will return false, which brings us
+ // here. In that case, we cannot match over @2 because then we'd hit a hazard if we end
+ // up deciding not to fuse Foo into the fused Baz/Bar.
+ //
+ // Happily, this kind of child validation only happens in rules that admit sharing, like
+ // this one and effectiveAddr().
+ //
+ // N.B. We could probably avoid the need to do value locking if we committed to a well
+ // chosen code generation order. For example, if we guaranteed that all of the users of
+ // a value get generated before that value, then there's no way for the lowering of @3 to
+ // see @1 locked. But we don't want to do that, since this is a greedy instruction
+ // selector and so we want to be able to play with order.
+ for (Value* child : value->children()) {
+ if (m_locked.contains(child))
+ return CannotFuse;
+ }
+
+ // It's safe to share value, but since we're sharing, it means that we aren't locking it.
+ // If we don't lock it, then fusing loads is off limits and all of value's children will
+ // have to go through the sharing path as well.
+ canCommitInternal = false;
+
+ return Fuse;
+ };
+
+ auto commitFusion = [&] (Value* value, FusionResult result) {
+ if (result == FuseAndCommit)
+ commitInternal(value);
+ };
+
+ // Chew through any inversions. This loop isn't necessary for comparisons and branches, but
+ // we do need at least one iteration of it for Check.
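+ // For example (illustrative): for Branch(Equal(@x, 0)) we peel off the Equal, flip 'inverted',
+ // and then fuse a branch on @x directly; BitXor(@bool, 1) gets peeled the same way.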
+ for (;;) {
+ bool shouldInvert =
+ (value->opcode() == BitXor && value->child(1)->hasInt() && (value->child(1)->asInt() & 1) && value->child(0)->returnsBool())
+ || (value->opcode() == Equal && value->child(1)->isInt(0));
+ if (!shouldInvert)
+ break;
+
+ FusionResult fusionResult = prepareToFuse(value);
+ if (fusionResult == CannotFuse)
+ break;
+ commitFusion(value, fusionResult);
+
+ value = value->child(0);
+ inverted = !inverted;
+ }
+
+ auto createRelCond = [&] (
+ MacroAssembler::RelationalCondition relationalCondition,
+ MacroAssembler::DoubleCondition doubleCondition) {
+ Arg relCond = Arg::relCond(relationalCondition).inverted(inverted);
+ Arg doubleCond = Arg::doubleCond(doubleCondition).inverted(inverted);
+ Value* left = value->child(0);
+ Value* right = value->child(1);
+
+ if (isInt(value->child(0)->type())) {
+ // FIXME: We wouldn't have to worry about leftImm if we canonicalized integer
+ // comparisons.
+ // https://bugs.webkit.org/show_bug.cgi?id=150958
+
+ Arg leftImm = imm(left);
+ Arg rightImm = imm(right);
+
+ auto tryCompare = [&] (
+ Arg::Width width, ArgPromise&& left, ArgPromise&& right) -> Inst {
+ if (Inst result = compare(width, relCond, left, right))
+ return result;
+ if (Inst result = compare(width, relCond.flipped(), right, left))
+ return result;
+ return Inst();
+ };
+
+ auto tryCompareLoadImm = [&] (
+ Arg::Width width, B3::Opcode loadOpcode, Arg::Signedness signedness) -> Inst {
+ if (rightImm && rightImm.isRepresentableAs(width, signedness)) {
+ if (Inst result = tryCompare(width, loadPromise(left, loadOpcode), rightImm)) {
+ commitInternal(left);
+ return result;
+ }
+ }
+ if (leftImm && leftImm.isRepresentableAs(width, signedness)) {
+ if (Inst result = tryCompare(width, leftImm, loadPromise(right, loadOpcode))) {
+ commitInternal(right);
+ return result;
+ }
+ }
+ return Inst();
+ };
+
+ Arg::Width width = Arg::widthForB3Type(value->child(0)->type());
+
+ if (canCommitInternal) {
+ // First handle compares that involve fewer bits than B3's type system supports.
+ // This is pretty important. For example, we want this to be a single
+ // instruction:
+ //
+ // @1 = Load8S(...)
+ // @2 = Const32(...)
+ // @3 = LessThan(@1, @2)
+ // Branch(@3)
+
+ if (relCond.isSignedCond()) {
+ if (Inst result = tryCompareLoadImm(Arg::Width8, Load8S, Arg::Signed))
+ return result;
+ }
+
+ if (relCond.isUnsignedCond()) {
+ if (Inst result = tryCompareLoadImm(Arg::Width8, Load8Z, Arg::Unsigned))
+ return result;
+ }
+
+ if (relCond.isSignedCond()) {
+ if (Inst result = tryCompareLoadImm(Arg::Width16, Load16S, Arg::Signed))
+ return result;
+ }
+
+ if (relCond.isUnsignedCond()) {
+ if (Inst result = tryCompareLoadImm(Arg::Width16, Load16Z, Arg::Unsigned))
+ return result;
+ }
+
+ // Now handle compares that involve a load and an immediate.
+
+ if (Inst result = tryCompareLoadImm(width, Load, Arg::Signed))
+ return result;
+
+ // Now handle compares that involve a load. It's not obvious whether it's better to
+ // handle this before or after the immediate cases. Probably doesn't matter.
+
+ if (Inst result = tryCompare(width, loadPromise(left), tmpPromise(right))) {
+ commitInternal(left);
+ return result;
+ }
+
+ if (Inst result = tryCompare(width, tmpPromise(left), loadPromise(right))) {
+ commitInternal(right);
+ return result;
+ }
+ }
+
+ // Now handle compares that involve an immediate and a tmp.
+
+ if (leftImm && leftImm.isRepresentableAs<int32_t>()) {
+ if (Inst result = tryCompare(width, leftImm, tmpPromise(right)))
+ return result;
+ }
+
+ if (rightImm && rightImm.isRepresentableAs<int32_t>()) {
+ if (Inst result = tryCompare(width, tmpPromise(left), rightImm))
+ return result;
+ }
+
+ // Finally, handle comparison between tmps.
+ ArgPromise leftPromise = tmpPromise(left);
+ ArgPromise rightPromise = tmpPromise(right);
+ return compare(width, relCond, leftPromise, rightPromise);
+ }
+
+ // Floating point comparisons can't really do anything smart.
+ ArgPromise leftPromise = tmpPromise(left);
+ ArgPromise rightPromise = tmpPromise(right);
+ if (value->child(0)->type() == Float)
+ return compareFloat(doubleCond, leftPromise, rightPromise);
+ return compareDouble(doubleCond, leftPromise, rightPromise);
+ };
+
+ Arg::Width width = Arg::widthForB3Type(value->type());
+ Arg resCond = Arg::resCond(MacroAssembler::NonZero).inverted(inverted);
+
+ auto tryTest = [&] (
+ Arg::Width width, ArgPromise&& left, ArgPromise&& right) -> Inst {
+ if (Inst result = test(width, resCond, left, right))
+ return result;
+ if (Inst result = test(width, resCond, right, left))
+ return result;
+ return Inst();
+ };
+
+ auto attemptFused = [&] () -> Inst {
+ switch (value->opcode()) {
+ case NotEqual:
+ return createRelCond(MacroAssembler::NotEqual, MacroAssembler::DoubleNotEqualOrUnordered);
+ case Equal:
+ return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqual);
+ case LessThan:
+ return createRelCond(MacroAssembler::LessThan, MacroAssembler::DoubleLessThan);
+ case GreaterThan:
+ return createRelCond(MacroAssembler::GreaterThan, MacroAssembler::DoubleGreaterThan);
+ case LessEqual:
+ return createRelCond(MacroAssembler::LessThanOrEqual, MacroAssembler::DoubleLessThanOrEqual);
+ case GreaterEqual:
+ return createRelCond(MacroAssembler::GreaterThanOrEqual, MacroAssembler::DoubleGreaterThanOrEqual);
+ case EqualOrUnordered:
+ // The integer condition is never used in this case.
+ return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqualOrUnordered);
+ case Above:
+ // We use a bogus double condition because these integer comparisons won't go down that
+ // path anyway.
+ return createRelCond(MacroAssembler::Above, MacroAssembler::DoubleEqual);
+ case Below:
+ return createRelCond(MacroAssembler::Below, MacroAssembler::DoubleEqual);
+ case AboveEqual:
+ return createRelCond(MacroAssembler::AboveOrEqual, MacroAssembler::DoubleEqual);
+ case BelowEqual:
+ return createRelCond(MacroAssembler::BelowOrEqual, MacroAssembler::DoubleEqual);
+ case BitAnd: {
+ Value* left = value->child(0);
+ Value* right = value->child(1);
+
+ bool hasRightConst;
+ int64_t rightConst;
+ Arg rightImm;
+ Arg rightImm64;
+
+ hasRightConst = right->hasInt();
+ if (hasRightConst) {
+ rightConst = right->asInt();
+ rightImm = bitImm(right);
+ rightImm64 = bitImm64(right);
+ }
+
+ auto tryTestLoadImm = [&] (Arg::Width width, Arg::Signedness signedness, B3::Opcode loadOpcode) -> Inst {
+ if (!hasRightConst)
+ return Inst();
+ // Signed loads will create high bits, so if the immediate has high bits
+ // then we cannot proceed. Consider BitAnd(Load8S(ptr), 0x101). This cannot
+ // be turned into testb (ptr), $1, since if the high bit within that byte
+ // was set then it would be extended to include 0x100. The handling below
+ // won't anticipate this, so we need to catch it here.
+ if (signedness == Arg::Signed
+ && !Arg::isRepresentableAs(width, Arg::Unsigned, rightConst))
+ return Inst();
+
+ // FIXME: If this is unsigned then we can chop things off of the immediate.
+ // This might make the immediate more legal. Perhaps that's a job for
+ // strength reduction?
+
+ if (rightImm) {
+ if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm)) {
+ commitInternal(left);
+ return result;
+ }
+ }
+ if (rightImm64) {
+ if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm64)) {
+ commitInternal(left);
+ return result;
+ }
+ }
+ return Inst();
+ };
+
+ if (canCommitInternal) {
+ // First handle tests that involve fewer bits than B3's type system supports.
+
+ if (Inst result = tryTestLoadImm(Arg::Width8, Arg::Unsigned, Load8Z))
+ return result;
+
+ if (Inst result = tryTestLoadImm(Arg::Width8, Arg::Signed, Load8S))
+ return result;
+
+ if (Inst result = tryTestLoadImm(Arg::Width16, Arg::Unsigned, Load16Z))
+ return result;
+
+ if (Inst result = tryTestLoadImm(Arg::Width16, Arg::Signed, Load16S))
+ return result;
+
+ // This allows us to use a 32-bit test for 64-bit BitAnd if the immediate is
+ // representable as an unsigned 32-bit value. The logic involved is the same
+ // as if we were pondering using a 32-bit test for
+ // BitAnd(SExt(Load(ptr)), const), in the sense that in both cases we have
+ // to worry about high bits. So, we use the "Signed" version of this helper.
+ if (Inst result = tryTestLoadImm(Arg::Width32, Arg::Signed, Load))
+ return result;
+
+ // This is needed to handle 32-bit test for arbitrary 32-bit immediates.
+ if (Inst result = tryTestLoadImm(width, Arg::Unsigned, Load))
+ return result;
+
+ // Now handle tests that involve a load.
+
+ Arg::Width width = Arg::widthForB3Type(value->child(0)->type());
+ if (Inst result = tryTest(width, loadPromise(left), tmpPromise(right))) {
+ commitInternal(left);
+ return result;
+ }
+
+ if (Inst result = tryTest(width, tmpPromise(left), loadPromise(right))) {
+ commitInternal(right);
+ return result;
+ }
+ }
+
+ // Now handle tests that involve an immediate and a tmp.
+
+ if (hasRightConst) {
+ if ((width == Arg::Width32 && rightConst == 0xffffffff)
+ || (width == Arg::Width64 && rightConst == -1)) {
+ if (Inst result = tryTest(width, tmpPromise(left), tmpPromise(left)))
+ return result;
+ }
+ if (isRepresentableAs<uint32_t>(rightConst)) {
+ if (Inst result = tryTest(Arg::Width32, tmpPromise(left), rightImm))
+ return result;
+ if (Inst result = tryTest(Arg::Width32, tmpPromise(left), rightImm64))
+ return result;
+ }
+ if (Inst result = tryTest(width, tmpPromise(left), rightImm))
+ return result;
+ if (Inst result = tryTest(width, tmpPromise(left), rightImm64))
+ return result;
+ }
+
+ // Finally, just do tmp's.
+ return tryTest(width, tmpPromise(left), tmpPromise(right));
+ }
+ default:
+ return Inst();
+ }
+ };
+
+ if (FusionResult fusionResult = prepareToFuse(value)) {
+ if (Inst result = attemptFused()) {
+ commitFusion(value, fusionResult);
+ return result;
+ }
+ }
+
+ if (Arg::isValidBitImmForm(-1)) {
+ if (canCommitInternal && value->as<MemoryValue>()) {
+ // Handle things like Branch(Load8Z(value))
+
+ if (Inst result = tryTest(Arg::Width8, loadPromise(value, Load8Z), Arg::bitImm(-1))) {
+ commitInternal(value);
+ return result;
+ }
+
+ if (Inst result = tryTest(Arg::Width8, loadPromise(value, Load8S), Arg::bitImm(-1))) {
+ commitInternal(value);
+ return result;
+ }
+
+ if (Inst result = tryTest(Arg::Width16, loadPromise(value, Load16Z), Arg::bitImm(-1))) {
+ commitInternal(value);
+ return result;
+ }
+
+ if (Inst result = tryTest(Arg::Width16, loadPromise(value, Load16S), Arg::bitImm(-1))) {
+ commitInternal(value);
+ return result;
+ }
+
+ if (Inst result = tryTest(width, loadPromise(value), Arg::bitImm(-1))) {
+ commitInternal(value);
+ return result;
+ }
+ }
+
+ ArgPromise leftPromise = tmpPromise(value);
+ ArgPromise rightPromise = Arg::bitImm(-1);
+ if (Inst result = test(width, resCond, leftPromise, rightPromise))
+ return result;
+ }
+
+ // Sometimes this is the only form of test available. We prefer not to use this because
+ // it's less canonical.
+ ArgPromise leftPromise = tmpPromise(value);
+ ArgPromise rightPromise = tmpPromise(value);
+ return test(width, resCond, leftPromise, rightPromise);
+ }
+
+ Inst createBranch(Value* value, bool inverted = false)
+ {
+ return createGenericCompare(
+ value,
+ [this] (
+ Arg::Width width, const Arg& relCond,
+ ArgPromise& left, ArgPromise& right) -> Inst {
+ switch (width) {
+ case Arg::Width8:
+ if (isValidForm(Branch8, Arg::RelCond, left.kind(), right.kind())) {
+ return left.inst(right.inst(
+ Branch8, m_value, relCond,
+ left.consume(*this), right.consume(*this)));
+ }
+ return Inst();
+ case Arg::Width16:
+ return Inst();
+ case Arg::Width32:
+ if (isValidForm(Branch32, Arg::RelCond, left.kind(), right.kind())) {
+ return left.inst(right.inst(
+ Branch32, m_value, relCond,
+ left.consume(*this), right.consume(*this)));
+ }
+ return Inst();
+ case Arg::Width64:
+ if (isValidForm(Branch64, Arg::RelCond, left.kind(), right.kind())) {
+ return left.inst(right.inst(
+ Branch64, m_value, relCond,
+ left.consume(*this), right.consume(*this)));
+ }
+ return Inst();
+ }
+ ASSERT_NOT_REACHED();
+ },
+ [this] (
+ Arg::Width width, const Arg& resCond,
+ ArgPromise& left, ArgPromise& right) -> Inst {
+ switch (width) {
+ case Arg::Width8:
+ if (isValidForm(BranchTest8, Arg::ResCond, left.kind(), right.kind())) {
+ return left.inst(right.inst(
+ BranchTest8, m_value, resCond,
+ left.consume(*this), right.consume(*this)));
+ }
+ return Inst();
+ case Arg::Width16:
+ return Inst();
+ case Arg::Width32:
+ if (isValidForm(BranchTest32, Arg::ResCond, left.kind(), right.kind())) {
+ return left.inst(right.inst(
+ BranchTest32, m_value, resCond,
+ left.consume(*this), right.consume(*this)));
+ }
+ return Inst();
+ case Arg::Width64:
+ if (isValidForm(BranchTest64, Arg::ResCond, left.kind(), right.kind())) {
+ return left.inst(right.inst(
+ BranchTest64, m_value, resCond,
+ left.consume(*this), right.consume(*this)));
+ }
+ return Inst();
+ }
+ ASSERT_NOT_REACHED();
+ },
+ [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+ if (isValidForm(BranchDouble, Arg::DoubleCond, left.kind(), right.kind())) {
+ return left.inst(right.inst(
+ BranchDouble, m_value, doubleCond,
+ left.consume(*this), right.consume(*this)));
+ }
+ return Inst();
+ },
+ [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+ if (isValidForm(BranchFloat, Arg::DoubleCond, left.kind(), right.kind())) {
+ return left.inst(right.inst(
+ BranchFloat, m_value, doubleCond,
+ left.consume(*this), right.consume(*this)));
+ }
+ return Inst();
+ },
+ inverted);
+ }
+
+ Inst createCompare(Value* value, bool inverted = false)
+ {
+ return createGenericCompare(
+ value,
+ [this] (
+ Arg::Width width, const Arg& relCond,
+ ArgPromise& left, ArgPromise& right) -> Inst {
+ switch (width) {
+ case Arg::Width8:
+ case Arg::Width16:
+ return Inst();
+ case Arg::Width32:
+ if (isValidForm(Compare32, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) {
+ return left.inst(right.inst(
+ Compare32, m_value, relCond,
+ left.consume(*this), right.consume(*this), tmp(m_value)));
+ }
+ return Inst();
+ case Arg::Width64:
+ if (isValidForm(Compare64, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) {
+ return left.inst(right.inst(
+ Compare64, m_value, relCond,
+ left.consume(*this), right.consume(*this), tmp(m_value)));
+ }
+ return Inst();
+ }
+ ASSERT_NOT_REACHED();
+ },
+ [this] (
+ Arg::Width width, const Arg& resCond,
+ ArgPromise& left, ArgPromise& right) -> Inst {
+ switch (width) {
+ case Arg::Width8:
+ case Arg::Width16:
+ return Inst();
+ case Arg::Width32:
+ if (isValidForm(Test32, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) {
+ return left.inst(right.inst(
+ Test32, m_value, resCond,
+ left.consume(*this), right.consume(*this), tmp(m_value)));
+ }
+ return Inst();
+ case Arg::Width64:
+ if (isValidForm(Test64, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) {
+ return left.inst(right.inst(
+ Test64, m_value, resCond,
+ left.consume(*this), right.consume(*this), tmp(m_value)));
+ }
+ return Inst();
+ }
+ ASSERT_NOT_REACHED();
+ },
+ [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+ if (isValidForm(CompareDouble, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) {
+ return left.inst(right.inst(
+ CompareDouble, m_value, doubleCond,
+ left.consume(*this), right.consume(*this), tmp(m_value)));
+ }
+ return Inst();
+ },
+ [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+ if (isValidForm(CompareFloat, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) {
+ return left.inst(right.inst(
+ CompareFloat, m_value, doubleCond,
+ left.consume(*this), right.consume(*this), tmp(m_value)));
+ }
+ return Inst();
+ },
+ inverted);
+ }
+
+ struct MoveConditionallyConfig {
+ Air::Opcode moveConditionally32;
+ Air::Opcode moveConditionally64;
+ Air::Opcode moveConditionallyTest32;
+ Air::Opcode moveConditionallyTest64;
+ Air::Opcode moveConditionallyDouble;
+ Air::Opcode moveConditionallyFloat;
+ };
+ Inst createSelect(const MoveConditionallyConfig& config)
+ {
+ auto createSelectInstruction = [&] (Air::Opcode opcode, const Arg& condition, ArgPromise& left, ArgPromise& right) -> Inst {
+ if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+ Tmp result = tmp(m_value);
+ Tmp thenCase = tmp(m_value->child(1));
+ Tmp elseCase = tmp(m_value->child(2));
+ return left.inst(right.inst(
+ opcode, m_value, condition,
+ left.consume(*this), right.consume(*this), thenCase, elseCase, result));
+ }
+ if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp)) {
+ Tmp result = tmp(m_value);
+ Tmp source = tmp(m_value->child(1));
+ append(relaxedMoveForType(m_value->type()), tmp(m_value->child(2)), result);
+ return left.inst(right.inst(
+ opcode, m_value, condition,
+ left.consume(*this), right.consume(*this), source, result));
+ }
+ return Inst();
+ };
+
+ return createGenericCompare(
+ m_value->child(0),
+ [&] (
+ Arg::Width width, const Arg& relCond,
+ ArgPromise& left, ArgPromise& right) -> Inst {
+ switch (width) {
+ case Arg::Width8:
+ // FIXME: Support these things.
+ // https://bugs.webkit.org/show_bug.cgi?id=151504
+ return Inst();
+ case Arg::Width16:
+ return Inst();
+ case Arg::Width32:
+ return createSelectInstruction(config.moveConditionally32, relCond, left, right);
+ case Arg::Width64:
+ return createSelectInstruction(config.moveConditionally64, relCond, left, right);
+ }
+ ASSERT_NOT_REACHED();
+ },
+ [&] (
+ Arg::Width width, const Arg& resCond,
+ ArgPromise& left, ArgPromise& right) -> Inst {
+ switch (width) {
+ case Arg::Width8:
+ // FIXME: Support more things.
+ // https://bugs.webkit.org/show_bug.cgi?id=151504
+ return Inst();
+ case Arg::Width16:
+ return Inst();
+ case Arg::Width32:
+ return createSelectInstruction(config.moveConditionallyTest32, resCond, left, right);
+ case Arg::Width64:
+ return createSelectInstruction(config.moveConditionallyTest64, resCond, left, right);
+ }
+ ASSERT_NOT_REACHED();
+ },
+ [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+ return createSelectInstruction(config.moveConditionallyDouble, doubleCond, left, right);
+ },
+ [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+ return createSelectInstruction(config.moveConditionallyFloat, doubleCond, left, right);
+ },
+ false);
+ }
+
+ bool tryAppendLea()
+ {
+ Air::Opcode leaOpcode = tryOpcodeForType(Lea32, Lea64, m_value->type());
+ if (!isValidForm(leaOpcode, Arg::Index, Arg::Tmp))
+ return false;
+
+ // This lets us turn things like this:
+ //
+ // Add(Add(@x, Shl(@y, $2)), $100)
+ //
+ // Into this:
+ //
+ // lea 100(%rdi,%rsi,4), %rax
+ //
+ // We have a choice here between committing the internal bits of an index or sharing
+ // them. There are solid arguments for both.
+ //
+ // Sharing: The word on the street is that the cost of a lea is one cycle no matter
+ // what it does. Every experiment I've ever seen seems to confirm this. So, sharing
+ // helps us in situations where Wasm input did this:
+ //
+ // x = a[i].x;
+ // y = a[i].y;
+ //
+ // With sharing we would do:
+ //
+ // leal (%a,%i,4), %tmp
+ // cmp (%size, %tmp)
+ // ja _fail
+ // movl (%base, %tmp), %x
+ // leal 4(%a,%i,4), %tmp
+ // cmp (%size, %tmp)
+ // ja _fail
+ // movl (%base, %tmp), %y
+ //
+ // In the absence of sharing, we may find ourselves needing separate registers for
+ // the innards of the index. That's relatively unlikely to be a thing due to other
+ // optimizations that we already have, but it could happen.
+ //
+ // Committing: The worst case is that there is a complicated graph of additions and
+ // shifts, where each value has multiple uses. In that case, it's better to compute
+ // each one separately from the others since that way, each calculation will use a
+ // relatively nearby tmp as its input. That seems uncommon, but in those cases,
+ // committing is a clear winner: it would result in a simple interference graph
+ // while sharing would result in a complex one. Interference sucks because it means
+ // more time in IRC and it means worse code.
+ //
+ // It's not super clear if any of these corner cases would ever arise. Committing
+ // has the benefit that it's easier to reason about, and protects a much darker
+ // corner case (more interference).
+
+ // Here are the things we want to match:
+ // Add(Add(@x, @y), $c)
+ // Add(Shl(@x, $c), @y)
+ // Add(@x, Shl(@y, $c))
+ // Add(Add(@x, Shl(@y, $c)), $d)
+ // Add(Add(Shl(@x, $c), @y), $d)
+ //
+ // Note that if you do Add(Shl(@x, $c), $d) then we will treat $d as a non-constant and
+ // force it to materialize. You'll get something like this:
+ //
+ // movl $d, %tmp
+ // leal (%tmp,%x,1<<c), %result
+ //
+ // Which is pretty close to optimal and has the nice effect of being able to handle large
+ // constants gracefully.
+
+ Value* innerAdd = nullptr;
+
+ Value* value = m_value;
+
+ // We're going to consume Add(Add(_), $c). If we succeed at consuming it then we have these
+ // patterns left (i.e. in the Add(_)):
+ //
+ // Add(Add(@x, @y), $c)
+ // Add(Add(@x, Shl(@y, $c)), $d)
+ // Add(Add(Shl(@x, $c), @y), $d)
+ //
+ // Otherwise we are looking at these patterns:
+ //
+ // Add(Shl(@x, $c), @y)
+ // Add(@x, Shl(@y, $c))
+ //
+ // This means that the subsequent code only has to worry about three patterns:
+ //
+ // Add(Shl(@x, $c), @y)
+ // Add(@x, Shl(@y, $c))
+ // Add(@x, @y) (only if offset != 0)
+ int32_t offset = 0;
+ if (value->child(1)->isRepresentableAs<int32_t>()
+ && canBeInternal(value->child(0))
+ && value->child(0)->opcode() == Add) {
+ innerAdd = value->child(0);
+ offset = static_cast<int32_t>(value->child(1)->asInt());
+ value = value->child(0);
+ }
+
+ auto tryShl = [&] (Value* shl, Value* other) -> bool {
+ std::optional<unsigned> scale = scaleForShl(shl, offset);
+ if (!scale)
+ return false;
+ if (!canBeInternal(shl))
+ return false;
+
+ ASSERT(!m_locked.contains(shl->child(0)));
+ ASSERT(!m_locked.contains(other));
+
+ append(leaOpcode, Arg::index(tmp(other), tmp(shl->child(0)), *scale, offset), tmp(m_value));
+ commitInternal(innerAdd);
+ commitInternal(shl);
+ return true;
+ };
+
+ if (tryShl(value->child(0), value->child(1)))
+ return true;
+ if (tryShl(value->child(1), value->child(0)))
+ return true;
+
+ // The remaining pattern is just:
+ // Add(@x, @y) (only if offset != 0)
+ if (!offset)
+ return false;
+ ASSERT(!m_locked.contains(value->child(0)));
+ ASSERT(!m_locked.contains(value->child(1)));
+ append(leaOpcode, Arg::index(tmp(value->child(0)), tmp(value->child(1)), 1, offset), tmp(m_value));
+ commitInternal(innerAdd);
+ return true;
+ }
+
+ void lower()
+ {
+ switch (m_value->opcode()) {
+ case B3::Nop: {
+ // Yes, we will totally see Nop's because some phases will replaceWithNop() instead of
+ // properly removing things.
+ return;
+ }
+
+ case Load: {
+ append(trappingInst(m_value, moveForType(m_value->type()), m_value, addr(m_value), tmp(m_value)));
+ return;
+ }
+
+ case Load8S: {
+ append(trappingInst(m_value, Load8SignedExtendTo32, m_value, addr(m_value), tmp(m_value)));
+ return;
+ }
+
+ case Load8Z: {
+ append(trappingInst(m_value, Load8, m_value, addr(m_value), tmp(m_value)));
+ return;
+ }
+
+ case Load16S: {
+ append(trappingInst(m_value, Load16SignedExtendTo32, m_value, addr(m_value), tmp(m_value)));
+ return;
+ }
+
+ case Load16Z: {
+ append(trappingInst(m_value, Load16, m_value, addr(m_value), tmp(m_value)));
+ return;
+ }
+
+ case Add: {
+ if (tryAppendLea())
+ return;
+
+ Air::Opcode multiplyAddOpcode = tryOpcodeForType(MultiplyAdd32, MultiplyAdd64, m_value->type());
+ if (isValidForm(multiplyAddOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+ Value* left = m_value->child(0);
+ Value* right = m_value->child(1);
+ if (!imm(right) || m_valueToTmp[right]) {
+ auto tryAppendMultiplyAdd = [&] (Value* left, Value* right) -> bool {
+ if (left->opcode() != Mul || !canBeInternal(left))
+ return false;
+
+ Value* multiplyLeft = left->child(0);
+ Value* multiplyRight = left->child(1);
+ if (canBeInternal(multiplyLeft) || canBeInternal(multiplyRight))
+ return false;
+
+ append(multiplyAddOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(right), tmp(m_value));
+ commitInternal(left);
+
+ return true;
+ };
+
+ if (tryAppendMultiplyAdd(left, right))
+ return;
+ if (tryAppendMultiplyAdd(right, left))
+ return;
+ }
+ }
+
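+ // Illustrative example: on targets with a fused multiply-add form (e.g. ARM64), the code above
+ // turns Add(Mul(@a, @b), @c) into a single MultiplyAdd32/64; otherwise we fall through to the
+ // generic binary op path below.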
+ appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(
+ m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case Sub: {
+ Air::Opcode multiplySubOpcode = tryOpcodeForType(MultiplySub32, MultiplySub64, m_value->type());
+ if (multiplySubOpcode != Air::Oops
+ && isValidForm(multiplySubOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+ Value* left = m_value->child(0);
+ Value* right = m_value->child(1);
+ if (!imm(right) || m_valueToTmp[right]) {
+ auto tryAppendMultiplySub = [&] () -> bool {
+ if (right->opcode() != Mul || !canBeInternal(right))
+ return false;
+
+ Value* multiplyLeft = right->child(0);
+ Value* multiplyRight = right->child(1);
+ if (m_locked.contains(multiplyLeft) || m_locked.contains(multiplyRight))
+ return false;
+
+ append(multiplySubOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(left), tmp(m_value));
+ commitInternal(right);
+
+ return true;
+ };
+
+ if (tryAppendMultiplySub())
+ return;
+ }
+ }
+
+ appendBinOp<Sub32, Sub64, SubDouble, SubFloat>(m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case Neg: {
+ Air::Opcode multiplyNegOpcode = tryOpcodeForType(MultiplyNeg32, MultiplyNeg64, m_value->type());
+ if (multiplyNegOpcode != Air::Oops
+ && isValidForm(multiplyNegOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)
+ && m_value->child(0)->opcode() == Mul
+ && canBeInternal(m_value->child(0))) {
+ Value* multiplyOperation = m_value->child(0);
+ Value* multiplyLeft = multiplyOperation->child(0);
+ Value* multiplyRight = multiplyOperation->child(1);
+ if (!m_locked.contains(multiplyLeft) && !m_locked.contains(multiplyRight)) {
+ append(multiplyNegOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(m_value));
+ commitInternal(multiplyOperation);
+ return;
+ }
+ }
+
+ appendUnOp<Neg32, Neg64, NegateDouble, NegateFloat>(m_value->child(0));
+ return;
+ }
+
+ case Mul: {
+ appendBinOp<Mul32, Mul64, MulDouble, MulFloat, Commutative>(
+ m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case Div: {
+ if (m_value->isChill())
+ RELEASE_ASSERT(isARM64());
+ if (isInt(m_value->type()) && isX86()) {
+ lowerX86Div(Div);
+ return;
+ }
+ ASSERT(!isX86() || isFloat(m_value->type()));
+
+ appendBinOp<Div32, Div64, DivDouble, DivFloat>(m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case UDiv: {
+ if (isInt(m_value->type()) && isX86()) {
+ lowerX86UDiv(UDiv);
+ return;
+ }
+
+ ASSERT(!isX86() && !isFloat(m_value->type()));
+
+ appendBinOp<UDiv32, UDiv64, Air::Oops, Air::Oops>(m_value->child(0), m_value->child(1));
+ return;
+
+ }
+
+ case Mod: {
+ RELEASE_ASSERT(isX86());
+ RELEASE_ASSERT(!m_value->isChill());
+ lowerX86Div(Mod);
+ return;
+ }
+
+ case UMod: {
+ RELEASE_ASSERT(isX86());
+ lowerX86UDiv(UMod);
+ return;
+ }
+
+ case BitAnd: {
+ if (m_value->child(1)->isInt(0xff)) {
+ appendUnOp<ZeroExtend8To32, ZeroExtend8To32>(m_value->child(0));
+ return;
+ }
+
+ if (m_value->child(1)->isInt(0xffff)) {
+ appendUnOp<ZeroExtend16To32, ZeroExtend16To32>(m_value->child(0));
+ return;
+ }
+
+ if (m_value->child(1)->isInt(0xffffffff)) {
+ appendUnOp<Move32, Move32>(m_value->child(0));
+ return;
+ }
+
+ appendBinOp<And32, And64, AndDouble, AndFloat, Commutative>(
+ m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case BitOr: {
+ appendBinOp<Or32, Or64, OrDouble, OrFloat, Commutative>(
+ m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case BitXor: {
+ // FIXME: If canBeInternal(child), we should generate this using the comparison path.
+ // https://bugs.webkit.org/show_bug.cgi?id=152367
+
+ if (m_value->child(1)->isInt(-1)) {
+ appendUnOp<Not32, Not64>(m_value->child(0));
+ return;
+ }
+ appendBinOp<Xor32, Xor64, XorDouble, XorFloat, Commutative>(
+ m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case Shl: {
+ if (m_value->child(1)->isInt32(1)) {
+ appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(m_value->child(0), m_value->child(0));
+ return;
+ }
+
+ appendShift<Lshift32, Lshift64>(m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case SShr: {
+ appendShift<Rshift32, Rshift64>(m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case ZShr: {
+ appendShift<Urshift32, Urshift64>(m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case RotR: {
+ appendShift<RotateRight32, RotateRight64>(m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case RotL: {
+ appendShift<RotateLeft32, RotateLeft64>(m_value->child(0), m_value->child(1));
+ return;
+ }
+
+ case Clz: {
+ appendUnOp<CountLeadingZeros32, CountLeadingZeros64>(m_value->child(0));
+ return;
+ }
+
+ case Abs: {
+ RELEASE_ASSERT_WITH_MESSAGE(!isX86(), "Abs is not supported natively on x86. It must be replaced before generation.");
+ appendUnOp<Air::Oops, Air::Oops, AbsDouble, AbsFloat>(m_value->child(0));
+ return;
+ }
+
+ case Ceil: {
+ appendUnOp<Air::Oops, Air::Oops, CeilDouble, CeilFloat>(m_value->child(0));
+ return;
+ }
+
+ case Floor: {
+ appendUnOp<Air::Oops, Air::Oops, FloorDouble, FloorFloat>(m_value->child(0));
+ return;
+ }
+
+ case Sqrt: {
+ appendUnOp<Air::Oops, Air::Oops, SqrtDouble, SqrtFloat>(m_value->child(0));
+ return;
+ }
+
+ case BitwiseCast: {
+ appendUnOp<Move32ToFloat, Move64ToDouble, MoveDoubleTo64, MoveFloatTo32>(m_value->child(0));
+ return;
+ }
+
+ case Store: {
+ Value* valueToStore = m_value->child(0);
+ if (canBeInternal(valueToStore)) {
+ bool matched = false;
+ switch (valueToStore->opcode()) {
+ case Add:
+ matched = tryAppendStoreBinOp<Add32, Add64, Commutative>(
+ valueToStore->child(0), valueToStore->child(1));
+ break;
+ case Sub:
+ if (valueToStore->child(0)->isInt(0)) {
+ matched = tryAppendStoreUnOp<Neg32, Neg64>(valueToStore->child(1));
+ break;
+ }
+ matched = tryAppendStoreBinOp<Sub32, Sub64>(
+ valueToStore->child(0), valueToStore->child(1));
+ break;
+ case BitAnd:
+ matched = tryAppendStoreBinOp<And32, And64, Commutative>(
+ valueToStore->child(0), valueToStore->child(1));
+ break;
+ case BitXor:
+ if (valueToStore->child(1)->isInt(-1)) {
+ matched = tryAppendStoreUnOp<Not32, Not64>(valueToStore->child(0));
+ break;
+ }
+ matched = tryAppendStoreBinOp<Xor32, Xor64, Commutative>(
+ valueToStore->child(0), valueToStore->child(1));
+ break;
+ default:
+ break;
+ }
+ if (matched) {
+ commitInternal(valueToStore);
+ return;
+ }
+ }
+
+ appendStore(valueToStore, addr(m_value));
+ return;
+ }
+
+ case B3::Store8: {
+ Value* valueToStore = m_value->child(0);
+ if (canBeInternal(valueToStore)) {
+ bool matched = false;
+ switch (valueToStore->opcode()) {
+ case Add:
+ matched = tryAppendStoreBinOp<Add8, Air::Oops, Commutative>(
+ valueToStore->child(0), valueToStore->child(1));
+ break;
+ default:
+ break;
+ }
+ if (matched) {
+ commitInternal(valueToStore);
+ return;
+ }
+ }
+ appendStore(Air::Store8, valueToStore, addr(m_value));
+ return;
+ }
+
+ case B3::Store16: {
+ Value* valueToStore = m_value->child(0);
+ if (canBeInternal(valueToStore)) {
+ bool matched = false;
+ switch (valueToStore->opcode()) {
+ case Add:
+ matched = tryAppendStoreBinOp<Add16, Air::Oops, Commutative>(
+ valueToStore->child(0), valueToStore->child(1));
+ break;
+ default:
+ break;
+ }
+ if (matched) {
+ commitInternal(valueToStore);
+ return;
+ }
+ }
+ appendStore(Air::Store16, valueToStore, addr(m_value));
+ return;
+ }
+
+ case WasmAddress: {
+ WasmAddressValue* address = m_value->as<WasmAddressValue>();
+
+ append(Add64, Arg(address->pinnedGPR()), tmp(address));
+ return;
+ }
+
+ case Fence: {
+ FenceValue* fence = m_value->as<FenceValue>();
+ if (!fence->write && !fence->read)
+ return;
+ if (!fence->write) {
+ // A fence that reads but does not write is for protecting motion of stores.
+ append(StoreFence);
+ return;
+ }
+ if (!fence->read) {
+ // A fence that writes but does not read is for protecting motion of loads.
+ append(LoadFence);
+ return;
+ }
+ append(MemoryFence);
+ return;
+ }
+
+ case Trunc: {
+ ASSERT(tmp(m_value->child(0)) == tmp(m_value));
+ return;
+ }
+
+ case SExt8: {
+ appendUnOp<SignExtend8To32, Air::Oops>(m_value->child(0));
+ return;
+ }
+
+ case SExt16: {
+ appendUnOp<SignExtend16To32, Air::Oops>(m_value->child(0));
+ return;
+ }
+
+ case ZExt32: {
+ appendUnOp<Move32, Air::Oops>(m_value->child(0));
+ return;
+ }
+
+ case SExt32: {
+ // FIXME: We should have support for movsbq/movswq
+ // https://bugs.webkit.org/show_bug.cgi?id=152232
+
+ appendUnOp<SignExtend32ToPtr, Air::Oops>(m_value->child(0));
+ return;
+ }
+
+ case FloatToDouble: {
+ appendUnOp<Air::Oops, Air::Oops, Air::Oops, ConvertFloatToDouble>(m_value->child(0));
+ return;
+ }
+
+ case DoubleToFloat: {
+ appendUnOp<Air::Oops, Air::Oops, ConvertDoubleToFloat>(m_value->child(0));
+ return;
+ }
+
+ case ArgumentReg: {
+ m_prologue.append(Inst(
+ moveForType(m_value->type()), m_value,
+ Tmp(m_value->as<ArgumentRegValue>()->argumentReg()),
+ tmp(m_value)));
+ return;
+ }
+
+ case Const32:
+ case Const64: {
+ if (imm(m_value))
+ append(Move, imm(m_value), tmp(m_value));
+ else
+ append(Move, Arg::bigImm(m_value->asInt()), tmp(m_value));
+ return;
+ }
+
+ case ConstDouble:
+ case ConstFloat: {
+ // We expect that the moveConstants() phase has run, and any doubles referenced from
+ // stackmaps get fused.
+ RELEASE_ASSERT(m_value->opcode() == ConstFloat || isIdentical(m_value->asDouble(), 0.0));
+ RELEASE_ASSERT(m_value->opcode() == ConstDouble || isIdentical(m_value->asFloat(), 0.0f));
+ append(MoveZeroToDouble, tmp(m_value));
+ return;
+ }
+
+ case FramePointer: {
+ ASSERT(tmp(m_value) == Tmp(GPRInfo::callFrameRegister));
+ return;
+ }
+
+ case SlotBase: {
+ append(
+ pointerType() == Int64 ? Lea64 : Lea32,
+ Arg::stack(m_stackToStack.get(m_value->as<SlotBaseValue>()->slot())),
+ tmp(m_value));
+ return;
+ }
+
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case LessEqual:
+ case GreaterEqual:
+ case Above:
+ case Below:
+ case AboveEqual:
+ case BelowEqual:
+ case EqualOrUnordered: {
+ m_insts.last().append(createCompare(m_value));
+ return;
+ }
+
+ case Select: {
+ MoveConditionallyConfig config;
+ if (isInt(m_value->type())) {
+ config.moveConditionally32 = MoveConditionally32;
+ config.moveConditionally64 = MoveConditionally64;
+ config.moveConditionallyTest32 = MoveConditionallyTest32;
+ config.moveConditionallyTest64 = MoveConditionallyTest64;
+ config.moveConditionallyDouble = MoveConditionallyDouble;
+ config.moveConditionallyFloat = MoveConditionallyFloat;
+ } else {
+ // FIXME: it's not obvious that these are particularly efficient.
+ config.moveConditionally32 = MoveDoubleConditionally32;
+ config.moveConditionally64 = MoveDoubleConditionally64;
+ config.moveConditionallyTest32 = MoveDoubleConditionallyTest32;
+ config.moveConditionallyTest64 = MoveDoubleConditionallyTest64;
+ config.moveConditionallyDouble = MoveDoubleConditionallyDouble;
+ config.moveConditionallyFloat = MoveDoubleConditionallyFloat;
+ }
+
+ m_insts.last().append(createSelect(config));
+ return;
+ }
+
+ case IToD: {
+ appendUnOp<ConvertInt32ToDouble, ConvertInt64ToDouble>(m_value->child(0));
+ return;
+ }
+
+ case IToF: {
+ appendUnOp<ConvertInt32ToFloat, ConvertInt64ToFloat>(m_value->child(0));
+ return;
+ }
+
+ case B3::CCall: {
+ CCallValue* cCall = m_value->as<CCallValue>();
+
+ Inst inst(m_isRare ? Air::ColdCCall : Air::CCall, cCall);
+
+        // We have a ton of flexibility regarding the callee argument, but currently we don't
+        // use it. It gets weird for a few reasons:
+ // 1) We probably will never take advantage of this. We don't have C calls to locations
+ // loaded from addresses. We have JS calls like that, but those use Patchpoints.
+ // 2) On X86_64 we still don't support call with BaseIndex.
+ // 3) On non-X86, we don't natively support any kind of loading from address.
+ // 4) We don't have an isValidForm() for the CCallSpecial so we have no smart way to
+ // decide.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=151052
+ inst.args.append(tmp(cCall->child(0)));
+
+ if (cCall->type() != Void)
+ inst.args.append(tmp(cCall));
+
+ for (unsigned i = 1; i < cCall->numChildren(); ++i)
+ inst.args.append(immOrTmp(cCall->child(i)));
+
+ m_insts.last().append(WTFMove(inst));
+ return;
+ }
+
+ case Patchpoint: {
+ PatchpointValue* patchpointValue = m_value->as<PatchpointValue>();
+ ensureSpecial(m_patchpointSpecial);
+
+ Inst inst(Patch, patchpointValue, Arg::special(m_patchpointSpecial));
+
+ Vector<Inst> after;
+ if (patchpointValue->type() != Void) {
+ switch (patchpointValue->resultConstraint.kind()) {
+ case ValueRep::WarmAny:
+ case ValueRep::ColdAny:
+ case ValueRep::LateColdAny:
+ case ValueRep::SomeRegister:
+ case ValueRep::SomeEarlyRegister:
+ inst.args.append(tmp(patchpointValue));
+ break;
+ case ValueRep::Register: {
+ Tmp reg = Tmp(patchpointValue->resultConstraint.reg());
+ inst.args.append(reg);
+ after.append(Inst(
+ relaxedMoveForType(patchpointValue->type()), m_value, reg, tmp(patchpointValue)));
+ break;
+ }
+ case ValueRep::StackArgument: {
+ Arg arg = Arg::callArg(patchpointValue->resultConstraint.offsetFromSP());
+ inst.args.append(arg);
+ after.append(Inst(
+ moveForType(patchpointValue->type()), m_value, arg, tmp(patchpointValue)));
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ fillStackmap(inst, patchpointValue, 0);
+
+ if (patchpointValue->resultConstraint.isReg())
+ patchpointValue->lateClobbered().clear(patchpointValue->resultConstraint.reg());
+
+ for (unsigned i = patchpointValue->numGPScratchRegisters; i--;)
+ inst.args.append(m_code.newTmp(Arg::GP));
+ for (unsigned i = patchpointValue->numFPScratchRegisters; i--;)
+ inst.args.append(m_code.newTmp(Arg::FP));
+
+ m_insts.last().append(WTFMove(inst));
+ m_insts.last().appendVector(after);
+ return;
+ }
+
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul: {
+ CheckValue* checkValue = m_value->as<CheckValue>();
+
+ Value* left = checkValue->child(0);
+ Value* right = checkValue->child(1);
+
+ Tmp result = tmp(m_value);
+
+ // Handle checked negation.
+ if (checkValue->opcode() == CheckSub && left->isInt(0)) {
+ append(Move, tmp(right), result);
+
+ Air::Opcode opcode =
+ opcodeForType(BranchNeg32, BranchNeg64, checkValue->type());
+ CheckSpecial* special = ensureCheckSpecial(opcode, 2);
+
+ Inst inst(Patch, checkValue, Arg::special(special));
+ inst.args.append(Arg::resCond(MacroAssembler::Overflow));
+ inst.args.append(result);
+
+ fillStackmap(inst, checkValue, 2);
+
+ m_insts.last().append(WTFMove(inst));
+ return;
+ }
+
+ Air::Opcode opcode = Air::Oops;
+ Commutativity commutativity = NotCommutative;
+ StackmapSpecial::RoleMode stackmapRole = StackmapSpecial::SameAsRep;
+ switch (m_value->opcode()) {
+ case CheckAdd:
+ opcode = opcodeForType(BranchAdd32, BranchAdd64, m_value->type());
+ stackmapRole = StackmapSpecial::ForceLateUseUnlessRecoverable;
+ commutativity = Commutative;
+ break;
+ case CheckSub:
+ opcode = opcodeForType(BranchSub32, BranchSub64, m_value->type());
+ break;
+ case CheckMul:
+ opcode = opcodeForType(BranchMul32, BranchMul64, checkValue->type());
+ stackmapRole = StackmapSpecial::ForceLateUse;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ // FIXME: It would be great to fuse Loads into these. We currently don't do it because the
+ // rule for stackmaps is that all addresses are just stack addresses. Maybe we could relax
+ // this rule here.
+ // https://bugs.webkit.org/show_bug.cgi?id=151228
+
+ Vector<Arg, 2> sources;
+ if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+ sources.append(tmp(left));
+ sources.append(imm(right));
+ } else if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Imm, Arg::Tmp)) {
+ sources.append(imm(right));
+ append(Move, tmp(left), result);
+ } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+ sources.append(tmp(left));
+ sources.append(tmp(right));
+ } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp)) {
+ if (commutativity == Commutative && preferRightForResult(left, right)) {
+ sources.append(tmp(left));
+ append(Move, tmp(right), result);
+ } else {
+ sources.append(tmp(right));
+ append(Move, tmp(left), result);
+ }
+ } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+ sources.append(tmp(left));
+ sources.append(tmp(right));
+ sources.append(m_code.newTmp(Arg::typeForB3Type(m_value->type())));
+ sources.append(m_code.newTmp(Arg::typeForB3Type(m_value->type())));
+ }
+
+ // There is a really hilarious case that arises when we do BranchAdd32(%x, %x). We won't emit
+ // such code, but the coalescing in our register allocator also does copy propagation, so
+ // although we emit:
+ //
+ // Move %tmp1, %tmp2
+ // BranchAdd32 %tmp1, %tmp2
+ //
+ // The register allocator may turn this into:
+ //
+ // BranchAdd32 %rax, %rax
+ //
+ // Currently we handle this by ensuring that even this kind of addition can be undone. We can
+ // undo it by using the carry flag. It's tempting to get rid of that code and just "fix" this
+ // here by forcing LateUse on the stackmap. If we did that unconditionally, we'd lose a lot of
+ // performance. So it's tempting to do it only if left == right. But that creates an awkward
+ // constraint on Air: it means that Air would not be allowed to do any copy propagation.
+ // Notice that the %rax,%rax situation happened after Air copy-propagated the Move we are
+ // emitting. We know that copy-propagating over that Move causes add-to-self. But what if we
+ // emit something like a Move - or even do other kinds of copy-propagation on tmp's -
+ // somewhere else in this code. The add-to-self situation may only emerge after some other Air
+ // optimizations remove other Move's or identity-like operations. That's why we don't use
+ // LateUse here to take care of add-to-self.
+
+ CheckSpecial* special = ensureCheckSpecial(opcode, 2 + sources.size(), stackmapRole);
+
+ Inst inst(Patch, checkValue, Arg::special(special));
+
+ inst.args.append(Arg::resCond(MacroAssembler::Overflow));
+
+ inst.args.appendVector(sources);
+ inst.args.append(result);
+
+ fillStackmap(inst, checkValue, 2);
+
+ m_insts.last().append(WTFMove(inst));
+ return;
+ }
+
+ case Check: {
+ Inst branch = createBranch(m_value->child(0));
+
+ CheckSpecial* special = ensureCheckSpecial(branch);
+
+ CheckValue* checkValue = m_value->as<CheckValue>();
+
+ Inst inst(Patch, checkValue, Arg::special(special));
+ inst.args.appendVector(branch.args);
+
+ fillStackmap(inst, checkValue, 1);
+
+ m_insts.last().append(WTFMove(inst));
+ return;
+ }
+
+ case B3::WasmBoundsCheck: {
+ WasmBoundsCheckValue* value = m_value->as<WasmBoundsCheckValue>();
+
+ Value* ptr = value->child(0);
+
+ Arg temp = m_code.newTmp(Arg::GP);
+ append(Inst(Move32, value, tmp(ptr), temp));
+ if (value->offset()) {
+ if (imm(value->offset()))
+ append(Add64, imm(value->offset()), temp);
+ else {
+ Arg bigImm = m_code.newTmp(Arg::GP);
+ append(Move, Arg::bigImm(value->offset()), bigImm);
+ append(Add64, bigImm, temp);
+ }
+ }
+ append(Inst(Air::WasmBoundsCheck, value, temp, Arg(value->pinnedGPR())));
+ return;
+ }
+
+ case Upsilon: {
+ Value* value = m_value->child(0);
+ append(
+ relaxedMoveForType(value->type()), immOrTmp(value),
+ m_phiToTmp[m_value->as<UpsilonValue>()->phi()]);
+ return;
+ }
+
+ case Phi: {
+ // Snapshot the value of the Phi. It may change under us because you could do:
+ // a = Phi()
+ // Upsilon(@x, ^a)
+ // @a => this should get the value of the Phi before the Upsilon, i.e. not @x.
+
+ append(relaxedMoveForType(m_value->type()), m_phiToTmp[m_value], tmp(m_value));
+ return;
+ }
+
+ case Set: {
+ Value* value = m_value->child(0);
+ append(
+ relaxedMoveForType(value->type()), immOrTmp(value),
+ m_variableToTmp.get(m_value->as<VariableValue>()->variable()));
+ return;
+ }
+
+ case Get: {
+ append(
+ relaxedMoveForType(m_value->type()),
+ m_variableToTmp.get(m_value->as<VariableValue>()->variable()), tmp(m_value));
+ return;
+ }
+
+ case Branch: {
+ m_insts.last().append(createBranch(m_value->child(0)));
+ return;
+ }
+
+ case B3::Jump: {
+ append(Air::Jump);
+ return;
+ }
+
+ case Identity: {
+ ASSERT(tmp(m_value->child(0)) == tmp(m_value));
+ return;
+ }
+
+ case Return: {
+ if (!m_value->numChildren()) {
+ append(RetVoid);
+ return;
+ }
+ Value* value = m_value->child(0);
+ Tmp returnValueGPR = Tmp(GPRInfo::returnValueGPR);
+ Tmp returnValueFPR = Tmp(FPRInfo::returnValueFPR);
+ switch (value->type()) {
+ case Void:
+ // It's impossible for a void value to be used as a child. If we did want to have a
+ // void return, we'd introduce a different opcode, like ReturnVoid.
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ case Int32:
+ append(Move, immOrTmp(value), returnValueGPR);
+ append(Ret32, returnValueGPR);
+ break;
+ case Int64:
+ append(Move, immOrTmp(value), returnValueGPR);
+ append(Ret64, returnValueGPR);
+ break;
+ case Float:
+ append(MoveFloat, tmp(value), returnValueFPR);
+ append(RetFloat, returnValueFPR);
+ break;
+ case Double:
+ append(MoveDouble, tmp(value), returnValueFPR);
+ append(RetDouble, returnValueFPR);
+ break;
+ }
+ return;
+ }
+
+ case B3::Oops: {
+ append(Air::Oops);
+ return;
+ }
+
+ case B3::EntrySwitch: {
+ append(Air::EntrySwitch);
+ return;
+ }
+
+ default:
+ break;
+ }
+
+ dataLog("FATAL: could not lower ", deepDump(m_procedure, m_value), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ void lowerX86Div(B3::Opcode op)
+ {
+#if CPU(X86) || CPU(X86_64)
+ Tmp eax = Tmp(X86Registers::eax);
+ Tmp edx = Tmp(X86Registers::edx);
+
+ Air::Opcode convertToDoubleWord;
+ Air::Opcode div;
+ switch (m_value->type()) {
+ case Int32:
+ convertToDoubleWord = X86ConvertToDoubleWord32;
+ div = X86Div32;
+ break;
+ case Int64:
+ convertToDoubleWord = X86ConvertToQuadWord64;
+ div = X86Div64;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+
+ ASSERT(op == Div || op == Mod);
+ X86Registers::RegisterID result = op == Div ? X86Registers::eax : X86Registers::edx;
+
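+        // Signed x86 division: the dividend goes in eax and is sign-extended into
+        // edx:eax (cdq/cqo); idiv then leaves the quotient in eax and the remainder
+        // in edx, which is why Div reads eax and Mod reads edx.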
+ append(Move, tmp(m_value->child(0)), eax);
+ append(convertToDoubleWord, eax, edx);
+ append(div, eax, edx, tmp(m_value->child(1)));
+ append(Move, Tmp(result), tmp(m_value));
+
+#else
+ UNUSED_PARAM(op);
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ }
+
+ void lowerX86UDiv(B3::Opcode op)
+ {
+#if CPU(X86) || CPU(X86_64)
+ Tmp eax = Tmp(X86Registers::eax);
+ Tmp edx = Tmp(X86Registers::edx);
+
+ Air::Opcode div = m_value->type() == Int32 ? X86UDiv32 : X86UDiv64;
+
+ ASSERT(op == UDiv || op == UMod);
+ X86Registers::RegisterID result = op == UDiv ? X86Registers::eax : X86Registers::edx;
+
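+        // Unsigned x86 division: zero edx (the high half of the dividend) instead of
+        // sign-extending; div leaves the quotient in eax and the remainder in edx.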
+ append(Move, tmp(m_value->child(0)), eax);
+ append(Xor64, edx, edx);
+ append(div, eax, edx, tmp(m_value->child(1)));
+ append(Move, Tmp(result), tmp(m_value));
+#else
+ UNUSED_PARAM(op);
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ }
+
+ IndexSet<Value> m_locked; // These are values that will have no Tmp in Air.
+ IndexMap<Value, Tmp> m_valueToTmp; // These are values that must have a Tmp in Air. We say that a Value* with a non-null Tmp is "pinned".
+ IndexMap<Value, Tmp> m_phiToTmp; // Each Phi gets its own Tmp.
+ IndexMap<B3::BasicBlock, Air::BasicBlock*> m_blockToBlock;
+ HashMap<B3::StackSlot*, Air::StackSlot*> m_stackToStack;
+ HashMap<Variable*, Tmp> m_variableToTmp;
+
+ UseCounts m_useCounts;
+ PhiChildren m_phiChildren;
+ BlockWorklist m_fastWorklist;
+ Dominators& m_dominators;
+
+ Vector<Vector<Inst, 4>> m_insts;
+ Vector<Inst> m_prologue;
+
+ B3::BasicBlock* m_block;
+ bool m_isRare;
+ unsigned m_index;
+ Value* m_value;
+
+ PatchpointSpecial* m_patchpointSpecial { nullptr };
+ HashMap<CheckSpecial::Key, CheckSpecial*> m_checkSpecials;
+
+ Procedure& m_procedure;
+ Code& m_code;
+};
+
+} // anonymous namespace
+
+void lowerToAir(Procedure& procedure)
+{
+ PhaseScope phaseScope(procedure, "lowerToAir");
+ LowerToAir lowerToAir(procedure);
+ lowerToAir.run();
+}
+
+} } // namespace JSC::B3
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3LowerToAir.h b/Source/JavaScriptCore/b3/B3LowerToAir.h
new file mode 100644
index 000000000..a66837613
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3LowerToAir.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+namespace Air { class Code; }
+
+// This lowers the current B3 procedure to Air code.
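+//
+// An illustrative sketch of how this phase is typically driven (simplified; the
+// surrounding pipeline is not part of this header):
+//
+//     Procedure proc;
+//     // ... build B3 IR into proc ...
+//     lowerToAir(proc);
+//     // proc.code() now holds the Air program that later Air phases consume.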
+
+JS_EXPORT_PRIVATE void lowerToAir(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3MathExtras.cpp b/Source/JavaScriptCore/b3/B3MathExtras.cpp
new file mode 100644
index 000000000..1c99379ba
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MathExtras.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3MathExtras.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CCallValue.h"
+#include "B3Const32Value.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstPtrValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "MathCommon.h"
+
+namespace JSC { namespace B3 {
+
+std::pair<BasicBlock*, Value*> powDoubleInt32(Procedure& procedure, BasicBlock* start, Origin origin, Value* x, Value* y)
+{
+ BasicBlock* functionCallCase = procedure.addBlock();
+ BasicBlock* loopPreHeaderCase = procedure.addBlock();
+ BasicBlock* loopTestForEvenCase = procedure.addBlock();
+ BasicBlock* loopOdd = procedure.addBlock();
+ BasicBlock* loopEvenOdd = procedure.addBlock();
+ BasicBlock* continuation = procedure.addBlock();
+
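+    // Exponents above maxExponentForIntegerMathPow take the slow path, a C call to
+    // pow(). Everything else runs an inlined square-and-multiply loop: test the low
+    // bit of the remaining exponent, multiply the running result by the squared input
+    // when the bit is set, square the input, and shift the exponent right until it is zero.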
+ Value* shouldGoSlowPath = start->appendNew<Value>(procedure, Above, origin,
+ y,
+ start->appendNew<Const32Value>(procedure, origin, maxExponentForIntegerMathPow));
+ start->appendNew<Value>(procedure, Branch, origin, shouldGoSlowPath);
+ start->setSuccessors(FrequentedBlock(functionCallCase), FrequentedBlock(loopPreHeaderCase));
+
+ // Function call.
+ Value* yAsDouble = functionCallCase->appendNew<Value>(procedure, IToD, origin, y);
+ double (*powDouble)(double, double) = pow;
+ Value* powResult = functionCallCase->appendNew<CCallValue>(
+ procedure, Double, origin,
+ functionCallCase->appendNew<ConstPtrValue>(procedure, origin, bitwise_cast<void*>(powDouble)),
+ x, yAsDouble);
+ UpsilonValue* powResultUpsilon = functionCallCase->appendNew<UpsilonValue>(procedure, origin, powResult);
+ functionCallCase->appendNew<Value>(procedure, Jump, origin);
+ functionCallCase->setSuccessors(FrequentedBlock(continuation));
+
+ // Loop pre-header.
+ Value* initialResult = loopPreHeaderCase->appendNew<ConstDoubleValue>(procedure, origin, 1.);
+ UpsilonValue* initialLoopValue = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, initialResult);
+ UpsilonValue* initialResultValue = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, initialResult);
+ UpsilonValue* initialSquaredInput = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, x);
+ UpsilonValue* initialLoopCounter = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, y);
+ loopPreHeaderCase->appendNew<Value>(procedure, Jump, origin);
+ loopPreHeaderCase->setSuccessors(FrequentedBlock(loopTestForEvenCase));
+
+ // Test if what is left of the counter is even.
+ Value* inLoopCounter = loopTestForEvenCase->appendNew<Value>(procedure, Phi, Int32, origin);
+ Value* inLoopSquaredInput = loopTestForEvenCase->appendNew<Value>(procedure, Phi, Double, origin);
+ Value* lastCounterBit = loopTestForEvenCase->appendNew<Value>(procedure, BitAnd, origin,
+ inLoopCounter,
+ loopTestForEvenCase->appendNew<Const32Value>(procedure, origin, 1));
+ loopTestForEvenCase->appendNew<Value>(procedure, Branch, origin, lastCounterBit);
+ loopTestForEvenCase->setSuccessors(FrequentedBlock(loopOdd), FrequentedBlock(loopEvenOdd));
+
+ // Counter is odd.
+ Value* inLoopResult = loopOdd->appendNew<Value>(procedure, Phi, Double, origin);
+ Value* updatedResult = loopOdd->appendNew<Value>(procedure, Mul, origin, inLoopResult, inLoopSquaredInput);
+ UpsilonValue* updatedLoopResultUpsilon = loopOdd->appendNew<UpsilonValue>(procedure, origin, updatedResult);
+ initialLoopValue->setPhi(inLoopResult);
+ updatedLoopResultUpsilon->setPhi(inLoopResult);
+ UpsilonValue* updatedLoopResult = loopOdd->appendNew<UpsilonValue>(procedure, origin, updatedResult);
+
+ loopOdd->appendNew<Value>(procedure, Jump, origin);
+ loopOdd->setSuccessors(FrequentedBlock(loopEvenOdd));
+
+    // Counter is even. This block also follows the odd case.
+ Value* squaredInput = loopEvenOdd->appendNew<Value>(procedure, Mul, origin, inLoopSquaredInput, inLoopSquaredInput);
+ UpsilonValue* squaredInputUpsilon = loopEvenOdd->appendNew<UpsilonValue>(procedure, origin, squaredInput);
+ initialSquaredInput->setPhi(inLoopSquaredInput);
+ squaredInputUpsilon->setPhi(inLoopSquaredInput);
+
+ Value* updatedCounter = loopEvenOdd->appendNew<Value>(procedure, ZShr, origin,
+ inLoopCounter,
+ loopEvenOdd->appendNew<Const32Value>(procedure, origin, 1));
+ UpsilonValue* updatedCounterUpsilon = loopEvenOdd->appendNew<UpsilonValue>(procedure, origin, updatedCounter);
+ initialLoopCounter->setPhi(inLoopCounter);
+ updatedCounterUpsilon->setPhi(inLoopCounter);
+
+ loopEvenOdd->appendNew<Value>(procedure, Branch, origin, updatedCounter);
+ loopEvenOdd->setSuccessors(FrequentedBlock(loopTestForEvenCase), FrequentedBlock(continuation));
+
+    // Continuation: the final Phi merges the slow-path result with the inline loop's result.
+ Value* finalResultPhi = continuation->appendNew<Value>(procedure, Phi, Double, origin);
+ powResultUpsilon->setPhi(finalResultPhi);
+ initialResultValue->setPhi(finalResultPhi);
+ updatedLoopResult->setPhi(finalResultPhi);
+ return std::make_pair(continuation, finalResultPhi);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3MathExtras.h b/Source/JavaScriptCore/b3/B3MathExtras.h
new file mode 100644
index 000000000..b6bddea65
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MathExtras.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Procedure;
+class Value;
+
+// Raises "x" to the power "y".
+// Returns a new block that continues the control flow and the value representing the result.
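+//
+// Illustrative use (names like "proc", "block", "base", and "exponent" are assumed to be
+// an existing Procedure, the current BasicBlock, a Double value, and an Int32 value):
+//
+//     auto powResult = powDoubleInt32(proc, block, origin, base, exponent);
+//     block = powResult.first;           // keep appending to the continuation block
+//     Value* result = powResult.second;  // Double-typed value of base^exponent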
+JS_EXPORT_PRIVATE std::pair<BasicBlock*, Value*> powDoubleInt32(Procedure&, BasicBlock*, Origin, Value* x, Value* y);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3MemoryValue.cpp b/Source/JavaScriptCore/b3/B3MemoryValue.cpp
new file mode 100644
index 000000000..3764b7445
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MemoryValue.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3MemoryValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+MemoryValue::~MemoryValue()
+{
+}
+
+size_t MemoryValue::accessByteSize() const
+{
+ switch (opcode()) {
+ case Load8Z:
+ case Load8S:
+ case Store8:
+ return 1;
+ case Load16Z:
+ case Load16S:
+ case Store16:
+ return 2;
+ case Load:
+ return sizeofType(type());
+ case Store:
+ return sizeofType(child(0)->type());
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
+void MemoryValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ if (m_offset)
+ out.print(comma, "offset = ", m_offset);
+ if ((isLoad() && effects().reads != range())
+ || (isStore() && effects().writes != range()))
+ out.print(comma, "range = ", range());
+}
+
+Value* MemoryValue::cloneImpl() const
+{
+ return new MemoryValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3MemoryValue.h b/Source/JavaScriptCore/b3/B3MemoryValue.h
new file mode 100644
index 000000000..9a0504f98
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MemoryValue.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+// FIXME: We want to allow fenced memory accesses on ARM.
+// https://bugs.webkit.org/show_bug.cgi?id=162349
+
+class JS_EXPORT_PRIVATE MemoryValue : public Value {
+public:
+ static bool accepts(Kind kind)
+ {
+ switch (kind.opcode()) {
+ case Load8Z:
+ case Load8S:
+ case Load16Z:
+ case Load16S:
+ case Load:
+ case Store8:
+ case Store16:
+ case Store:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isStore(Kind kind)
+ {
+ switch (kind.opcode()) {
+ case Store8:
+ case Store16:
+ case Store:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isLoad(Kind kind)
+ {
+ return accepts(kind) && !isStore(kind);
+ }
+
+ ~MemoryValue();
+
+ int32_t offset() const { return m_offset; }
+ void setOffset(int32_t offset) { m_offset = offset; }
+
+ const HeapRange& range() const { return m_range; }
+ void setRange(const HeapRange& range) { m_range = range; }
+
+ bool isStore() const { return type() == Void; }
+ bool isLoad() const { return type() != Void; }
+
+ size_t accessByteSize() const;
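+
+    // Illustrative construction (MemoryValues are created through
+    // BasicBlock::appendNew<>, which forwards to the private constructors below;
+    // "proc", "block", "origin", and "ptr" are assumed to exist):
+    //
+    //     Value* loaded = block->appendNew<MemoryValue>(proc, Load, Int32, origin, ptr);
+    //     block->appendNew<MemoryValue>(proc, Store, origin, loaded, ptr, 8);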
+
+protected:
+ void dumpMeta(CommaPrinter& comma, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ // Use this form for Load (but not Load8Z, Load8S, or any of the Loads that have a suffix that
+ // describes the returned type).
+ MemoryValue(Kind kind, Type type, Origin origin, Value* pointer, int32_t offset = 0)
+ : Value(CheckedOpcode, kind, type, origin, pointer)
+ , m_offset(offset)
+ , m_range(HeapRange::top())
+ {
+ if (!ASSERT_DISABLED) {
+ switch (kind.opcode()) {
+ case Load:
+ break;
+ case Load8Z:
+ case Load8S:
+ case Load16Z:
+ case Load16S:
+ ASSERT(type == Int32);
+ break;
+ case Store8:
+ case Store16:
+ case Store:
+ ASSERT(type == Void);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+ }
+
+ // Use this form for loads where the return type is implied.
+ MemoryValue(Kind kind, Origin origin, Value* pointer, int32_t offset = 0)
+ : MemoryValue(kind, Int32, origin, pointer, offset)
+ {
+ }
+
+ // Use this form for stores.
+ MemoryValue(Kind kind, Origin origin, Value* value, Value* pointer, int32_t offset = 0)
+ : Value(CheckedOpcode, kind, Void, origin, value, pointer)
+ , m_offset(offset)
+ , m_range(HeapRange::top())
+ {
+ if (!ASSERT_DISABLED) {
+ switch (kind.opcode()) {
+ case Store8:
+ case Store16:
+ case Store:
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+ }
+
+ int32_t m_offset { 0 };
+ HeapRange m_range;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3MoveConstants.cpp b/Source/JavaScriptCore/b3/B3MoveConstants.cpp
new file mode 100644
index 000000000..0d987738e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MoveConstants.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3MoveConstants.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class MoveConstants {
+public:
+ MoveConstants(Procedure& proc)
+ : m_proc(proc)
+ , m_insertionSet(proc)
+ {
+ }
+
+ void run()
+ {
+ hoistConstants(
+ [&] (const ValueKey& key) -> bool {
+ return key.opcode() == ConstFloat || key.opcode() == ConstDouble;
+ });
+
+ lowerFPConstants();
+
+ hoistConstants(
+ [&] (const ValueKey& key) -> bool {
+ return key.opcode() == Const32 || key.opcode() == Const64 || key.opcode() == ArgumentReg;
+ });
+ }
+
+private:
+ template<typename Filter>
+ void hoistConstants(const Filter& filter)
+ {
+ Dominators& dominators = m_proc.dominators();
+ HashMap<ValueKey, Value*> valueForConstant;
+ IndexMap<BasicBlock, Vector<Value*>> materializations(m_proc.size());
+
+ // We determine where things get materialized based on where they are used.
+ for (BasicBlock* block : m_proc) {
+ for (Value* value : *block) {
+ for (Value*& child : value->children()) {
+ ValueKey key = child->key();
+ if (!filter(key))
+ continue;
+
+ auto result = valueForConstant.add(key, child);
+ if (result.isNewEntry) {
+ // Assume that this block is where we want to materialize the value.
+ child->owner = block;
+ continue;
+ }
+
+ // Make 'value' use the canonical constant rather than the one it was using.
+ child = result.iterator->value;
+
+ // Determine the least common dominator. That's the lowest place in the CFG where
+ // we could materialize the constant while still having only one materialization
+ // in the resulting code.
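+                    // For example, if the canonical constant currently lives in block
+                    // #7 and this use is in block #5, we walk #7's idom chain upward
+                    // until we reach a block that dominates #5; that block becomes the
+                    // new materialization point. (Block numbers are illustrative.)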
+ while (!dominators.dominates(child->owner, block))
+ child->owner = dominators.idom(child->owner);
+ }
+ }
+ }
+
+ // Make sure that each basic block knows what to materialize. This also refines the
+ // materialization block based on execution frequency. It finds the minimum block frequency
+ // of all of its dominators, and selects the closest block amongst those that are tied for
+ // lowest frequency.
+ for (auto& entry : valueForConstant) {
+ Value* value = entry.value;
+ for (BasicBlock* block = value->owner; block; block = dominators.idom(block)) {
+ if (block->frequency() < value->owner->frequency())
+ value->owner = block;
+ }
+ materializations[value->owner].append(value);
+ }
+
+        // Get rid of Values that are fast constants but aren't canonical. Also remove the canonical
+ // ones from the CFG, since we're going to reinsert them elsewhere.
+ for (BasicBlock* block : m_proc) {
+ for (Value*& value : *block) {
+ ValueKey key = value->key();
+ if (!filter(key))
+ continue;
+
+ if (valueForConstant.get(key) == value)
+ value = m_proc.add<Value>(Nop, value->origin());
+ else
+ value->replaceWithNopIgnoringType();
+ }
+ }
+
+ // Now make sure that we move constants to where they are supposed to go. Again, we do this
+ // based on uses.
+ for (BasicBlock* block : m_proc) {
+ for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+ Value* value = block->at(valueIndex);
+
+ // This finds the outermost (best) block last. So, the functor overrides the result
+ // each time it finds something acceptable.
+ auto findBestConstant = [&] (const auto& predicate) -> Value* {
+ Value* result = nullptr;
+ dominators.forAllDominatorsOf(
+ block,
+ [&] (BasicBlock* dominator) {
+ for (Value* value : materializations[dominator]) {
+ if (predicate(value)) {
+ result = value;
+ break;
+ }
+ }
+ });
+ return result;
+ };
+
+ // We call this when we have found a constant that we'd like to use. It's possible that
+                // we have computed that the constant should be materialized in this block, but we
+ // haven't inserted it yet. This inserts the constant if necessary.
+ auto materialize = [&] (Value* child) {
+ ValueKey key = child->key();
+ if (!filter(key))
+ return;
+
+ // If we encounter a fast constant, then it must be canonical, since we already
+ // got rid of the non-canonical ones.
+ ASSERT(valueForConstant.get(key) == child);
+
+ if (child->owner != block) {
+ // This constant isn't our problem. It's going to be materialized in another
+ // block.
+ return;
+ }
+
+ // We're supposed to materialize this constant in this block, and we haven't
+ // done it yet.
+ m_insertionSet.insertValue(valueIndex, child);
+ child->owner = nullptr;
+ };
+
+ if (MemoryValue* memoryValue = value->as<MemoryValue>()) {
+ Value* pointer = memoryValue->lastChild();
+ if (pointer->hasIntPtr() && filter(pointer->key())) {
+ auto desiredOffset = [&] (Value* otherPointer) -> intptr_t {
+ // We would turn this:
+ //
+ // Load(@p, offset = c)
+ //
+ // into this:
+ //
+ // Load(@q, offset = ?)
+ //
+ // The offset should be c + @p - @q, because then we're loading from:
+ //
+ // @q + c + @p - @q
+ uintptr_t c = static_cast<uintptr_t>(static_cast<intptr_t>(memoryValue->offset()));
+ uintptr_t p = pointer->asIntPtr();
+ uintptr_t q = otherPointer->asIntPtr();
+ return c + p - q;
+ };
+
+ Value* bestPointer = findBestConstant(
+ [&] (Value* candidatePointer) -> bool {
+ if (!candidatePointer->hasIntPtr())
+ return false;
+
+ intptr_t offset = desiredOffset(candidatePointer);
+ if (!B3::isRepresentableAs<int32_t>(static_cast<int64_t>(offset)))
+ return false;
+ return Air::Arg::isValidAddrForm(
+ static_cast<int32_t>(offset),
+ Air::Arg::widthForBytes(memoryValue->accessByteSize()));
+ });
+
+ if (bestPointer) {
+ memoryValue->lastChild() = bestPointer;
+ memoryValue->setOffset(desiredOffset(bestPointer));
+ }
+ }
+ } else {
+ switch (value->opcode()) {
+ case Add:
+ case Sub: {
+ Value* addend = value->child(1);
+ if (!addend->hasInt() || !filter(addend->key()))
+ break;
+ int64_t addendConst = addend->asInt();
+ Value* bestAddend = findBestConstant(
+ [&] (Value* candidateAddend) -> bool {
+ if (candidateAddend->type() != addend->type())
+ return false;
+ if (!candidateAddend->hasInt())
+ return false;
+ return candidateAddend == addend
+ || candidateAddend->asInt() == -addendConst;
+ });
+ if (!bestAddend || bestAddend == addend)
+ break;
+ materialize(value->child(0));
+ materialize(bestAddend);
+ value->replaceWithIdentity(
+ m_insertionSet.insert<Value>(
+ valueIndex, value->opcode() == Add ? Sub : Add, value->origin(),
+ value->child(0), bestAddend));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ for (Value* child : value->children())
+ materialize(child);
+ }
+
+ // We may have some constants that need to be materialized right at the end of this
+ // block.
+ for (Value* value : materializations[block]) {
+ if (!value->owner) {
+ // It's already materialized in this block.
+ continue;
+ }
+
+ m_insertionSet.insertValue(block->size() - 1, value);
+ }
+ m_insertionSet.execute(block);
+ }
+ }
+
+ void lowerFPConstants()
+ {
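+        // Each non-zero ConstDouble/ConstFloat gets an 8-byte slot in a data section,
+        // and its uses are rewritten as Loads from that table. The exception is a
+        // stackmap child with an Any constraint: that child keeps a freshly
+        // materialized constant (tracked in offLimits), since the stackmap can encode
+        // the constant directly.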
+ for (Value* value : m_proc.values()) {
+ ValueKey key = value->key();
+ if (goesInTable(key))
+ m_constTable.add(key, m_constTable.size());
+ }
+
+ m_dataSection = static_cast<int64_t*>(m_proc.addDataSection(m_constTable.size() * sizeof(int64_t)));
+ for (auto& entry : m_constTable)
+ m_dataSection[entry.value] = entry.key.value();
+
+ IndexSet<Value> offLimits;
+ for (BasicBlock* block : m_proc) {
+ for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+ StackmapValue* value = block->at(valueIndex)->as<StackmapValue>();
+ if (!value)
+ continue;
+
+ for (unsigned childIndex = 0; childIndex < value->numChildren(); ++childIndex) {
+ if (!value->constrainedChild(childIndex).rep().isAny())
+ continue;
+
+ Value*& child = value->child(childIndex);
+ ValueKey key = child->key();
+ if (!goesInTable(key))
+ continue;
+
+ child = m_insertionSet.insertValue(
+ valueIndex, key.materialize(m_proc, value->origin()));
+ offLimits.add(child);
+ }
+ }
+
+ m_insertionSet.execute(block);
+ }
+
+ for (BasicBlock* block : m_proc) {
+ for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+ Value* value = block->at(valueIndex);
+ ValueKey key = value->key();
+ if (!goesInTable(key))
+ continue;
+ if (offLimits.contains(value))
+ continue;
+
+ Value* tableBase = m_insertionSet.insertIntConstant(
+ valueIndex, value->origin(), pointerType(),
+ bitwise_cast<intptr_t>(m_dataSection));
+ Value* result = m_insertionSet.insert<MemoryValue>(
+ valueIndex, Load, value->type(), value->origin(), tableBase,
+ sizeof(int64_t) * m_constTable.get(key));
+ value->replaceWithIdentity(result);
+ }
+
+ m_insertionSet.execute(block);
+ }
+ }
+
+ bool goesInTable(const ValueKey& key)
+ {
+ return (key.opcode() == ConstDouble && key != doubleZero())
+ || (key.opcode() == ConstFloat && key != floatZero());
+ }
+
+ static ValueKey doubleZero()
+ {
+ return ValueKey(ConstDouble, Double, 0.0);
+ }
+
+ static ValueKey floatZero()
+ {
+        return ValueKey(ConstFloat, Float, 0.0);
+ }
+
+ Procedure& m_proc;
+ Vector<Value*> m_toRemove;
+ HashMap<ValueKey, unsigned> m_constTable;
+ int64_t* m_dataSection;
+ HashMap<ValueKey, Value*> m_constants;
+ InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void moveConstants(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "moveConstants");
+ MoveConstants moveConstants(proc);
+ moveConstants.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3MoveConstants.h b/Source/JavaScriptCore/b3/B3MoveConstants.h
new file mode 100644
index 000000000..b9f92ffe1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3MoveConstants.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Moves large constants around, with the goal of placing them at optimal points in the program.
+
+JS_EXPORT_PRIVATE void moveConstants(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3OpaqueByproduct.h b/Source/JavaScriptCore/b3/B3OpaqueByproduct.h
new file mode 100644
index 000000000..35a2a06a4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OpaqueByproduct.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class OpaqueByproduct {
+ WTF_MAKE_NONCOPYABLE(OpaqueByproduct);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ OpaqueByproduct() { }
+ virtual ~OpaqueByproduct() { }
+
+ virtual void dump(PrintStream&) const = 0;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp b/Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp
new file mode 100644
index 000000000..f89f8bfed
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OpaqueByproducts.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3OpaqueByproducts.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+OpaqueByproducts::OpaqueByproducts()
+{
+}
+
+OpaqueByproducts::~OpaqueByproducts()
+{
+}
+
+void OpaqueByproducts::add(std::unique_ptr<OpaqueByproduct> byproduct)
+{
+ m_byproducts.append(WTFMove(byproduct));
+}
+
+void OpaqueByproducts::dump(PrintStream& out) const
+{
+ out.print("Byproducts:\n");
+ if (m_byproducts.isEmpty()) {
+ out.print(" <empty>\n");
+ return;
+ }
+ for (auto& byproduct : m_byproducts)
+ out.print(" ", *byproduct, "\n");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3OpaqueByproducts.h b/Source/JavaScriptCore/b3/B3OpaqueByproducts.h
new file mode 100644
index 000000000..e8eec113f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OpaqueByproducts.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproduct.h"
+#include <memory>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class OpaqueByproducts {
+ WTF_MAKE_NONCOPYABLE(OpaqueByproducts)
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ OpaqueByproducts();
+ JS_EXPORT_PRIVATE ~OpaqueByproducts();
+
+ size_t count() const { return m_byproducts.size(); }
+
+ void add(std::unique_ptr<OpaqueByproduct>);
+
+ void dump(PrintStream&) const;
+
+private:
+ Vector<std::unique_ptr<OpaqueByproduct>> m_byproducts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Opcode.cpp b/Source/JavaScriptCore/b3/B3Opcode.cpp
new file mode 100644
index 000000000..a0aa5a990
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Opcode.cpp
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Opcode.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+std::optional<Opcode> invertedCompare(Opcode opcode, Type type)
+{
+ switch (opcode) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case LessThan:
+ if (isInt(type))
+ return GreaterEqual;
+ return std::nullopt;
+ case GreaterThan:
+ if (isInt(type))
+ return LessEqual;
+ return std::nullopt;
+ case LessEqual:
+ if (isInt(type))
+ return GreaterThan;
+ return std::nullopt;
+ case GreaterEqual:
+ if (isInt(type))
+ return LessThan;
+ return std::nullopt;
+ case Above:
+ return BelowEqual;
+ case Below:
+ return AboveEqual;
+ case AboveEqual:
+ return Below;
+ case BelowEqual:
+ return Above;
+ default:
+ return std::nullopt;
+ }
+}
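
The std::optional return above matters because not every comparison can be inverted: for floating point, !(a < b) is not the same predicate as (a >= b) once NaN is involved, so only the integer forms of the ordered comparisons flip. A hedged sketch of the intended call pattern follows; Value::opcode(), child(), and type() are assumed from the rest of B3.

    // Sketch, not a real B3 phase: decide whether a Branch's comparison could be
    // replaced by its inverse (with the branch successors swapped).
    #include "B3Opcode.h"
    #include "B3ValueInlines.h"

    static bool canInvertBranchCondition(JSC::B3::Value* condition)
    {
        using namespace JSC::B3;
        std::optional<Opcode> inverted =
            invertedCompare(condition->opcode(), condition->child(0)->type());
        if (!inverted)
            return false; // e.g. double LessThan: NaN makes GreaterEqual a different predicate.
        // A real pass would rebuild the comparison with *inverted and swap the
        // taken/not-taken successors of the Branch.
        return true;
    }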
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Opcode opcode)
+{
+ switch (opcode) {
+ case Nop:
+ out.print("Nop");
+ return;
+ case Identity:
+ out.print("Identity");
+ return;
+ case Const32:
+ out.print("Const32");
+ return;
+ case Const64:
+ out.print("Const64");
+ return;
+ case ConstDouble:
+ out.print("ConstDouble");
+ return;
+ case ConstFloat:
+ out.print("ConstFloat");
+ return;
+ case Get:
+ out.print("Get");
+ return;
+ case Set:
+ out.print("Set");
+ return;
+ case SlotBase:
+ out.print("SlotBase");
+ return;
+ case ArgumentReg:
+ out.print("ArgumentReg");
+ return;
+ case FramePointer:
+ out.print("FramePointer");
+ return;
+ case Add:
+ out.print("Add");
+ return;
+ case Sub:
+ out.print("Sub");
+ return;
+ case Mul:
+ out.print("Mul");
+ return;
+ case Div:
+ out.print("Div");
+ return;
+ case UDiv:
+ out.print("UDiv");
+ return;
+ case Mod:
+ out.print("Mod");
+ return;
+ case UMod:
+ out.print("UMod");
+ return;
+ case Neg:
+ out.print("Neg");
+ return;
+ case BitAnd:
+ out.print("BitAnd");
+ return;
+ case BitOr:
+ out.print("BitOr");
+ return;
+ case BitXor:
+ out.print("BitXor");
+ return;
+ case Shl:
+ out.print("Shl");
+ return;
+ case SShr:
+ out.print("SShr");
+ return;
+ case ZShr:
+ out.print("ZShr");
+ return;
+ case RotR:
+ out.print("RotR");
+ return;
+ case RotL:
+ out.print("RotL");
+ return;
+ case Clz:
+ out.print("Clz");
+ return;
+ case Abs:
+ out.print("Abs");
+ return;
+ case Ceil:
+ out.print("Ceil");
+ return;
+ case Floor:
+ out.print("Floor");
+ return;
+ case Sqrt:
+ out.print("Sqrt");
+ return;
+ case BitwiseCast:
+ out.print("BitwiseCast");
+ return;
+ case SExt8:
+ out.print("SExt8");
+ return;
+ case SExt16:
+ out.print("SExt16");
+ return;
+ case SExt32:
+ out.print("SExt32");
+ return;
+ case ZExt32:
+ out.print("ZExt32");
+ return;
+ case Trunc:
+ out.print("Trunc");
+ return;
+ case IToD:
+ out.print("IToD");
+ return;
+ case IToF:
+ out.print("IToF");
+ return;
+ case FloatToDouble:
+ out.print("FloatToDouble");
+ return;
+ case DoubleToFloat:
+ out.print("DoubleToFloat");
+ return;
+ case Equal:
+ out.print("Equal");
+ return;
+ case NotEqual:
+ out.print("NotEqual");
+ return;
+ case LessThan:
+ out.print("LessThan");
+ return;
+ case GreaterThan:
+ out.print("GreaterThan");
+ return;
+ case LessEqual:
+ out.print("LessEqual");
+ return;
+ case GreaterEqual:
+ out.print("GreaterEqual");
+ return;
+ case Above:
+ out.print("Above");
+ return;
+ case Below:
+ out.print("Below");
+ return;
+ case AboveEqual:
+ out.print("AboveEqual");
+ return;
+ case BelowEqual:
+ out.print("BelowEqual");
+ return;
+ case EqualOrUnordered:
+ out.print("EqualOrUnordered");
+ return;
+ case Select:
+ out.print("Select");
+ return;
+ case Load8Z:
+ out.print("Load8Z");
+ return;
+ case Load8S:
+ out.print("Load8S");
+ return;
+ case Load16Z:
+ out.print("Load16Z");
+ return;
+ case Load16S:
+ out.print("Load16S");
+ return;
+ case Load:
+ out.print("Load");
+ return;
+ case Store8:
+ out.print("Store8");
+ return;
+ case Store16:
+ out.print("Store16");
+ return;
+ case Store:
+ out.print("Store");
+ return;
+ case WasmAddress:
+ out.print("WasmAddress");
+ return;
+ case Fence:
+ out.print("Fence");
+ return;
+ case CCall:
+ out.print("CCall");
+ return;
+ case Patchpoint:
+ out.print("Patchpoint");
+ return;
+ case CheckAdd:
+ out.print("CheckAdd");
+ return;
+ case CheckSub:
+ out.print("CheckSub");
+ return;
+ case CheckMul:
+ out.print("CheckMul");
+ return;
+ case Check:
+ out.print("Check");
+ return;
+ case WasmBoundsCheck:
+ out.print("WasmBoundsCheck");
+ return;
+ case Upsilon:
+ out.print("Upsilon");
+ return;
+ case Phi:
+ out.print("Phi");
+ return;
+ case Jump:
+ out.print("Jump");
+ return;
+ case Branch:
+ out.print("Branch");
+ return;
+ case Switch:
+ out.print("Switch");
+ return;
+ case EntrySwitch:
+ out.print("EntrySwitch");
+ return;
+ case Return:
+ out.print("Return");
+ return;
+ case Oops:
+ out.print("Oops");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Opcode.h b/Source/JavaScriptCore/b3/B3Opcode.h
new file mode 100644
index 000000000..956dba99a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Opcode.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Type.h"
+#include <wtf/Optional.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC { namespace B3 {
+
+// Warning: In B3, an Opcode is just one part of a Kind. Kind is used the way that an opcode
+// would be used in simple IRs. See B3Kind.h.
+
+enum Opcode : int16_t {
+ // A no-op that returns Void, useful for when you want to remove a value.
+ Nop,
+
+ // Polymorphic identity, usable with any value type.
+ Identity,
+
+    // Constants. Use the Const*Value classes. Constants exist in the control flow, so that we can
+    // reason about where we would construct them. Large constants are expensive to create.
+ Const32,
+ Const64,
+ ConstDouble,
+ ConstFloat,
+
+ // B3 supports non-SSA variables. These are accessed using Get and Set opcodes. Use the
+ // VariableValue class. It's a good idea to run fixSSA() to turn these into SSA. The
+ // optimizer will do that eventually, but if your input tends to use these opcodes, you
+ // should run fixSSA() directly before launching the optimizer.
+ Set,
+ Get,
+
+ // Gets the base address of a StackSlot.
+ SlotBase,
+
+ // The magical argument register. This is viewed as executing at the top of the program
+ // regardless of where in control flow you put it, and the compiler takes care to ensure that we
+ // don't clobber the value by register allocation or calls (either by saving the argument to the
+ // stack or preserving it in a callee-save register). Use the ArgumentRegValue class. The return
+ // type is either pointer() (for GPRs) or Double (for FPRs).
+ ArgumentReg,
+
+ // The frame pointer. You can put this anywhere in control flow but it will always yield the
+ // frame pointer, with a caveat: if our compiler changes the frame pointer temporarily for some
+    // silly reason, the FramePointer intrinsic will return where the frame pointer *should* be, not
+ // where it happens to be right now.
+ FramePointer,
+
+ // Polymorphic math, usable with any value type.
+ Add,
+ Sub,
+ Mul,
+ Div, // All bets are off as to what will happen when you execute this for -2^31/-1 and x/0.
+ UDiv,
+ Mod, // All bets are off as to what will happen when you execute this for -2^31%-1 and x%0.
+ UMod,
+
+ // Polymorphic negation. Note that we only need this for floating point, since integer negation
+ // is exactly like Sub(0, x). But that's not true for floating point. Sub(0, 0) is 0, while
+    // Neg(0) is -0. Also, we canonicalize Sub(0, x) into Neg(x) for integers.
+ Neg,
+
+ // Integer math.
+ BitAnd,
+ BitOr,
+ BitXor,
+ Shl,
+ SShr, // Arithmetic Shift.
+ ZShr, // Logical Shift.
+ RotR, // Rotate Right.
+ RotL, // Rotate Left.
+ Clz, // Count leading zeros.
+
+ // Floating point math.
+ Abs,
+ Ceil,
+ Floor,
+ Sqrt,
+
+ // Casts and such.
+ // Bitwise Cast of Double->Int64 or Int64->Double
+ BitwiseCast,
+ // Takes and returns Int32:
+ SExt8,
+ SExt16,
+ // Takes Int32 and returns Int64:
+ SExt32,
+ ZExt32,
+ // Does a bitwise truncation of Int64->Int32 and Double->Float:
+ Trunc,
+    // Takes an integer and returns a floating point value. Note that we don't currently provide the opposite operation,
+ // because double-to-int conversions have weirdly different semantics on different platforms. Use
+ // a patchpoint if you need to do that.
+ IToD,
+ IToF,
+ // Convert between double and float.
+ FloatToDouble,
+ DoubleToFloat,
+
+ // Polymorphic comparisons, usable with any value type. Returns int32 0 or 1. Note that "Not"
+ // is just Equal(x, 0), and "ToBoolean" is just NotEqual(x, 0).
+ Equal,
+ NotEqual,
+ LessThan,
+ GreaterThan,
+ LessEqual,
+ GreaterEqual,
+
+    // Unsigned integer comparisons. Returns int32 0 or 1.
+ Above,
+ Below,
+ AboveEqual,
+ BelowEqual,
+
+ // Unordered floating point compare: values are equal or either one is NaN.
+ EqualOrUnordered,
+
+ // SSA form of conditional move. The first child is evaluated for truthiness. If true, the second child
+ // is returned. Otherwise, the third child is returned.
+ Select,
+
+ // Memory loads. Opcode indicates how we load and the loaded type. These use MemoryValue.
+ // These return Int32:
+ Load8Z,
+ Load8S,
+ Load16Z,
+ Load16S,
+ // This returns whatever the return type is:
+ Load,
+
+ // Memory stores. Opcode indicates how the value is stored. These use MemoryValue.
+ // These take an Int32 value:
+ Store8,
+ Store16,
+ // This is a polymorphic store for Int32, Int64, Float, and Double.
+ Store,
+
+ // This is used to compute the actual address of a Wasm memory operation. It takes an IntPtr
+    // and a pinned register, then computes the appropriate IntPtr address. For the use-case of
+ // Wasm it is important that the first child initially be a ZExt32 so the top bits are cleared.
+ // We do WasmAddress(ZExt32(ptr), ...) so that we can avoid generating extraneous moves in Air.
+ WasmAddress,
+
+ // This is used to represent standalone fences - i.e. fences that are not part of other
+ // instructions. It's expressive enough to expose mfence on x86 and dmb ish/ishst on ARM. On
+ // x86, it also acts as a compiler store-store fence in those cases where it would have been a
+ // dmb ishst on ARM.
+ Fence,
+
+ // This is a regular ordinary C function call, using the system C calling convention. Make sure
+ // that the arguments are passed using the right types. The first argument is the callee.
+ CCall,
+
+ // This is a patchpoint. Use the PatchpointValue class. This is viewed as behaving like a call,
+ // but only emits code via a code generation callback. That callback gets to emit code inline.
+ // You can pass a stackmap along with constraints on how each stackmap argument must be passed.
+ // It's legal to request that a stackmap argument is in some register and it's legal to request
+ // that a stackmap argument is at some offset from the top of the argument passing area on the
+ // stack.
+ Patchpoint,
+
+ // Checked math. Use the CheckValue class. Like a Patchpoint, this takes a code generation
+    // callback. That callback gets to emit some code after the epilogue, gets to link the jump
+    // from the check, and gets to see the choice of registers. You also get to supply a stackmap. Note that you
+ // are not allowed to jump back into the mainline code from your slow path, since the compiler
+ // will assume that the execution of these instructions proves that overflow didn't happen. For
+ // example, if you have two CheckAdd's:
+ //
+ // a = CheckAdd(x, y)
+ // b = CheckAdd(x, y)
+ //
+ // Then it's valid to change this to:
+ //
+ // a = CheckAdd(x, y)
+ // b = Identity(a)
+ //
+ // This is valid regardless of the callbacks used by the two CheckAdds. They may have different
+ // callbacks. Yet, this transformation is valid even if they are different because we know that
+ // after the first CheckAdd executes, the second CheckAdd could not have possibly taken slow
+ // path. Therefore, the second CheckAdd's callback is irrelevant.
+ //
+ // Note that the first two children of these operations have ValueRep's as input constraints but do
+ // not have output constraints.
+ CheckAdd,
+ CheckSub,
+ CheckMul,
+
+ // Check that side-exits. Use the CheckValue class. Like CheckAdd and friends, this has a
+ // stackmap with a generation callback. This takes an int argument that this branches on, with
+ // full branch fusion in the instruction selector. A true value jumps to the generator's slow
+    // path. Note that the predicate child has an input ValueRep. The input constraint must be
+ // WarmAny. It will not have an output constraint.
+ Check,
+
+    // Special Wasm opcode that takes an Int32, a special pinned gpr and an offset. This node exists
+ // to allow us to CSE WasmBoundsChecks if both use the same pointer and one dominates the other.
+ // Without some such node B3 would not have enough information about the inner workings of wasm
+ // to be able to perform such optimizations.
+ WasmBoundsCheck,
+
+ // SSA support, in the style of DFG SSA.
+ Upsilon, // This uses the UpsilonValue class.
+ Phi,
+
+ // Jump.
+ Jump,
+
+ // Polymorphic branch, usable with any integer type. Branches if not equal to zero. The 0-index
+ // successor is the true successor.
+ Branch,
+
+ // Switch. Switches over either Int32 or Int64. Uses the SwitchValue class.
+ Switch,
+
+ // Multiple entrypoints are supported via the EntrySwitch operation. Place this in the root
+ // block and list the entrypoints as the successors. All blocks backwards-reachable from
+ // EntrySwitch are duplicated for each entrypoint.
+ EntrySwitch,
+
+ // Return. Note that B3 procedures don't know their return type, so this can just return any
+ // type.
+ Return,
+
+ // This is a terminal that indicates that we will never get here.
+ Oops
+};
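
The Set/Get description above promises non-SSA variables. A hedged sketch of what a frontend that prefers mutable locals might emit, using Procedure::addVariable (defined later in this patch) and the VariableValue class that the comment names; the exact VariableValue constructor arguments and the BasicBlock::appendNew<> helper are assumptions, not something this header defines.

    // Sketch under assumed construction helpers.
    using namespace JSC::B3;

    static void emitCounterIncrement(Procedure& proc, BasicBlock* block, Origin origin)
    {
        Variable* counter = proc.addVariable(Int32);
        Value* oldValue = block->appendNew<VariableValue>(proc, Get, origin, counter);
        Value* one = block->appendNew<Const32Value>(proc, origin, 1);
        Value* newValue = block->appendNew<Value>(proc, Add, origin, oldValue, one);
        block->appendNew<VariableValue>(proc, Set, origin, counter, newValue);
        // Running fixSSA(), or just letting the optimizer do it, later turns these
        // Get/Set pairs into ordinary SSA values.
    }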
+
+inline bool isCheckMath(Opcode opcode)
+{
+ switch (opcode) {
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ return true;
+ default:
+ return false;
+ }
+}
+
+std::optional<Opcode> invertedCompare(Opcode, Type);
+
+inline Opcode constPtrOpcode()
+{
+ if (is64Bit())
+ return Const64;
+ return Const32;
+}
+
+inline bool isConstant(Opcode opcode)
+{
+ switch (opcode) {
+ case Const32:
+ case Const64:
+ case ConstDouble:
+ case ConstFloat:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isDefinitelyTerminal(Opcode opcode)
+{
+ switch (opcode) {
+ case Jump:
+ case Branch:
+ case Switch:
+ case Oops:
+ case Return:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Opcode);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
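
The CheckAdd/CheckSub/CheckMul block above says that checked math carries a generation callback for its slow path. A hedged sketch of constructing one, using the CheckValue class that the comment names; the appendNew<CheckValue>(...) shape and setGenerator() are assumptions drawn from how the rest of B3 builds stackmap values, not APIs this header declares.

    // Sketch only; the exact CheckValue/StackmapGenerationParams API is assumed.
    using namespace JSC::B3;

    static Value* emitCheckedAdd(Procedure& proc, BasicBlock* block, Origin origin,
        Value* left, Value* right)
    {
        CheckValue* checkedAdd = block->appendNew<CheckValue>(proc, CheckAdd, origin, left, right);
        checkedAdd->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams&) {
            // Slow path: the add overflowed. A real client would side-exit here; per
            // the comment above it must not jump back into the main line.
            jit.breakpoint();
        });
        return checkedAdd;
    }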
diff --git a/Source/JavaScriptCore/b3/B3Origin.cpp b/Source/JavaScriptCore/b3/B3Origin.cpp
new file mode 100644
index 000000000..8baf012ea
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Origin.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Origin.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+void Origin::dump(PrintStream& out) const
+{
+ out.print("Origin(", RawPointer(m_data), ")");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Origin.h b/Source/JavaScriptCore/b3/B3Origin.h
new file mode 100644
index 000000000..47fd10fd9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Origin.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// Whoever generates B3 IR can choose to put origins on values. When you do this, B3 will be able to
+// tell you, down to the machine code, which instruction corresponds to which origin. B3
+// transformations must preserve Origins carefully. It's an error to write a transformation that
+// either drops Origins or lies about them.
+class Origin {
+public:
+ explicit Origin(const void* data = nullptr)
+ : m_data(data)
+ {
+ }
+
+ explicit operator bool() const { return !!m_data; }
+
+ const void* data() const { return m_data; }
+
+ bool operator==(const Origin& other) const { return m_data == other.m_data; }
+
+ // You should avoid using this. Use OriginDump instead.
+ void dump(PrintStream&) const;
+
+private:
+ const void* m_data;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
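
A hedged sketch of how a frontend might use Origin: wrap a pointer to whatever it considers the current source construct and pass it whenever it creates values. The Procedure::add<Const32Value>(...) call mirrors Procedure::addIntConstant later in this patch; the frontendNode pointer is purely illustrative.

    // Sketch: the frontend node is opaque to B3; it only travels along as an Origin.
    using namespace JSC::B3;

    static Value* emitConstantForNode(Procedure& proc, const void* frontendNode, int32_t payload)
    {
        Origin origin(frontendNode);
        // B3 threads this origin through to the machine code it eventually emits
        // for the value.
        return proc.add<Const32Value>(origin, payload);
    }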
diff --git a/Source/JavaScriptCore/b3/B3OriginDump.cpp b/Source/JavaScriptCore/b3/B3OriginDump.cpp
new file mode 100644
index 000000000..da7afeeb8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OriginDump.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3OriginDump.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
+void OriginDump::dump(PrintStream& out) const
+{
+ if (m_proc)
+ m_proc->printOrigin(out, m_origin);
+ else
+ out.print(m_origin);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3OriginDump.h b/Source/JavaScriptCore/b3/B3OriginDump.h
new file mode 100644
index 000000000..5392ac911
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3OriginDump.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class OriginDump {
+public:
+ OriginDump(const Procedure* proc, Origin origin)
+ : m_proc(proc)
+ , m_origin(origin)
+ {
+ }
+
+ void dump(PrintStream& out) const;
+
+private:
+ const Procedure* m_proc;
+ Origin m_origin;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
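
The Origin header above says to prefer OriginDump when printing. A minimal sketch of the difference, assuming WTF's dataLog() prints anything that has a dump(PrintStream&) method, which both classes provide.

    // Sketch: the first line prints the raw "Origin(0x...)" form, the second prints
    // whatever the Procedure's origin printer produces, if one was installed.
    #include <wtf/DataLog.h>
    using namespace JSC::B3;

    static void logOrigin(const Procedure& proc, Origin origin)
    {
        dataLog(origin, "\n");
        dataLog(OriginDump(&proc, origin), "\n");
    }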
diff --git a/Source/JavaScriptCore/b3/B3PCToOriginMap.h b/Source/JavaScriptCore/b3/B3PCToOriginMap.h
new file mode 100644
index 000000000..5e6ce451d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PCToOriginMap.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+#include "MacroAssembler.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class PCToOriginMap {
+ WTF_MAKE_NONCOPYABLE(PCToOriginMap);
+public:
+ PCToOriginMap()
+ { }
+
+ PCToOriginMap(PCToOriginMap&& other)
+ : m_ranges(WTFMove(other.m_ranges))
+ { }
+
+ struct OriginRange {
+ MacroAssembler::Label label;
+ Origin origin;
+ };
+
+ void appendItem(MacroAssembler::Label label, Origin origin)
+ {
+ if (m_ranges.size()) {
+ if (m_ranges.last().label == label)
+ return;
+ }
+
+ m_ranges.append(OriginRange{label, origin});
+ }
+
+ const Vector<OriginRange>& ranges() const { return m_ranges; }
+
+private:
+ Vector<OriginRange> m_ranges;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
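
A hedged sketch of the intended flow: while emitting code, record a label/origin pair whenever the current origin changes, then walk ranges() afterwards to map code positions back to origins. The jit.label() call is the usual MacroAssembler way to grab the current position and is assumed here; the rest is the appendItem()/ranges() API declared above.

    // Sketch; turning Labels into absolute addresses (via LinkBuffer) is omitted.
    using namespace JSC::B3;

    static void noteOrigin(PCToOriginMap& map, CCallHelpers& jit, Origin origin)
    {
        // appendItem() already drops a second item at the same label, so calling this
        // for every emitted value is safe.
        map.appendItem(jit.label(), origin);
    }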
diff --git a/Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp b/Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp
new file mode 100644
index 000000000..c5fc5885d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PatchpointSpecial.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3PatchpointSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirGenerationContext.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+PatchpointSpecial::PatchpointSpecial()
+{
+}
+
+PatchpointSpecial::~PatchpointSpecial()
+{
+}
+
+void PatchpointSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+ PatchpointValue* patchpoint = inst.origin->as<PatchpointValue>();
+ unsigned argIndex = 1;
+
+ if (patchpoint->type() != Void) {
+ Arg::Role role;
+ if (patchpoint->resultConstraint.kind() == ValueRep::SomeEarlyRegister)
+ role = Arg::EarlyDef;
+ else
+ role = Arg::Def;
+
+ callback(inst.args[argIndex++], role, inst.origin->airType(), inst.origin->airWidth());
+ }
+
+ forEachArgImpl(0, argIndex, inst, SameAsRep, std::nullopt, callback);
+ argIndex += inst.origin->numChildren();
+
+ for (unsigned i = patchpoint->numGPScratchRegisters; i--;)
+ callback(inst.args[argIndex++], Arg::Scratch, Arg::GP, Arg::conservativeWidth(Arg::GP));
+ for (unsigned i = patchpoint->numFPScratchRegisters; i--;)
+ callback(inst.args[argIndex++], Arg::Scratch, Arg::FP, Arg::conservativeWidth(Arg::FP));
+}
+
+bool PatchpointSpecial::isValid(Inst& inst)
+{
+ PatchpointValue* patchpoint = inst.origin->as<PatchpointValue>();
+ unsigned argIndex = 1;
+
+ if (inst.origin->type() != Void) {
+ if (argIndex >= inst.args.size())
+ return false;
+
+ if (!isArgValidForValue(inst.args[argIndex], patchpoint))
+ return false;
+ if (!isArgValidForRep(code(), inst.args[argIndex], patchpoint->resultConstraint))
+ return false;
+ argIndex++;
+ }
+
+ if (!isValidImpl(0, argIndex, inst))
+ return false;
+ argIndex += patchpoint->numChildren();
+
+ if (argIndex + patchpoint->numGPScratchRegisters + patchpoint->numFPScratchRegisters
+ != inst.args.size())
+ return false;
+
+ for (unsigned i = patchpoint->numGPScratchRegisters; i--;) {
+ Arg arg = inst.args[argIndex++];
+ if (!arg.isGPTmp())
+ return false;
+ }
+ for (unsigned i = patchpoint->numFPScratchRegisters; i--;) {
+ Arg arg = inst.args[argIndex++];
+ if (!arg.isFPTmp())
+ return false;
+ }
+
+ return true;
+}
+
+bool PatchpointSpecial::admitsStack(Inst& inst, unsigned argIndex)
+{
+ if (inst.origin->type() == Void)
+ return admitsStackImpl(0, 1, inst, argIndex);
+
+ if (argIndex == 1) {
+ switch (inst.origin->as<PatchpointValue>()->resultConstraint.kind()) {
+ case ValueRep::WarmAny:
+ case ValueRep::StackArgument:
+ return true;
+ case ValueRep::SomeRegister:
+ case ValueRep::SomeEarlyRegister:
+ case ValueRep::Register:
+ case ValueRep::LateRegister:
+ return false;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+ }
+
+ return admitsStackImpl(0, 2, inst, argIndex);
+}
+
+CCallHelpers::Jump PatchpointSpecial::generate(
+ Inst& inst, CCallHelpers& jit, GenerationContext& context)
+{
+ PatchpointValue* value = inst.origin->as<PatchpointValue>();
+ ASSERT(value);
+
+ Vector<ValueRep> reps;
+ unsigned offset = 1;
+ if (inst.origin->type() != Void)
+ reps.append(repForArg(*context.code, inst.args[offset++]));
+ reps.appendVector(repsImpl(context, 0, offset, inst));
+ offset += value->numChildren();
+
+ StackmapGenerationParams params(value, reps, context);
+
+ for (unsigned i = value->numGPScratchRegisters; i--;)
+ params.m_gpScratch.append(inst.args[offset++].gpr());
+ for (unsigned i = value->numFPScratchRegisters; i--;)
+ params.m_fpScratch.append(inst.args[offset++].fpr());
+
+ value->m_generator->run(jit, params);
+
+ return CCallHelpers::Jump();
+}
+
+bool PatchpointSpecial::isTerminal(Inst& inst)
+{
+ return inst.origin->as<PatchpointValue>()->effects.terminal;
+}
+
+void PatchpointSpecial::dumpImpl(PrintStream& out) const
+{
+ out.print("Patchpoint");
+}
+
+void PatchpointSpecial::deepDumpImpl(PrintStream& out) const
+{
+ out.print("Lowered B3::PatchpointValue.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PatchpointSpecial.h b/Source/JavaScriptCore/b3/B3PatchpointSpecial.h
new file mode 100644
index 000000000..4e1b2a319
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PatchpointSpecial.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackmapSpecial.h"
+
+namespace JSC { namespace B3 {
+
+// This is a special that recognizes that there are two uses of Patchpoint: Void and non-Void.
+// In the Void case, the syntax of the Air Patch instruction is:
+//
+// Patch &patchpoint, args...
+//
+// Where "args..." are the lowered arguments to the Patchpoint instruction. In the non-Void case
+// we will have:
+//
+// Patch &patchpoint, result, args...
+
+class PatchpointSpecial : public StackmapSpecial {
+public:
+ PatchpointSpecial();
+ virtual ~PatchpointSpecial();
+
+protected:
+ void forEachArg(Air::Inst&, const ScopedLambda<Air::Inst::EachArgCallback>&) override;
+ bool isValid(Air::Inst&) override;
+ bool admitsStack(Air::Inst&, unsigned argIndex) override;
+
+    // NOTE: the generate method simply runs the patchpoint's generator over the lowered stackmap
+    // params; it does not emit any control flow of its own (see B3PatchpointSpecial.cpp).
+
+ CCallHelpers::Jump generate(Air::Inst&, CCallHelpers&, Air::GenerationContext&) override;
+
+ bool isTerminal(Air::Inst&) override;
+
+ void dumpImpl(PrintStream&) const override;
+ void deepDumpImpl(PrintStream&) const override;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PatchpointValue.cpp b/Source/JavaScriptCore/b3/B3PatchpointValue.cpp
new file mode 100644
index 000000000..b33c558bf
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PatchpointValue.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3PatchpointValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+PatchpointValue::~PatchpointValue()
+{
+}
+
+void PatchpointValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ Base::dumpMeta(comma, out);
+ out.print(comma, "resultConstraint = ", resultConstraint);
+ if (numGPScratchRegisters)
+ out.print(comma, "numGPScratchRegisters = ", numGPScratchRegisters);
+ if (numFPScratchRegisters)
+ out.print(comma, "numFPScratchRegisters = ", numFPScratchRegisters);
+}
+
+Value* PatchpointValue::cloneImpl() const
+{
+ return new PatchpointValue(*this);
+}
+
+PatchpointValue::PatchpointValue(Type type, Origin origin)
+ : Base(CheckedOpcode, Patchpoint, type, origin)
+ , effects(Effects::forCall())
+ , resultConstraint(type == Void ? ValueRep::WarmAny : ValueRep::SomeRegister)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PatchpointValue.h b/Source/JavaScriptCore/b3/B3PatchpointValue.h
new file mode 100644
index 000000000..3378dc410
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PatchpointValue.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Effects.h"
+#include "B3StackmapValue.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class PatchpointValue : public StackmapValue {
+public:
+ typedef StackmapValue Base;
+
+ static bool accepts(Kind kind) { return kind == Patchpoint; }
+
+ ~PatchpointValue();
+
+ // The effects of the patchpoint. This defaults to Effects::forCall(), but you can set it to anything.
+ //
+ // If there are no effects, B3 is free to assume any use of this PatchpointValue can be replaced with
+ // a use of a different PatchpointValue, so long as the other one also has no effects and has the
+ // same children. Note that this comparison ignores child constraints, the result constraint, and all
+ // other StackmapValue meta-data. If there are read effects but not write effects, then this same sort
+ // of substitution could be made so long as there are no interfering writes.
+ Effects effects;
+
+ // The input representation (i.e. constraint) of the return value. This defaults to WarmAny if the
+ // type is Void and it defaults to SomeRegister otherwise. It's illegal to mess with this if the type
+ // is Void. Otherwise you can set this to any input constraint.
+ ValueRep resultConstraint;
+
+    // The number of scratch registers that this patchpoint gets. Each scratch register is guaranteed
+    // to be different from any input register and from the destination register. It's also guaranteed
+    // not to be clobbered either early or late. These are 0 by default.
+ uint8_t numGPScratchRegisters { 0 };
+ uint8_t numFPScratchRegisters { 0 };
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ JS_EXPORT_PRIVATE PatchpointValue(Type, Origin);
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
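
A hedged sketch of a patchpoint that consumes one value, produces one, and asks for a GP scratch register, exercising the effects, resultConstraint default, and numGPScratchRegisters fields documented above. append(), setGenerator(), params[i].gpr(), and params.gpScratch() are assumed from the wider stackmap API; the generator body is illustrative only.

    // Sketch under the assumed StackmapValue/StackmapGenerationParams API.
    using namespace JSC::B3;

    static Value* emitDoubledViaPatchpoint(Procedure& proc, BasicBlock* block, Origin origin, Value* input)
    {
        PatchpointValue* patchpoint = block->appendNew<PatchpointValue>(proc, Int64, origin);
        patchpoint->effects = Effects::none();    // pure, so B3 may CSE or drop it
        patchpoint->numGPScratchRegisters = 1;    // scratch distinct from inputs and result
        patchpoint->append(input, ValueRep::SomeRegister);
        patchpoint->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            GPRReg scratch = params.gpScratch(0);
            jit.move(params[1].gpr(), scratch);   // params[0] is the result, params[1] the input
            jit.add64(scratch, scratch);
            jit.move(scratch, params[0].gpr());
        });
        return patchpoint;
    }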
diff --git a/Source/JavaScriptCore/b3/B3PhaseScope.cpp b/Source/JavaScriptCore/b3/B3PhaseScope.cpp
new file mode 100644
index 000000000..27b22de21
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PhaseScope.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3PhaseScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include "B3Procedure.h"
+#include "B3Validate.h"
+#include <wtf/DataLog.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC { namespace B3 {
+
+PhaseScope::PhaseScope(Procedure& procedure, const char* name)
+ : m_procedure(procedure)
+ , m_name(name)
+ , m_timingScope(name)
+{
+ if (shouldDumpIRAtEachPhase(B3Mode)) {
+ dataLog("B3 after ", procedure.lastPhaseName(), ", before ", name, ":\n");
+ dataLog(procedure);
+ }
+
+ if (shouldSaveIRBeforePhase())
+ m_dumpBefore = toCString(procedure);
+}
+
+PhaseScope::~PhaseScope()
+{
+ m_procedure.setLastPhaseName(m_name);
+ if (shouldValidateIRAtEachPhase())
+ validate(m_procedure, m_dumpBefore.data());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PhaseScope.h b/Source/JavaScriptCore/b3/B3PhaseScope.h
new file mode 100644
index 000000000..a17698848
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PhaseScope.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3TimingScope.h"
+#include <wtf/Noncopyable.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class PhaseScope {
+ WTF_MAKE_NONCOPYABLE(PhaseScope);
+public:
+ PhaseScope(Procedure&, const char* name);
+ ~PhaseScope(); // this does validation
+
+private:
+ Procedure& m_procedure;
+ const char* m_name;
+ TimingScope m_timingScope;
+ CString m_dumpBefore;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
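
The typical shape of a B3 phase, as a hedged sketch: construct a PhaseScope at the top of the phase function. Its constructor handles the dump-IR-before-phase options and its destructor records the phase name and validates, exactly as the .cpp above implements; "myNewPhase" is a hypothetical phase name.

    // Sketch of the usual pattern.
    using namespace JSC::B3;

    void runMyNewPhase(Procedure& proc)
    {
        PhaseScope phaseScope(proc, "myNewPhase");

        // ... mutate proc here ...

        // On scope exit, ~PhaseScope sets the last phase name and, when validation at
        // each phase is enabled, runs validate() on the result.
    }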
diff --git a/Source/JavaScriptCore/b3/B3PhiChildren.cpp b/Source/JavaScriptCore/b3/B3PhiChildren.cpp
new file mode 100644
index 000000000..3b9b4e244
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PhiChildren.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3PhiChildren.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+PhiChildren::PhiChildren(Procedure& proc)
+ : m_upsilons(proc.values().size())
+{
+ for (Value* value : proc.values()) {
+ if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+ Value* phi = upsilon->phi();
+ Vector<UpsilonValue*>& vector = m_upsilons[phi];
+ if (vector.isEmpty())
+ m_phis.append(phi);
+ vector.append(upsilon);
+ }
+ }
+}
+
+PhiChildren::~PhiChildren()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3PhiChildren.h b/Source/JavaScriptCore/b3/B3PhiChildren.h
new file mode 100644
index 000000000..22b827730
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PhiChildren.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include "B3UpsilonValue.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+class PhiChildren {
+public:
+ PhiChildren(Procedure&);
+ ~PhiChildren();
+
+ class ValueCollection {
+ public:
+ ValueCollection(Vector<UpsilonValue*>* values = nullptr)
+ : m_values(values)
+ {
+ }
+
+ unsigned size() const { return m_values->size(); }
+ Value* at(unsigned index) const { return m_values->at(index)->child(0); }
+ Value* operator[](unsigned index) const { return at(index); }
+
+ bool contains(Value* value) const
+ {
+ for (unsigned i = size(); i--;) {
+ if (at(i) == value)
+ return true;
+ }
+ return false;
+ }
+
+ class iterator {
+ public:
+ iterator(Vector<UpsilonValue*>* values = nullptr, unsigned index = 0)
+ : m_values(values)
+ , m_index(index)
+ {
+ }
+
+ Value* operator*() const
+ {
+ return m_values->at(m_index)->child(0);
+ }
+
+ iterator& operator++()
+ {
+ m_index++;
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ ASSERT(m_values == other.m_values);
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ Vector<UpsilonValue*>* m_values;
+ unsigned m_index;
+ };
+
+ iterator begin() const { return iterator(m_values); }
+ iterator end() const { return iterator(m_values, m_values->size()); }
+
+ private:
+ Vector<UpsilonValue*>* m_values;
+ };
+
+ class UpsilonCollection {
+ public:
+ UpsilonCollection()
+ {
+ }
+
+ UpsilonCollection(PhiChildren* phiChildren, Value* value, Vector<UpsilonValue*>* values)
+ : m_phiChildren(phiChildren)
+ , m_value(value)
+ , m_values(values)
+ {
+ }
+
+ unsigned size() const { return m_values->size(); }
+ Value* at(unsigned index) const { return m_values->at(index); }
+ Value* operator[](unsigned index) const { return at(index); }
+
+ bool contains(Value* value) const { return m_values->contains(value); }
+
+ typedef Vector<UpsilonValue*>::const_iterator iterator;
+ Vector<UpsilonValue*>::const_iterator begin() const { return m_values->begin(); }
+ Vector<UpsilonValue*>::const_iterator end() const { return m_values->end(); }
+
+ ValueCollection values() { return ValueCollection(m_values); }
+
+ template<typename Functor>
+ void forAllTransitiveIncomingValues(const Functor& functor)
+ {
+ if (m_value->opcode() != Phi) {
+ functor(m_value);
+ return;
+ }
+
+ GraphNodeWorklist<Value*> worklist;
+ worklist.push(m_value);
+ while (Value* phi = worklist.pop()) {
+ for (Value* child : m_phiChildren->at(phi).values()) {
+ if (child->opcode() == Phi)
+ worklist.push(child);
+ else
+ functor(child);
+ }
+ }
+ }
+
+ bool transitivelyUses(Value* candidate)
+ {
+ bool result = false;
+ forAllTransitiveIncomingValues(
+ [&] (Value* child) {
+ result |= child == candidate;
+ });
+ return result;
+ }
+
+ private:
+ PhiChildren* m_phiChildren { nullptr };
+ Value* m_value { nullptr };
+ Vector<UpsilonValue*>* m_values { nullptr };
+ };
+
+ UpsilonCollection at(Value* value) { return UpsilonCollection(this, value, &m_upsilons[value]); }
+ UpsilonCollection operator[](Value* value) { return at(value); }
+
+ const Vector<Value*, 8>& phis() const { return m_phis; }
+
+private:
+ IndexMap<Value, Vector<UpsilonValue*>> m_upsilons;
+ Vector<Value*, 8> m_phis;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
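
A hedged sketch of consuming PhiChildren: build it once, then for each Phi look either at the Upsilons that feed it or, transitively, at the non-Phi values that can flow into it. Only the accessors declared above plus dataLog() and pointerDump() (used elsewhere in this patch) are relied on.

    // Sketch: report the number of transitive (non-Phi) incoming values for each Phi.
    using namespace JSC::B3;

    static void reportPhiFanIn(Procedure& proc)
    {
        PhiChildren phiChildren(proc);
        for (Value* phi : phiChildren.phis()) {
            unsigned incoming = 0;
            phiChildren[phi].forAllTransitiveIncomingValues([&] (Value*) {
                incoming++;
            });
            dataLog("Phi ", pointerDump(phi), " has ", incoming, " transitive incoming values\n");
        }
    }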
diff --git a/Source/JavaScriptCore/b3/B3Procedure.cpp b/Source/JavaScriptCore/b3/B3Procedure.cpp
new file mode 100644
index 000000000..0cb48c407
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Procedure.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Procedure.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BasicBlockUtils.h"
+#include "B3BlockWorklist.h"
+#include "B3CFG.h"
+#include "B3DataSection.h"
+#include "B3Dominators.h"
+#include "B3OpaqueByproducts.h"
+#include "B3PhiChildren.h"
+#include "B3StackSlot.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+
+namespace JSC { namespace B3 {
+
+Procedure::Procedure()
+ : m_cfg(new CFG(*this))
+ , m_lastPhaseName("initial")
+ , m_byproducts(std::make_unique<OpaqueByproducts>())
+ , m_code(new Air::Code(*this))
+{
+}
+
+Procedure::~Procedure()
+{
+}
+
+void Procedure::printOrigin(PrintStream& out, Origin origin) const
+{
+ if (m_originPrinter)
+ m_originPrinter->run(out, origin);
+ else
+ out.print(origin);
+}
+
+BasicBlock* Procedure::addBlock(double frequency)
+{
+ std::unique_ptr<BasicBlock> block(new BasicBlock(m_blocks.size(), frequency));
+ BasicBlock* result = block.get();
+ m_blocks.append(WTFMove(block));
+ return result;
+}
+
+StackSlot* Procedure::addStackSlot(unsigned byteSize)
+{
+ return m_stackSlots.addNew(byteSize);
+}
+
+Variable* Procedure::addVariable(Type type)
+{
+ return m_variables.addNew(type);
+}
+
+Value* Procedure::clone(Value* value)
+{
+ std::unique_ptr<Value> clone(value->cloneImpl());
+ clone->m_index = UINT_MAX;
+ clone->owner = nullptr;
+ return m_values.add(WTFMove(clone));
+}
+
+Value* Procedure::addIntConstant(Origin origin, Type type, int64_t value)
+{
+ switch (type) {
+ case Int32:
+ return add<Const32Value>(origin, static_cast<int32_t>(value));
+ case Int64:
+ return add<Const64Value>(origin, value);
+ case Double:
+ return add<ConstDoubleValue>(origin, static_cast<double>(value));
+ case Float:
+ return add<ConstFloatValue>(origin, static_cast<float>(value));
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+ }
+}
+
+Value* Procedure::addIntConstant(Value* likeValue, int64_t value)
+{
+ return addIntConstant(likeValue->origin(), likeValue->type(), value);
+}
+
+Value* Procedure::addBottom(Origin origin, Type type)
+{
+ return addIntConstant(origin, type, 0);
+}
+
+Value* Procedure::addBottom(Value* value)
+{
+ return addBottom(value->origin(), value->type());
+}
+
+Value* Procedure::addBoolConstant(Origin origin, TriState triState)
+{
+ int32_t value = 0;
+ switch (triState) {
+ case FalseTriState:
+ value = 0;
+ break;
+ case TrueTriState:
+ value = 1;
+ break;
+ case MixedTriState:
+ return nullptr;
+ }
+
+ return addIntConstant(origin, Int32, value);
+}
+
+void Procedure::resetValueOwners()
+{
+ for (BasicBlock* block : *this) {
+ for (Value* value : *block)
+ value->owner = block;
+ }
+}
+
+void Procedure::resetReachability()
+{
+ recomputePredecessors(m_blocks);
+
+ // The common case is that this does not find any dead blocks.
+ bool foundDead = false;
+ for (auto& block : m_blocks) {
+ if (isBlockDead(block.get())) {
+ foundDead = true;
+ break;
+ }
+ }
+ if (!foundDead)
+ return;
+
+ resetValueOwners();
+
+ for (Value* value : values()) {
+ if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+ if (isBlockDead(upsilon->phi()->owner))
+ upsilon->replaceWithNop();
+ }
+ }
+
+ for (auto& block : m_blocks) {
+ if (isBlockDead(block.get())) {
+ for (Value* value : *block)
+ deleteValue(value);
+ block = nullptr;
+ }
+ }
+}
+
+void Procedure::invalidateCFG()
+{
+ m_dominators = nullptr;
+}
+
+void Procedure::dump(PrintStream& out) const
+{
+ IndexSet<Value> valuesInBlocks;
+ for (BasicBlock* block : *this) {
+ out.print(deepDump(*this, block));
+ valuesInBlocks.addAll(*block);
+ }
+ bool didPrint = false;
+ for (Value* value : values()) {
+ if (valuesInBlocks.contains(value))
+ continue;
+
+ if (!didPrint) {
+ dataLog("Orphaned values:\n");
+ didPrint = true;
+ }
+ dataLog(" ", deepDump(*this, value), "\n");
+ }
+ if (variables().size()) {
+ out.print("Variables:\n");
+ for (Variable* variable : variables())
+ out.print(" ", deepDump(variable), "\n");
+ }
+ if (stackSlots().size()) {
+ out.print("Stack slots:\n");
+ for (StackSlot* slot : stackSlots())
+ out.print(" ", pointerDump(slot), ": ", deepDump(slot), "\n");
+ }
+ if (m_byproducts->count())
+ out.print(*m_byproducts);
+}
+
+Vector<BasicBlock*> Procedure::blocksInPreOrder()
+{
+ return B3::blocksInPreOrder(at(0));
+}
+
+Vector<BasicBlock*> Procedure::blocksInPostOrder()
+{
+ return B3::blocksInPostOrder(at(0));
+}
+
+void Procedure::deleteStackSlot(StackSlot* stackSlot)
+{
+ m_stackSlots.remove(stackSlot);
+}
+
+void Procedure::deleteVariable(Variable* variable)
+{
+ m_variables.remove(variable);
+}
+
+void Procedure::deleteValue(Value* value)
+{
+ m_values.remove(value);
+}
+
+void Procedure::deleteOrphans()
+{
+ IndexSet<Value> valuesInBlocks;
+ for (BasicBlock* block : *this)
+ valuesInBlocks.addAll(*block);
+
+ // Since this method is not on any hot path, we do it conservatively: first a pass to
+ // identify the values to be removed, and then a second pass to remove them. This avoids any
+ // risk of the value iteration being broken by removals.
+ Vector<Value*, 16> toRemove;
+ for (Value* value : values()) {
+ if (!valuesInBlocks.contains(value))
+ toRemove.append(value);
+ else if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+ if (!valuesInBlocks.contains(upsilon->phi()))
+ upsilon->replaceWithNop();
+ }
+ }
+
+ for (Value* value : toRemove)
+ deleteValue(value);
+}
+
+Dominators& Procedure::dominators()
+{
+ if (!m_dominators)
+ m_dominators = std::make_unique<Dominators>(*this);
+ return *m_dominators;
+}
+
+void Procedure::addFastConstant(const ValueKey& constant)
+{
+ RELEASE_ASSERT(constant.isConstant());
+ m_fastConstants.add(constant);
+}
+
+bool Procedure::isFastConstant(const ValueKey& constant)
+{
+ if (!constant)
+ return false;
+ return m_fastConstants.contains(constant);
+}
+
+CCallHelpers::Label Procedure::entrypointLabel(unsigned index) const
+{
+ return m_code->entrypointLabel(index);
+}
+
+void* Procedure::addDataSection(size_t size)
+{
+ if (!size)
+ return nullptr;
+ std::unique_ptr<DataSection> dataSection = std::make_unique<DataSection>(size);
+ void* result = dataSection->data();
+ m_byproducts->add(WTFMove(dataSection));
+ return result;
+}
+
+unsigned Procedure::callArgAreaSizeInBytes() const
+{
+ return code().callArgAreaSizeInBytes();
+}
+
+void Procedure::requestCallArgAreaSizeInBytes(unsigned size)
+{
+ code().requestCallArgAreaSizeInBytes(size);
+}
+
+void Procedure::pinRegister(Reg reg)
+{
+ code().pinRegister(reg);
+}
+
+unsigned Procedure::frameSize() const
+{
+ return code().frameSize();
+}
+
+const RegisterAtOffsetList& Procedure::calleeSaveRegisters() const
+{
+ return code().calleeSaveRegisters();
+}
+
+Value* Procedure::addValueImpl(Value* value)
+{
+ return m_values.add(std::unique_ptr<Value>(value));
+}
+
+void Procedure::setBlockOrderImpl(Vector<BasicBlock*>& blocks)
+{
+ IndexSet<BasicBlock> blocksSet;
+ blocksSet.addAll(blocks);
+
+ for (BasicBlock* block : *this) {
+ if (!blocksSet.contains(block))
+ blocks.append(block);
+ }
+
+ // Place blocks into this procedure's block list by first leaking all of the blocks and then
+ // readopting them.
+ for (auto& entry : m_blocks)
+ entry.release();
+
+ m_blocks.resize(blocks.size());
+ for (unsigned i = 0; i < blocks.size(); ++i) {
+ BasicBlock* block = blocks[i];
+ block->m_index = i;
+ m_blocks[i] = std::unique_ptr<BasicBlock>(block);
+ }
+}
+
+void Procedure::setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator> generator)
+{
+ code().setWasmBoundsCheckGenerator(generator);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Procedure.h b/Source/JavaScriptCore/b3/B3Procedure.h
new file mode 100644
index 000000000..2236145ef
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Procedure.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproducts.h"
+#include "B3Origin.h"
+#include "B3PCToOriginMap.h"
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include "B3ValueKey.h"
+#include "CCallHelpers.h"
+#include "PureNaN.h"
+#include "RegisterAtOffsetList.h"
+#include <wtf/Bag.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/HashSet.h>
+#include <wtf/IndexedContainerIterator.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+#include <wtf/SharedTask.h>
+#include <wtf/TriState.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class BlockInsertionSet;
+class CFG;
+class Dominators;
+class StackSlot;
+class Value;
+class Variable;
+
+namespace Air { class Code; }
+
+typedef void WasmBoundsCheckGeneratorFunction(CCallHelpers&, GPRReg, unsigned);
+typedef SharedTask<WasmBoundsCheckGeneratorFunction> WasmBoundsCheckGenerator;
+
+// This represents B3's view of a piece of code. Note that this object must exist in a 1:1
+// relationship with Air::Code. B3::Procedure and Air::Code are just different facades of the B3
+// compiler's knowledge about a piece of code. Some kinds of state aren't perfect fits for either
+// Procedure or Code, and are placed in one or the other based on convenience. Procedure always
+// allocates a Code; a Code cannot be allocated without an owning Procedure, and the two always
+// have references to each other.
+
+class Procedure {
+ WTF_MAKE_NONCOPYABLE(Procedure);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+
+ JS_EXPORT_PRIVATE Procedure();
+ JS_EXPORT_PRIVATE ~Procedure();
+
+ template<typename Callback>
+ void setOriginPrinter(Callback&& callback)
+ {
+ m_originPrinter = createSharedTask<void(PrintStream&, Origin)>(
+ std::forward<Callback>(callback));
+ }
+
+ // Usually you use this via OriginDump, though it's cool to use it directly.
+ void printOrigin(PrintStream& out, Origin origin) const;
+
+ // This is a debugging hack. Sometimes while debugging B3 you need to break the abstraction
+ // and get at the DFG Graph, or whatever data structure the frontend used to describe the
+ // program. The FTL passes the DFG Graph.
+ void setFrontendData(const void* value) { m_frontendData = value; }
+ const void* frontendData() const { return m_frontendData; }
+
+ JS_EXPORT_PRIVATE BasicBlock* addBlock(double frequency = 1);
+
+ // Changes the order of basic blocks to be as in the supplied vector. The vector does not
+ // need to mention every block in the procedure. Blocks not mentioned will be placed after
+ // these blocks, in the same order as they originally appeared.
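+ // For example (purely illustrative), setBlockOrder(blocksInPostOrder()) would reorder this
+ // procedure's blocks into post order.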
+ template<typename BlockIterable>
+ void setBlockOrder(const BlockIterable& iterable)
+ {
+ Vector<BasicBlock*> blocks;
+ for (BasicBlock* block : iterable)
+ blocks.append(block);
+ setBlockOrderImpl(blocks);
+ }
+
+ JS_EXPORT_PRIVATE StackSlot* addStackSlot(unsigned byteSize);
+ JS_EXPORT_PRIVATE Variable* addVariable(Type);
+
+ template<typename ValueType, typename... Arguments>
+ ValueType* add(Arguments...);
+
+ Value* clone(Value*);
+
+ Value* addIntConstant(Origin, Type, int64_t value);
+ Value* addIntConstant(Value*, int64_t value);
+
+ Value* addBottom(Origin, Type);
+ Value* addBottom(Value*);
+
+ // Returns null for MixedTriState.
+ Value* addBoolConstant(Origin, TriState);
+
+ void resetValueOwners();
+ JS_EXPORT_PRIVATE void resetReachability();
+
+ // This destroys CFG analyses. If we ask for them again, we will recompute them. Usually you
+ // should call this anytime you call resetReachability().
+ void invalidateCFG();
+
+ JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+ unsigned size() const { return m_blocks.size(); }
+ BasicBlock* at(unsigned index) const { return m_blocks[index].get(); }
+ BasicBlock* operator[](unsigned index) const { return at(index); }
+
+ typedef WTF::IndexedContainerIterator<Procedure> iterator;
+
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, size()); }
+
+ Vector<BasicBlock*> blocksInPreOrder();
+ Vector<BasicBlock*> blocksInPostOrder();
+
+ SparseCollection<StackSlot>& stackSlots() { return m_stackSlots; }
+ const SparseCollection<StackSlot>& stackSlots() const { return m_stackSlots; }
+
+ // Short for stackSlots().remove(). It's better to call this method since it's out of line.
+ void deleteStackSlot(StackSlot*);
+
+ SparseCollection<Variable>& variables() { return m_variables; }
+ const SparseCollection<Variable>& variables() const { return m_variables; }
+
+ // Short for variables().remove(). It's better to call this method since it's out of line.
+ void deleteVariable(Variable*);
+
+ SparseCollection<Value>& values() { return m_values; }
+ const SparseCollection<Value>& values() const { return m_values; }
+
+ // Short for values().remove(). It's better to call this method since it's out of line.
+ void deleteValue(Value*);
+
+ // A valid procedure cannot contain any orphan values. An orphan is a value that is not in
+ // any basic block. It is possible to create an orphan value during code generation or during
+ // transformation. If you know that you may have created some, you can call this method to
+ // delete them, making the procedure valid again.
+ void deleteOrphans();
+
+ CFG& cfg() const { return *m_cfg; }
+
+ Dominators& dominators();
+
+ void addFastConstant(const ValueKey&);
+ bool isFastConstant(const ValueKey&);
+
+ unsigned numEntrypoints() const { return m_numEntrypoints; }
+ void setNumEntrypoints(unsigned numEntrypoints) { m_numEntrypoints = numEntrypoints; }
+
+ // Only call this after code generation is complete. Note that the label for the 0th entrypoint
+ // should point to exactly where the code generation cursor was before you started generating
+ // code.
+ JS_EXPORT_PRIVATE CCallHelpers::Label entrypointLabel(unsigned entrypointIndex) const;
+
+ // The name has to be a string literal, since we don't do any memory management for the string.
+ void setLastPhaseName(const char* name)
+ {
+ m_lastPhaseName = name;
+ }
+
+ const char* lastPhaseName() const { return m_lastPhaseName; }
+
+ // Allocates a slab of memory that will be kept alive by anyone who keeps the resulting code
+ // alive. Great for compiler-generated data sections, like switch jump tables and constant pools.
+ // This returns memory that has been zero-initialized.
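+ // For example (purely illustrative), a pass emitting a four-entry jump table might call
+ // addDataSection(4 * sizeof(void*)) and fill in the slots before the code is published.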
+ JS_EXPORT_PRIVATE void* addDataSection(size_t);
+
+ OpaqueByproducts& byproducts() { return *m_byproducts; }
+
+ // Below are methods that make sense to call after you have generated code for the procedure.
+
+ // You have to call this method after calling generate(). The code generated by B3::generate()
+ // will require you to keep this object alive for as long as that code is runnable. Usually, this
+ // just keeps alive things like the double constant pool and switch lookup tables. If this sounds
+ // confusing, you should probably be using the B3::Compilation API to compile code. If you use
+ // that API, then you don't have to worry about this.
+ std::unique_ptr<OpaqueByproducts> releaseByproducts() { return WTFMove(m_byproducts); }
+
+ // This gives you direct access to Code. However, the idea is that clients of B3 shouldn't have to
+ // call this. So, Procedure has some methods (below) that expose some Air::Code functionality.
+ const Air::Code& code() const { return *m_code; }
+ Air::Code& code() { return *m_code; }
+
+ unsigned callArgAreaSizeInBytes() const;
+ void requestCallArgAreaSizeInBytes(unsigned size);
+
+ // This tells the register allocators to stay away from this register.
+ JS_EXPORT_PRIVATE void pinRegister(Reg);
+
+ JS_EXPORT_PRIVATE unsigned frameSize() const;
+ JS_EXPORT_PRIVATE const RegisterAtOffsetList& calleeSaveRegisters() const;
+
+ PCToOriginMap& pcToOriginMap() { return m_pcToOriginMap; }
+ PCToOriginMap releasePCToOriginMap() { return WTFMove(m_pcToOriginMap); }
+
+ JS_EXPORT_PRIVATE void setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator>);
+
+ template<typename Functor>
+ void setWasmBoundsCheckGenerator(const Functor& functor)
+ {
+ setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator>(createSharedTask<WasmBoundsCheckGeneratorFunction>(functor)));
+ }
+
+private:
+ friend class BlockInsertionSet;
+
+ JS_EXPORT_PRIVATE Value* addValueImpl(Value*);
+ void setBlockOrderImpl(Vector<BasicBlock*>&);
+
+ SparseCollection<StackSlot> m_stackSlots;
+ SparseCollection<Variable> m_variables;
+ Vector<std::unique_ptr<BasicBlock>> m_blocks;
+ SparseCollection<Value> m_values;
+ std::unique_ptr<CFG> m_cfg;
+ std::unique_ptr<Dominators> m_dominators;
+ HashSet<ValueKey> m_fastConstants;
+ unsigned m_numEntrypoints { 1 };
+ const char* m_lastPhaseName;
+ std::unique_ptr<OpaqueByproducts> m_byproducts;
+ std::unique_ptr<Air::Code> m_code;
+ RefPtr<SharedTask<void(PrintStream&, Origin)>> m_originPrinter;
+ const void* m_frontendData;
+ PCToOriginMap m_pcToOriginMap;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ProcedureInlines.h b/Source/JavaScriptCore/b3/B3ProcedureInlines.h
new file mode 100644
index 000000000..990ba31ee
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ProcedureInlines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
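+// Constructs a Value of the given subclass, passing the arguments to its constructor, and hands
+// ownership to the procedure. For example (illustrative), proc.add<Const32Value>(origin, 42)
+// creates an Int32 constant owned by proc; it still has to be appended to a BasicBlock before it
+// is part of the program.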
+template<typename ValueType, typename... Arguments>
+ValueType* Procedure::add(Arguments... arguments)
+{
+ return static_cast<ValueType*>(addValueImpl(new ValueType(arguments...)));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3PureCSE.cpp b/Source/JavaScriptCore/b3/B3PureCSE.cpp
new file mode 100644
index 000000000..0ea344777
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PureCSE.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3PureCSE.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Dominators.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+PureCSE::PureCSE()
+{
+}
+
+PureCSE::~PureCSE()
+{
+}
+
+void PureCSE::clear()
+{
+ m_map.clear();
+}
+
+Value* PureCSE::findMatch(const ValueKey& key, BasicBlock* block, Dominators& dominators)
+{
+ if (!key)
+ return nullptr;
+
+ auto iter = m_map.find(key);
+ if (iter == m_map.end())
+ return nullptr;
+
+ for (Value* match : iter->value) {
+ if (!match->owner)
+ continue;
+ if (dominators.dominates(match->owner, block))
+ return match;
+ }
+
+ return nullptr;
+}
+
+bool PureCSE::process(Value* value, Dominators& dominators)
+{
+ if (value->opcode() == Identity)
+ return false;
+
+ ValueKey key = value->key();
+ if (!key)
+ return false;
+
+ Matches& matches = m_map.add(key, Matches()).iterator->value;
+
+ for (Value* match : matches) {
+ if (!match->owner)
+ continue;
+ if (dominators.dominates(match->owner, value->owner)) {
+ value->replaceWithIdentity(match);
+ return true;
+ }
+ }
+
+ matches.append(value);
+ return false;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3PureCSE.h b/Source/JavaScriptCore/b3/B3PureCSE.h
new file mode 100644
index 000000000..942966ceb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3PureCSE.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueKey.h"
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Dominators;
+class Value;
+
+typedef Vector<Value*, 1> Matches;
+
+// This is a reusable utility for doing pure CSE. You can use it to do pure CSE on a program by just
+// proceeding through it in order and calling process() on each value.
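+//
+// A minimal sketch of the intended usage (illustrative; value owners must be up to date, e.g.
+// via Procedure::resetValueOwners(), and a pre-order walk guarantees dominators are seen first):
+//
+//     PureCSE pureCSE;
+//     for (BasicBlock* block : proc.blocksInPreOrder()) {
+//         for (Value* value : *block)
+//             pureCSE.process(value, proc.dominators());
+//     }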
+class PureCSE {
+public:
+ PureCSE();
+ ~PureCSE();
+
+ void clear();
+
+ Value* findMatch(const ValueKey&, BasicBlock*, Dominators&);
+
+ bool process(Value*, Dominators&);
+
+private:
+ HashMap<ValueKey, Matches> m_map;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp b/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp
new file mode 100644
index 000000000..ef928112f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.cpp
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3ReduceDoubleToFloat.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+bool verbose = false;
+bool printRemainingConversions = false;
+
+class DoubleToFloatReduction {
+public:
+ DoubleToFloatReduction(Procedure& procedure)
+ : m_procedure(procedure)
+ {
+ }
+
+ void run()
+ {
+ if (!findCandidates())
+ return;
+
+ findPhisContainingFloat();
+
+ simplify();
+
+ cleanUp();
+ }
+
+private:
+ // This step finds values that are used as Double and cannot be converted to Float.
+ // It flows the information backward through Phi-Upsilons.
+ bool findCandidates()
+ {
+ bool foundConversionCandidate = false;
+ Vector<Value*, 32> upsilons;
+
+ // First, we find all values that are strictly used as double.
+ // Those are values used by something other than DoubleToFloat.
+ //
+ // We don't know the state of Upsilons until their Phi has been
+ // set. We just keep a list of them and update them next.
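+ // For example (illustrative), a Double-typed Add marks both of its children as used-as-Double,
+ // whereas DoubleToFloat does not mark its child.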
+ for (BasicBlock* block : m_procedure) {
+ for (Value* value : *block) {
+ value->performSubstitution();
+
+ if (value->opcode() == DoubleToFloat) {
+ foundConversionCandidate = true;
+
+ Value* child = value->child(0);
+ if (child->opcode() == FloatToDouble) {
+ // We don't really need to simplify this early but it simplifies debugging.
+ value->replaceWithIdentity(child->child(0));
+ }
+ continue;
+ }
+
+ if (value->opcode() == FloatToDouble)
+ foundConversionCandidate = true;
+
+ if (value->opcode() == Upsilon) {
+ Value* child = value->child(0);
+ if (child->type() == Double)
+ upsilons.append(value);
+ continue;
+ }
+
+ for (Value* child : value->children()) {
+ if (child->type() == Double)
+ m_valuesUsedAsDouble.add(child);
+ }
+ }
+ }
+
+ if (!foundConversionCandidate)
+ return false;
+
+ // Now we just need to propagate through Phi-Upsilon.
+ // An Upsilon can convert its input to float if its phi is never used as double.
+ // If we modify a phi, we need to continue until all the Upsilon-Phis converge.
+ bool changedPhiState;
+ do {
+ changedPhiState = false;
+ for (Value* value : upsilons) {
+ UpsilonValue* upsilon = value->as<UpsilonValue>();
+ Value* phi = upsilon->phi();
+ if (!m_valuesUsedAsDouble.contains(phi))
+ continue;
+
+ Value* child = value->child(0);
+ bool childChanged = m_valuesUsedAsDouble.add(child);
+ if (childChanged && child->opcode() == Phi)
+ changedPhiState = true;
+ }
+ } while (changedPhiState);
+
+ if (verbose) {
+ dataLog("Conversion candidates:\n");
+ for (BasicBlock* block : m_procedure) {
+ for (Value* value : *block) {
+ if (value->type() == Double && !m_valuesUsedAsDouble.contains(value))
+ dataLog(" ", deepDump(m_procedure, value), "\n");
+ }
+ }
+ dataLog("\n");
+ }
+
+ return true;
+ }
+
+ // This step finds Phis of type Double that effectively contain Float values.
+ // It flows that information forward through Phi-Upsilons.
+ void findPhisContainingFloat()
+ {
+ Vector<Value*, 32> upsilons;
+
+ // The Double values that can be safely turned into a Float are:
+ // - FloatToDouble
+ // - ConstDouble with a value that converts to Float without losing precision.
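+ // For example, a ConstDouble of 0.5 round-trips through Float unchanged, while 0.1 does not,
+ // because the nearest float to 0.1 differs from the nearest double to 0.1.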
+ for (BasicBlock* block : m_procedure) {
+ for (Value* value : *block) {
+ if (value->opcode() != Upsilon)
+ continue;
+
+ Value* child = value->child(0);
+ if (child->type() != Double
+ || child->opcode() == FloatToDouble)
+ continue;
+
+ if (child->hasDouble()) {
+ double constValue = child->asDouble();
+ if (isIdentical(static_cast<double>(static_cast<float>(constValue)), constValue))
+ continue;
+ }
+
+ if (child->opcode() == Phi) {
+ upsilons.append(value);
+ continue;
+ }
+
+ UpsilonValue* upsilon = value->as<UpsilonValue>();
+ Value* phi = upsilon->phi();
+ m_phisContainingDouble.add(phi);
+ }
+ }
+
+ // Propagate the flags forward.
+ bool changedPhiState;
+ do {
+ changedPhiState = false;
+ for (Value* value : upsilons) {
+ Value* child = value->child(0);
+ if (m_phisContainingDouble.contains(child)) {
+ UpsilonValue* upsilon = value->as<UpsilonValue>();
+ Value* phi = upsilon->phi();
+ changedPhiState |= m_phisContainingDouble.add(phi);
+ }
+ }
+ } while (changedPhiState);
+
+ if (verbose) {
+ dataLog("Phis containing float values:\n");
+ for (BasicBlock* block : m_procedure) {
+ for (Value* value : *block) {
+ if (value->opcode() == Phi
+ && value->type() == Double
+ && !m_phisContainingDouble.contains(value))
+ dataLog(" ", deepDump(m_procedure, value), "\n");
+ }
+ }
+ dataLog("\n");
+ }
+ }
+
+ bool canBeTransformedToFloat(Value* value)
+ {
+ if (value->opcode() == FloatToDouble)
+ return true;
+
+ if (value->hasDouble())
+ return true; // Double constant truncated to float.
+
+ if (value->opcode() == Phi) {
+ return value->type() == Float
+ || (value->type() == Double && !m_phisContainingDouble.contains(value));
+ }
+ return false;
+ }
+
+ Value* transformToFloat(Value* value, unsigned valueIndex, InsertionSet& insertionSet)
+ {
+ ASSERT(canBeTransformedToFloat(value));
+ if (value->opcode() == FloatToDouble)
+ return value->child(0);
+
+ if (value->hasDouble())
+ return insertionSet.insert<ConstFloatValue>(valueIndex, value->origin(), static_cast<float>(value->asDouble()));
+
+ if (value->opcode() == Phi) {
+ ASSERT(value->type() == Double || value->type() == Float);
+ if (value->type() == Double)
+ convertPhi(value);
+ return value;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+ }
+
+ void convertPhi(Value* phi)
+ {
+ ASSERT(phi->opcode() == Phi);
+ ASSERT(phi->type() == Double);
+ phi->setType(Float);
+ m_convertedPhis.add(phi);
+ }
+
+ bool attemptTwoOperandsSimplify(Value* candidate, unsigned candidateIndex, InsertionSet& insertionSet)
+ {
+ Value* left = candidate->child(0);
+ Value* right = candidate->child(1);
+ if (!canBeTransformedToFloat(left) || !canBeTransformedToFloat(right))
+ return false;
+
+ m_convertedValue.add(candidate);
+ candidate->child(0) = transformToFloat(left, candidateIndex, insertionSet);
+ candidate->child(1) = transformToFloat(right, candidateIndex, insertionSet);
+ return true;
+ }
+
+ // Simplify Double operations into Float operations.
+ void simplify()
+ {
+ Vector<Value*, 32> upsilonReferencingDoublePhi;
+
+ InsertionSet insertionSet(m_procedure);
+ for (BasicBlock* block : m_procedure) {
+ for (unsigned index = 0; index < block->size(); ++index) {
+ Value* value = block->at(index);
+
+ switch (value->opcode()) {
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case LessEqual:
+ case GreaterEqual:
+ case EqualOrUnordered:
+ attemptTwoOperandsSimplify(value, index, insertionSet);
+ continue;
+ case Upsilon: {
+ Value* child = value->child(0);
+ if (child->opcode() == Phi && child->type() == Double)
+ upsilonReferencingDoublePhi.append(value);
+ continue;
+ }
+ default:
+ break;
+ }
+
+ if (m_valuesUsedAsDouble.contains(value))
+ continue;
+
+ switch (value->opcode()) {
+ case Add:
+ case Sub:
+ case Mul:
+ case Div:
+ if (attemptTwoOperandsSimplify(value, index, insertionSet))
+ value->setType(Float);
+ break;
+ case Abs:
+ case Ceil:
+ case Floor:
+ case Neg:
+ case Sqrt: {
+ Value* child = value->child(0);
+ if (canBeTransformedToFloat(child)) {
+ value->child(0) = transformToFloat(child, index, insertionSet);
+ value->setType(Float);
+ m_convertedValue.add(value);
+ }
+ break;
+ }
+ case IToD: {
+ Value* iToF = insertionSet.insert<Value>(index, IToF, value->origin(), value->child(0));
+ value->setType(Float);
+ value->replaceWithIdentity(iToF);
+ m_convertedValue.add(value);
+ break;
+ }
+ case FloatToDouble:
+ // This happens if we round twice.
+ // Typically, this is indirect through Phi-Upsilons.
+ // The Upsilon rounds and the Phi rounds.
+ value->setType(Float);
+ value->replaceWithIdentity(value->child(0));
+ m_convertedValue.add(value);
+ break;
+ case Phi:
+ // If a Phi is always converted to Float, we always make it into a float Phi-Upsilon.
+ // This is a simplistic view of things. Ideally we should keep whichever type minimizes
+ // the amount of conversion in the loop.
+ if (value->type() == Double)
+ convertPhi(value);
+ break;
+ default:
+ break;
+ }
+ }
+ insertionSet.execute(block);
+ }
+
+ if (!upsilonReferencingDoublePhi.isEmpty()) {
+ // If a Phi contains Float values typed as Double, but is not used as Float
+ // by a non-trivial operation, we did not convert it.
+ //
+ // We fix that now by converting the remaining Phis that contain
+ // Float values but were not converted to Float.
+ bool changedPhi;
+ do {
+ changedPhi = false;
+
+ for (Value* value : upsilonReferencingDoublePhi) {
+ UpsilonValue* upsilon = value->as<UpsilonValue>();
+ Value* child = value->child(0);
+ Value* phi = upsilon->phi();
+ if (phi->type() == Float && child->type() == Double
+ && !m_phisContainingDouble.contains(child)) {
+ convertPhi(child);
+ changedPhi = true;
+ }
+ }
+
+ } while (changedPhi);
+ }
+ }
+
+ // At this point we are in an inconsistent state: there may be DoubleToFloat nodes whose
+ // child already produces Float, and Phis that are now Float while some of their Upsilons
+ // still produce Double.
+ //
+ // This step puts us back in a consistent state.
+ void cleanUp()
+ {
+ InsertionSet insertionSet(m_procedure);
+
+ for (BasicBlock* block : m_procedure) {
+ for (unsigned index = 0; index < block->size(); ++index) {
+ Value* value = block->at(index);
+ if (value->opcode() == DoubleToFloat && value->child(0)->type() == Float) {
+ value->replaceWithIdentity(value->child(0));
+ continue;
+ }
+
+ if (value->opcode() == Upsilon) {
+ UpsilonValue* upsilon = value->as<UpsilonValue>();
+ Value* child = value->child(0);
+ Value* phi = upsilon->phi();
+
+ if (phi->type() == Float) {
+ if (child->type() == Double) {
+ Value* newChild = nullptr;
+ if (child->opcode() == FloatToDouble)
+ newChild = child->child(0);
+ else if (child->hasDouble())
+ newChild = insertionSet.insert<ConstFloatValue>(index, child->origin(), static_cast<float>(child->asDouble()));
+ else
+ newChild = insertionSet.insert<Value>(index, DoubleToFloat, upsilon->origin(), child);
+ upsilon->child(0) = newChild;
+ }
+ continue;
+ }
+ }
+
+ if (!m_convertedValue.contains(value)) {
+ // Phis can be converted from Double to Float if the value they contain
+ // is not more precise than a Float.
+ // If the value is needed as Double, it has to be converted back.
+ for (Value*& child : value->children()) {
+ if (m_convertedPhis.contains(child))
+ child = insertionSet.insert<Value>(index, FloatToDouble, value->origin(), child);
+ }
+ }
+ }
+ insertionSet.execute(block);
+ }
+ }
+
+ Procedure& m_procedure;
+
+ // Set of all the Double values that are actually used as Double.
+ // Converting any of them to Float would lose precision.
+ IndexSet<Value> m_valuesUsedAsDouble;
+
+ // Set of all the Phi of type Double that really contains a Double.
+ // Any Double Phi not in the set can be converted to Float without losing precision.
+ IndexSet<Value> m_phisContainingDouble;
+
+ // Any value that was converted from producing a Double to producing a Float.
+ // This set does not include Phi-Upsilons.
+ IndexSet<Value> m_convertedValue;
+
+ // Any Phi that previously produced Double and now produces Float.
+ IndexSet<Value> m_convertedPhis;
+};
+
+void printGraphIfConverting(Procedure& procedure)
+{
+ if (!printRemainingConversions)
+ return;
+
+ UseCounts useCount(procedure);
+
+ Vector<Value*> doubleToFloat;
+ Vector<Value*> floatToDouble;
+
+ for (BasicBlock* block : procedure) {
+ for (Value* value : *block) {
+ if (!useCount.numUses(value))
+ continue;
+
+ if (value->opcode() == DoubleToFloat)
+ doubleToFloat.append(value);
+ if (value->opcode() == FloatToDouble)
+ floatToDouble.append(value);
+ }
+ }
+
+ if (doubleToFloat.isEmpty() && floatToDouble.isEmpty())
+ return;
+
+ dataLog("Procedure with Float-Double conversion:\n", procedure, "\n");
+ dataLog("Converting nodes:\n");
+ for (Value* value : doubleToFloat)
+ dataLog(" ", deepDump(procedure, value), "\n");
+ for (Value* value : floatToDouble)
+ dataLog(" ", deepDump(procedure, value), "\n");
+
+}
+
+} // anonymous namespace.
+
+void reduceDoubleToFloat(Procedure& procedure)
+{
+ PhaseScope phaseScope(procedure, "reduceDoubleToFloat");
+
+ if (verbose)
+ dataLog("Before DoubleToFloatReduction:\n", procedure, "\n");
+
+ DoubleToFloatReduction doubleToFloatReduction(procedure);
+ doubleToFloatReduction.run();
+
+ if (verbose)
+ dataLog("After DoubleToFloatReduction:\n", procedure, "\n");
+
+ printGraphIfConverting(procedure);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h b/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h
new file mode 100644
index 000000000..899f770d3
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ReduceDoubleToFloat.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Change Double operations to Float operations when the difference is not observable
+// and doing so is likely beneficial.
+void reduceDoubleToFloat(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ReduceStrength.cpp b/Source/JavaScriptCore/b3/B3ReduceStrength.cpp
new file mode 100644
index 000000000..43c7302a6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ReduceStrength.cpp
@@ -0,0 +1,2518 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3ReduceStrength.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3ComputeDivisionMagic.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3PhiChildren.h"
+#include "B3ProcedureInlines.h"
+#include "B3PureCSE.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueKeyInlines.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/HashMap.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+// The goal of this phase is to:
+//
+// - Replace operations with less expensive variants. This includes constant folding and classic
+// strength reductions like turning Mul(x, 1 << k) into Shl(x, k).
+//
+// - Reassociate constant operations. For example, Load(Add(x, c)) is turned into Load(x, offset = c)
+// and Add(Add(x, c), d) is turned into Add(x, c + d).
+//
+// - Canonicalize operations. There are some cases where it's not at all obvious which kind of
+// operation is less expensive, but it's useful for subsequent phases - particularly LowerToAir -
+// to have only one way of representing things.
+//
+// This phase runs to fixpoint. Therefore, the canonicalizations must be designed to be monotonic.
+// For example, if we had a canonicalization that said that Add(x, -c) should be Sub(x, c) and
+// another canonicalization that said that Sub(x, d) should be Add(x, -d), then this phase would end
+// up running forever. We don't want that.
+//
+// Therefore, we need to prioritize certain canonical forms over others. Naively, we want strength
+// reduction to reduce the number of values, and so a form involving fewer total values is more
+// canonical. But we might break this, for example when reducing strength of Mul(x, 9). This could be
+// better written as Add(Shl(x, 3), x), which also happens to be representable using a single
+// instruction on x86.
+//
+// Here are some of the rules we have:
+//
+// Canonical form of logical not: BitXor(value, 1). We may have to avoid using this form if we don't
+// know for sure that 'value' is 0-or-1 (i.e. returnsBool). In that case we fall back on
+// Equal(value, 0).
+//
+// Canonical form of commutative operations: if the operation involves a constant, the constant must
+// come second. Add(x, constant) is canonical, while Add(constant, x) is not. If there are no
+// constants then the canonical form involves the lower-indexed value first. Given Add(x, y), it's
+// canonical if x->index() <= y->index().
+
+bool verbose = false;
+
+// FIXME: This IntRange stuff should be refactored into a general constant propagator. It's weird
+// that it's just sitting here in this file.
+class IntRange {
+public:
+ IntRange()
+ {
+ }
+
+ IntRange(int64_t min, int64_t max)
+ : m_min(min)
+ , m_max(max)
+ {
+ }
+
+ template<typename T>
+ static IntRange top()
+ {
+ return IntRange(std::numeric_limits<T>::min(), std::numeric_limits<T>::max());
+ }
+
+ static IntRange top(Type type)
+ {
+ switch (type) {
+ case Int32:
+ return top<int32_t>();
+ case Int64:
+ return top<int64_t>();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+ template<typename T>
+ static IntRange rangeForMask(T mask)
+ {
+ if (!(mask + 1))
+ return top<T>();
+ return IntRange(0, mask);
+ }
+
+ static IntRange rangeForMask(int64_t mask, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return rangeForMask<int32_t>(static_cast<int32_t>(mask));
+ case Int64:
+ return rangeForMask<int64_t>(mask);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+ template<typename T>
+ static IntRange rangeForZShr(int32_t shiftAmount)
+ {
+ typename std::make_unsigned<T>::type mask = 0;
+ mask--;
+ mask >>= shiftAmount;
+ return rangeForMask<T>(static_cast<T>(mask));
+ }
+
+ static IntRange rangeForZShr(int32_t shiftAmount, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return rangeForZShr<int32_t>(shiftAmount);
+ case Int64:
+ return rangeForZShr<int64_t>(shiftAmount);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+ int64_t min() const { return m_min; }
+ int64_t max() const { return m_max; }
+
+ void dump(PrintStream& out) const
+ {
+ out.print("[", m_min, ",", m_max, "]");
+ }
+
+ template<typename T>
+ bool couldOverflowAdd(const IntRange& other)
+ {
+ return sumOverflows<T>(m_min, other.m_min)
+ || sumOverflows<T>(m_min, other.m_max)
+ || sumOverflows<T>(m_max, other.m_min)
+ || sumOverflows<T>(m_max, other.m_max);
+ }
+
+ bool couldOverflowAdd(const IntRange& other, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return couldOverflowAdd<int32_t>(other);
+ case Int64:
+ return couldOverflowAdd<int64_t>(other);
+ default:
+ return true;
+ }
+ }
+
+ template<typename T>
+ bool couldOverflowSub(const IntRange& other)
+ {
+ return differenceOverflows<T>(m_min, other.m_min)
+ || differenceOverflows<T>(m_min, other.m_max)
+ || differenceOverflows<T>(m_max, other.m_min)
+ || differenceOverflows<T>(m_max, other.m_max);
+ }
+
+ bool couldOverflowSub(const IntRange& other, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return couldOverflowSub<int32_t>(other);
+ case Int64:
+ return couldOverflowSub<int64_t>(other);
+ default:
+ return true;
+ }
+ }
+
+ template<typename T>
+ bool couldOverflowMul(const IntRange& other)
+ {
+ return productOverflows<T>(m_min, other.m_min)
+ || productOverflows<T>(m_min, other.m_max)
+ || productOverflows<T>(m_max, other.m_min)
+ || productOverflows<T>(m_max, other.m_max);
+ }
+
+ bool couldOverflowMul(const IntRange& other, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return couldOverflowMul<int32_t>(other);
+ case Int64:
+ return couldOverflowMul<int64_t>(other);
+ default:
+ return true;
+ }
+ }
+
+ template<typename T>
+ IntRange shl(int32_t shiftAmount)
+ {
+ T newMin = static_cast<T>(m_min) << static_cast<T>(shiftAmount);
+ T newMax = static_cast<T>(m_max) << static_cast<T>(shiftAmount);
+
+ if ((newMin >> shiftAmount) != static_cast<T>(m_min))
+ newMin = std::numeric_limits<T>::min();
+ if ((newMax >> shiftAmount) != static_cast<T>(m_max))
+ newMax = std::numeric_limits<T>::max();
+
+ return IntRange(newMin, newMax);
+ }
+
+ IntRange shl(int32_t shiftAmount, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return shl<int32_t>(shiftAmount);
+ case Int64:
+ return shl<int64_t>(shiftAmount);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+ template<typename T>
+ IntRange sShr(int32_t shiftAmount)
+ {
+ T newMin = static_cast<T>(m_min) >> static_cast<T>(shiftAmount);
+ T newMax = static_cast<T>(m_max) >> static_cast<T>(shiftAmount);
+
+ return IntRange(newMin, newMax);
+ }
+
+ IntRange sShr(int32_t shiftAmount, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return sShr<int32_t>(shiftAmount);
+ case Int64:
+ return sShr<int64_t>(shiftAmount);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+ template<typename T>
+ IntRange zShr(int32_t shiftAmount)
+ {
+ // This is an awkward corner case for all of the other logic.
+ if (!shiftAmount)
+ return *this;
+
+ // If the input range may be negative, then all we can say about the output range is that it
+ // will be masked. That's because -1 right shifted just produces that mask.
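+ // For example, with T = int32_t and shiftAmount = 1, an input of -1 shifts to 0x7fffffff,
+ // which is exactly the upper bound of rangeForZShr<int32_t>(1).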
+ if (m_min < 0)
+ return rangeForZShr<T>(shiftAmount);
+
+ // If the input range is non-negative, then this just brings the range closer to zero.
+ typedef typename std::make_unsigned<T>::type UnsignedT;
+ UnsignedT newMin = static_cast<UnsignedT>(m_min) >> static_cast<UnsignedT>(shiftAmount);
+ UnsignedT newMax = static_cast<UnsignedT>(m_max) >> static_cast<UnsignedT>(shiftAmount);
+
+ return IntRange(newMin, newMax);
+ }
+
+ IntRange zShr(int32_t shiftAmount, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return zShr<int32_t>(shiftAmount);
+ case Int64:
+ return zShr<int64_t>(shiftAmount);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+ template<typename T>
+ IntRange add(const IntRange& other)
+ {
+ if (couldOverflowAdd<T>(other))
+ return top<T>();
+ return IntRange(m_min + other.m_min, m_max + other.m_max);
+ }
+
+ IntRange add(const IntRange& other, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return add<int32_t>(other);
+ case Int64:
+ return add<int64_t>(other);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+ template<typename T>
+ IntRange sub(const IntRange& other)
+ {
+ if (couldOverflowSub<T>(other))
+ return top<T>();
+ return IntRange(m_min - other.m_max, m_max - other.m_min);
+ }
+
+ IntRange sub(const IntRange& other, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return sub<int32_t>(other);
+ case Int64:
+ return sub<int64_t>(other);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+ template<typename T>
+ IntRange mul(const IntRange& other)
+ {
+ if (couldOverflowMul<T>(other))
+ return top<T>();
+ return IntRange(
+ std::min(
+ std::min(m_min * other.m_min, m_min * other.m_max),
+ std::min(m_max * other.m_min, m_max * other.m_max)),
+ std::max(
+ std::max(m_min * other.m_min, m_min * other.m_max),
+ std::max(m_max * other.m_min, m_max * other.m_max)));
+ }
+
+ IntRange mul(const IntRange& other, Type type)
+ {
+ switch (type) {
+ case Int32:
+ return mul<int32_t>(other);
+ case Int64:
+ return mul<int64_t>(other);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return IntRange();
+ }
+ }
+
+private:
+ int64_t m_min { 0 };
+ int64_t m_max { 0 };
+};
+
+class ReduceStrength {
+public:
+ ReduceStrength(Procedure& proc)
+ : m_proc(proc)
+ , m_insertionSet(proc)
+ , m_blockInsertionSet(proc)
+ {
+ }
+
+ bool run()
+ {
+ bool result = false;
+ bool first = true;
+ unsigned index = 0;
+ do {
+ m_changed = false;
+ m_changedCFG = false;
+ ++index;
+
+ if (first)
+ first = false;
+ else if (verbose) {
+ dataLog("B3 after iteration #", index - 1, " of reduceStrength:\n");
+ dataLog(m_proc);
+ }
+
+ simplifyCFG();
+
+ if (m_changedCFG) {
+ m_proc.resetReachability();
+ m_proc.invalidateCFG();
+ m_changed = true;
+ }
+
+ // We definitely want to do DCE before we do CSE so that we don't hoist things. For
+ // example:
+ //
+ // @dead = Mul(@a, @b)
+ // ... lots of control flow and stuff
+ // @thing = Mul(@a, @b)
+ //
+ // If we do CSE before DCE, we will remove @thing and keep @dead. Effectively, we will
+ // "hoist" @thing. On the other hand, if we run DCE before CSE, we will kill @dead and
+ // keep @thing. That's better, since we usually want things to stay wherever the client
+ // put them. We're not actually smart enough to move things around at random.
+ killDeadCode();
+
+ simplifySSA();
+
+ m_proc.resetValueOwners();
+ m_dominators = &m_proc.dominators(); // Recompute if necessary.
+ m_pureCSE.clear();
+
+ for (BasicBlock* block : m_proc.blocksInPreOrder()) {
+ m_block = block;
+
+ for (m_index = 0; m_index < block->size(); ++m_index) {
+ if (verbose) {
+ dataLog(
+ "Looking at ", *block, " #", m_index, ": ",
+ deepDump(m_proc, block->at(m_index)), "\n");
+ }
+ m_value = m_block->at(m_index);
+ m_value->performSubstitution();
+
+ reduceValueStrength();
+ replaceIfRedundant();
+ }
+ m_insertionSet.execute(m_block);
+ }
+
+ m_changedCFG |= m_blockInsertionSet.execute();
+ if (m_changedCFG) {
+ m_proc.resetReachability();
+ m_proc.invalidateCFG();
+ m_dominators = nullptr; // Dominators are not valid anymore, and we don't need them yet.
+ m_changed = true;
+ }
+
+ result |= m_changed;
+ } while (m_changed);
+ return result;
+ }
+
+private:
+ void reduceValueStrength()
+ {
+ switch (m_value->opcode()) {
+ case Add:
+ handleCommutativity();
+
+ if (m_value->child(0)->opcode() == Add && isInt(m_value->type())) {
+ // Turn this: Add(Add(value, constant1), constant2)
+ // Into this: Add(value, constant1 + constant2)
+ Value* newSum = m_value->child(1)->addConstant(m_proc, m_value->child(0)->child(1));
+ if (newSum) {
+ m_insertionSet.insertValue(m_index, newSum);
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_value->child(1) = newSum;
+ m_changed = true;
+ break;
+ }
+
+ // Turn this: Add(Add(value, constant), otherValue)
+ // Into this: Add(Add(value, otherValue), constant)
+ if (!m_value->child(1)->hasInt() && m_value->child(0)->child(1)->hasInt()) {
+ Value* value = m_value->child(0)->child(0);
+ Value* constant = m_value->child(0)->child(1);
+ Value* otherValue = m_value->child(1);
+ // This could create duplicate code if Add(value, constant) is used elsewhere.
+ // However, we already model adding a constant as if it were free in other places,
+ // so let's just roll with it. The alternative would mean having to do good use
+ // counts, which reduceStrength() currently doesn't have.
+ m_value->child(0) =
+ m_insertionSet.insert<Value>(
+ m_index, Add, m_value->origin(), value, otherValue);
+ m_value->child(1) = constant;
+ m_changed = true;
+ break;
+ }
+ }
+
+ // Turn this: Add(otherValue, Add(value, constant))
+ // Into this: Add(Add(value, otherValue), constant)
+ if (isInt(m_value->type())
+ && !m_value->child(0)->hasInt()
+ && m_value->child(1)->opcode() == Add
+ && m_value->child(1)->child(1)->hasInt()) {
+ Value* value = m_value->child(1)->child(0);
+ Value* constant = m_value->child(1)->child(1);
+ Value* otherValue = m_value->child(0);
+ // This creates a duplicate add. That's dangerous but probably fine, see above.
+ m_value->child(0) =
+ m_insertionSet.insert<Value>(
+ m_index, Add, m_value->origin(), value, otherValue);
+ m_value->child(1) = constant;
+ m_changed = true;
+ break;
+ }
+
+ // Turn this: Add(constant1, constant2)
+ // Into this: constant1 + constant2
+ if (Value* constantAdd = m_value->child(0)->addConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constantAdd);
+ break;
+ }
+
+ // Turn this: Integer Add(value, value)
+ // Into this: Shl(value, 1)
+ // This is a useful canonicalization. It's not meant to be a strength reduction.
+ if (m_value->isInteger() && m_value->child(0) == m_value->child(1)) {
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ Shl, m_value->origin(), m_value->child(0),
+ m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), 1)));
+ break;
+ }
+
+ // Turn this: Add(value, zero)
+ // Into an Identity.
+ //
+ // Addition is subtle with doubles. Zero is not the neutral value, negative zero is:
+ // 0 + 0 = 0
+ // 0 + -0 = 0
+ // -0 + 0 = 0
+ // -0 + -0 = -0
+ if (m_value->child(1)->isInt(0) || m_value->child(1)->isNegativeZero()) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: Integer Add(Sub(0, value), -1)
+ // Into this: BitXor(value, -1)
+ if (m_value->isInteger()
+ && m_value->child(0)->opcode() == Sub
+ && m_value->child(1)->isInt(-1)
+ && m_value->child(0)->child(0)->isInt(0)) {
+ replaceWithNewValue(m_proc.add<Value>(BitXor, m_value->origin(), m_value->child(0)->child(1), m_value->child(1)));
+ break;
+ }
+
+ break;
+
+ case Sub:
+ // Turn this: Sub(constant1, constant2)
+ // Into this: constant1 - constant2
+ if (Value* constantSub = m_value->child(0)->subConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constantSub);
+ break;
+ }
+
+ if (isInt(m_value->type())) {
+ // Turn this: Sub(value, constant)
+ // Into this: Add(value, -constant)
+ if (Value* negatedConstant = m_value->child(1)->negConstant(m_proc)) {
+ m_insertionSet.insertValue(m_index, negatedConstant);
+ replaceWithNew<Value>(
+ Add, m_value->origin(), m_value->child(0), negatedConstant);
+ break;
+ }
+
+ // Turn this: Sub(0, value)
+ // Into this: Neg(value)
+ if (m_value->child(0)->isInt(0)) {
+ replaceWithNew<Value>(Neg, m_value->origin(), m_value->child(1));
+ break;
+ }
+ }
+
+ break;
+
+ case Neg:
+ // Turn this: Neg(constant)
+ // Into this: -constant
+ if (Value* constant = m_value->child(0)->negConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ // Turn this: Neg(Neg(value))
+ // Into this: value
+ if (m_value->child(0)->opcode() == Neg) {
+ replaceWithIdentity(m_value->child(0)->child(0));
+ break;
+ }
+
+ break;
+
+ case Mul:
+ handleCommutativity();
+
+ // Turn this: Mul(constant1, constant2)
+ // Into this: constant1 * constant2
+ if (Value* value = m_value->child(0)->mulConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(value);
+ break;
+ }
+
+ if (m_value->child(1)->hasInt()) {
+ int64_t factor = m_value->child(1)->asInt();
+
+ // Turn this: Mul(value, 0)
+ // Into this: 0
+ // Note that we don't do this for doubles because that's wrong. For example, -1 * 0
+ // and 1 * 0 yield different results.
+ if (!factor) {
+ replaceWithIdentity(m_value->child(1));
+ break;
+ }
+
+ // Turn this: Mul(value, 1)
+ // Into this: value
+ if (factor == 1) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: Mul(value, -1)
+ // Into this: Sub(0, value)
+ if (factor == -1) {
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ Sub, m_value->origin(),
+ m_insertionSet.insertIntConstant(m_index, m_value, 0),
+ m_value->child(0)));
+ break;
+ }
+
+ // Turn this: Mul(value, constant)
+ // Into this: Shl(value, log2(constant))
+ if (hasOneBitSet(factor)) {
+ unsigned shiftAmount = WTF::fastLog2(static_cast<uint64_t>(factor));
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ Shl, m_value->origin(), m_value->child(0),
+ m_insertionSet.insert<Const32Value>(
+ m_index, m_value->origin(), shiftAmount)));
+ break;
+ }
+ } else if (m_value->child(1)->hasDouble()) {
+ double factor = m_value->child(1)->asDouble();
+
+ // Turn this: Mul(value, 1)
+ // Into this: value
+ if (factor == 1) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+ }
+
+ break;
+
+ case Div:
+ // Turn this: Div(constant1, constant2)
+ // Into this: constant1 / constant2
+ // Note that this uses Div<Chill> semantics. That's fine, because the rules for Div
+ // are strictly weaker: it has corner cases where it's allowed to do anything it
+ // likes.
+ if (replaceWithNewValue(m_value->child(0)->divConstant(m_proc, m_value->child(1))))
+ break;
+
+ if (m_value->child(1)->hasInt()) {
+ switch (m_value->child(1)->asInt()) {
+ case -1:
+ // Turn this: Div(value, -1)
+ // Into this: Neg(value)
+ replaceWithNewValue(
+ m_proc.add<Value>(Neg, m_value->origin(), m_value->child(0)));
+ break;
+
+ case 0:
+ // Turn this: Div(value, 0)
+ // Into this: 0
+ // We can do this because it's precisely correct for ChillDiv and for Div we
+ // are allowed to do whatever we want.
+ replaceWithIdentity(m_value->child(1));
+ break;
+
+ case 1:
+ // Turn this: Div(value, 1)
+ // Into this: value
+ replaceWithIdentity(m_value->child(0));
+ break;
+
+ default:
+ // Perform super comprehensive strength reduction of division. Currently we
+ // only do this for 32-bit divisions, since we need a high multiply
+ // operation. We emulate it using 64-bit multiply. We can't emulate 64-bit
+ // high multiply with a 128-bit multiply because we don't have a 128-bit
+ // multiply. We could do it with a patchpoint if we cared badly enough.
+
+ if (m_value->type() != Int32)
+ break;
+
+ int32_t divisor = m_value->child(1)->asInt32();
+ DivisionMagic<int32_t> magic = computeDivisionMagic(divisor);
+
+ // Perform the "high" multiplication. We do it just to get the high bits.
+ // This is sort of like multiplying by the reciprocal, just more gnarly. It's
+ // from Hacker's Delight and I don't claim to understand it.
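+                // As an illustrative example (magic values quoted from Hacker's Delight,
+                // not re-derived here): for divisor = 7, magicMultiplier = 0x92492493 and
+                // shift = 2, so n / 7 becomes the high 32 bits of int64_t(n) * 0x92492493,
+                // corrected and shifted as below.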
+ Value* magicQuotient = m_insertionSet.insert<Value>(
+ m_index, Trunc, m_value->origin(),
+ m_insertionSet.insert<Value>(
+ m_index, ZShr, m_value->origin(),
+ m_insertionSet.insert<Value>(
+ m_index, Mul, m_value->origin(),
+ m_insertionSet.insert<Value>(
+ m_index, SExt32, m_value->origin(), m_value->child(0)),
+ m_insertionSet.insert<Const64Value>(
+ m_index, m_value->origin(), magic.magicMultiplier)),
+ m_insertionSet.insert<Const32Value>(
+ m_index, m_value->origin(), 32)));
+
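+                // When the magic multiplier's sign disagrees with the divisor's, the
+                // multiplier was wrapped modulo 2^32, so we compensate by adding or
+                // subtracting the dividend.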
+ if (divisor > 0 && magic.magicMultiplier < 0) {
+ magicQuotient = m_insertionSet.insert<Value>(
+ m_index, Add, m_value->origin(), magicQuotient, m_value->child(0));
+ }
+ if (divisor < 0 && magic.magicMultiplier > 0) {
+ magicQuotient = m_insertionSet.insert<Value>(
+ m_index, Sub, m_value->origin(), magicQuotient, m_value->child(0));
+ }
+ if (magic.shift > 0) {
+ magicQuotient = m_insertionSet.insert<Value>(
+ m_index, SShr, m_value->origin(), magicQuotient,
+ m_insertionSet.insert<Const32Value>(
+ m_index, m_value->origin(), magic.shift));
+ }
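+                // Arithmetic shift rounds toward negative infinity; adding the quotient's
+                // sign bit corrects the result to round toward zero, as signed integer
+                // division requires.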
+ replaceWithIdentity(
+ m_insertionSet.insert<Value>(
+ m_index, Add, m_value->origin(), magicQuotient,
+ m_insertionSet.insert<Value>(
+ m_index, ZShr, m_value->origin(), magicQuotient,
+ m_insertionSet.insert<Const32Value>(
+ m_index, m_value->origin(), 31))));
+ break;
+ }
+ break;
+ }
+ break;
+
+ case UDiv:
+ // Turn this: UDiv(constant1, constant2)
+ // Into this: constant1 / constant2
+ if (replaceWithNewValue(m_value->child(0)->uDivConstant(m_proc, m_value->child(1))))
+ break;
+
+ if (m_value->child(1)->hasInt()) {
+ switch (m_value->child(1)->asInt()) {
+ case 0:
+ // Turn this: UDiv(value, 0)
+ // Into this: 0
+ // We can do whatever we want here so we might as well do the chill thing,
+ // in case we add chill versions of UDiv in the future.
+ replaceWithIdentity(m_value->child(1));
+ break;
+
+ case 1:
+ // Turn this: UDiv(value, 1)
+ // Into this: value
+ replaceWithIdentity(m_value->child(0));
+ break;
+ default:
+ // FIXME: We should do comprehensive strength reduction for unsigned numbers. Likely,
+                // we will just want to copy what LLVM does. https://bugs.webkit.org/show_bug.cgi?id=164809
+ break;
+ }
+ }
+ break;
+
+ case Mod:
+ // Turn this: Mod(constant1, constant2)
+        // Into this: constant1 % constant2
+ // Note that this uses Mod<Chill> semantics.
+ if (replaceWithNewValue(m_value->child(0)->modConstant(m_proc, m_value->child(1))))
+ break;
+
+ // Modulo by constant is more efficient if we turn it into Div, and then let Div get
+ // optimized.
+ if (m_value->child(1)->hasInt()) {
+ switch (m_value->child(1)->asInt()) {
+ case 0:
+ // Turn this: Mod(value, 0)
+ // Into this: 0
+ // This is correct according to ChillMod semantics.
+ replaceWithIdentity(m_value->child(1));
+ break;
+
+ default:
+ // Turn this: Mod(N, D)
+ // Into this: Sub(N, Mul(Div(N, D), D))
+ //
+ // This is a speed-up because we use our existing Div optimizations.
+ //
+ // Here's an easier way to look at it:
+ // N % D = N - N / D * D
+ //
+ // Note that this does not work for D = 0 and ChillMod. The expected result is 0.
+ // That's why we have a special-case above.
+ // X % 0 = X - X / 0 * 0 = X (should be 0)
+ //
+ // This does work for the D = -1 special case.
+ // -2^31 % -1 = -2^31 - -2^31 / -1 * -1
+ // = -2^31 - -2^31 * -1
+ // = -2^31 - -2^31
+ // = 0
+
+ Kind divKind = Div;
+ divKind.setIsChill(m_value->isChill());
+
+ replaceWithIdentity(
+ m_insertionSet.insert<Value>(
+ m_index, Sub, m_value->origin(),
+ m_value->child(0),
+ m_insertionSet.insert<Value>(
+ m_index, Mul, m_value->origin(),
+ m_insertionSet.insert<Value>(
+ m_index, divKind, m_value->origin(),
+ m_value->child(0), m_value->child(1)),
+ m_value->child(1))));
+ break;
+ }
+ break;
+ }
+
+ break;
+
+ case UMod:
+ // Turn this: UMod(constant1, constant2)
+        // Into this: constant1 % constant2
+ replaceWithNewValue(m_value->child(0)->uModConstant(m_proc, m_value->child(1)));
+ // FIXME: We should do what we do for Mod since the same principle applies here.
+ // https://bugs.webkit.org/show_bug.cgi?id=164809
+ break;
+
+ case BitAnd:
+ handleCommutativity();
+
+ // Turn this: BitAnd(constant1, constant2)
+ // Into this: constant1 & constant2
+ if (Value* constantBitAnd = m_value->child(0)->bitAndConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constantBitAnd);
+ break;
+ }
+
+ // Turn this: BitAnd(BitAnd(value, constant1), constant2)
+ // Into this: BitAnd(value, constant1 & constant2).
+ if (m_value->child(0)->opcode() == BitAnd) {
+ Value* newConstant = m_value->child(1)->bitAndConstant(m_proc, m_value->child(0)->child(1));
+ if (newConstant) {
+ m_insertionSet.insertValue(m_index, newConstant);
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_value->child(1) = newConstant;
+ m_changed = true;
+ }
+ }
+
+ // Turn this: BitAnd(valueX, valueX)
+ // Into this: valueX.
+ if (m_value->child(0) == m_value->child(1)) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: BitAnd(value, zero-constant)
+ // Into this: zero-constant.
+ if (m_value->child(1)->isInt(0)) {
+ replaceWithIdentity(m_value->child(1));
+ break;
+ }
+
+ // Turn this: BitAnd(value, all-ones)
+ // Into this: value.
+ if ((m_value->type() == Int64 && m_value->child(1)->isInt(0xffffffffffffffff))
+ || (m_value->type() == Int32 && m_value->child(1)->isInt(0xffffffff))) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: BitAnd(64-bit value, 32 ones)
+ // Into this: ZExt32(Trunc(64-bit value))
+ if (m_value->child(1)->isInt64(0xffffffffllu)) {
+ Value* newValue = m_insertionSet.insert<Value>(
+ m_index, ZExt32, m_value->origin(),
+ m_insertionSet.insert<Value>(m_index, Trunc, m_value->origin(), m_value->child(0)));
+ replaceWithIdentity(newValue);
+ break;
+ }
+
+ // Turn this: BitAnd(SExt8(value), mask) where (mask & 0xffffff00) == 0
+ // Into this: BitAnd(value, mask)
+ if (m_value->child(0)->opcode() == SExt8 && m_value->child(1)->hasInt32()
+ && !(m_value->child(1)->asInt32() & 0xffffff00)) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ }
+
+ // Turn this: BitAnd(SExt16(value), mask) where (mask & 0xffff0000) == 0
+ // Into this: BitAnd(value, mask)
+ if (m_value->child(0)->opcode() == SExt16 && m_value->child(1)->hasInt32()
+ && !(m_value->child(1)->asInt32() & 0xffff0000)) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ }
+
+ // Turn this: BitAnd(SExt32(value), mask) where (mask & 0xffffffff00000000) == 0
+ // Into this: BitAnd(ZExt32(value), mask)
+ if (m_value->child(0)->opcode() == SExt32 && m_value->child(1)->hasInt32()
+ && !(m_value->child(1)->asInt32() & 0xffffffff00000000llu)) {
+ m_value->child(0) = m_insertionSet.insert<Value>(
+ m_index, ZExt32, m_value->origin(),
+ m_value->child(0)->child(0), m_value->child(0)->child(1));
+ m_changed = true;
+ }
+
+ // Turn this: BitAnd(Op(value, constant1), constant2)
+ // where !(constant1 & constant2)
+ // and Op is BitOr or BitXor
+ // into this: BitAnd(value, constant2)
+ if (m_value->child(1)->hasInt()) {
+ int64_t constant2 = m_value->child(1)->asInt();
+ switch (m_value->child(0)->opcode()) {
+ case BitOr:
+ case BitXor:
+ if (m_value->child(0)->child(1)->hasInt()
+ && !(m_value->child(0)->child(1)->asInt() & constant2)) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+
+ case BitOr:
+ handleCommutativity();
+
+ // Turn this: BitOr(constant1, constant2)
+ // Into this: constant1 | constant2
+ if (Value* constantBitOr = m_value->child(0)->bitOrConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constantBitOr);
+ break;
+ }
+
+ // Turn this: BitOr(BitOr(value, constant1), constant2)
+        // Into this: BitOr(value, constant1 | constant2).
+ if (m_value->child(0)->opcode() == BitOr) {
+ Value* newConstant = m_value->child(1)->bitOrConstant(m_proc, m_value->child(0)->child(1));
+ if (newConstant) {
+ m_insertionSet.insertValue(m_index, newConstant);
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_value->child(1) = newConstant;
+ m_changed = true;
+ }
+ }
+
+ // Turn this: BitOr(valueX, valueX)
+ // Into this: valueX.
+ if (m_value->child(0) == m_value->child(1)) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: BitOr(value, zero-constant)
+ // Into this: value.
+ if (m_value->child(1)->isInt(0)) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: BitOr(value, all-ones)
+ // Into this: all-ones.
+ if ((m_value->type() == Int64 && m_value->child(1)->isInt(0xffffffffffffffff))
+ || (m_value->type() == Int32 && m_value->child(1)->isInt(0xffffffff))) {
+ replaceWithIdentity(m_value->child(1));
+ break;
+ }
+
+ break;
+
+ case BitXor:
+ handleCommutativity();
+
+ // Turn this: BitXor(constant1, constant2)
+ // Into this: constant1 ^ constant2
+ if (Value* constantBitXor = m_value->child(0)->bitXorConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constantBitXor);
+ break;
+ }
+
+ // Turn this: BitXor(BitXor(value, constant1), constant2)
+ // Into this: BitXor(value, constant1 ^ constant2).
+ if (m_value->child(0)->opcode() == BitXor) {
+ Value* newConstant = m_value->child(1)->bitXorConstant(m_proc, m_value->child(0)->child(1));
+ if (newConstant) {
+ m_insertionSet.insertValue(m_index, newConstant);
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_value->child(1) = newConstant;
+ m_changed = true;
+ }
+ }
+
+ // Turn this: BitXor(compare, 1)
+ // Into this: invertedCompare
+ if (m_value->child(1)->isInt32(1)) {
+ if (Value* invertedCompare = m_value->child(0)->invertedCompare(m_proc)) {
+ replaceWithNewValue(invertedCompare);
+ break;
+ }
+ }
+
+ // Turn this: BitXor(valueX, valueX)
+ // Into this: zero-constant.
+ if (m_value->child(0) == m_value->child(1)) {
+ replaceWithNewValue(m_proc.addIntConstant(m_value, 0));
+ break;
+ }
+
+ // Turn this: BitXor(value, zero-constant)
+ // Into this: value.
+ if (m_value->child(1)->isInt(0)) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ break;
+
+ case Shl:
+ // Turn this: Shl(constant1, constant2)
+ // Into this: constant1 << constant2
+ if (Value* constant = m_value->child(0)->shlConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ handleShiftAmount();
+ break;
+
+ case SShr:
+ // Turn this: SShr(constant1, constant2)
+ // Into this: constant1 >> constant2
+ if (Value* constant = m_value->child(0)->sShrConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ if (m_value->child(1)->hasInt32()
+ && m_value->child(0)->opcode() == Shl
+ && m_value->child(0)->child(1)->hasInt32()
+ && m_value->child(1)->asInt32() == m_value->child(0)->child(1)->asInt32()) {
+ switch (m_value->child(1)->asInt32()) {
+ case 16:
+ if (m_value->type() == Int32) {
+ // Turn this: SShr(Shl(value, 16), 16)
+ // Into this: SExt16(value)
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ SExt16, m_value->origin(), m_value->child(0)->child(0)));
+ }
+ break;
+
+ case 24:
+ if (m_value->type() == Int32) {
+ // Turn this: SShr(Shl(value, 24), 24)
+ // Into this: SExt8(value)
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ SExt8, m_value->origin(), m_value->child(0)->child(0)));
+ }
+ break;
+
+ case 32:
+ if (m_value->type() == Int64) {
+ // Turn this: SShr(Shl(value, 32), 32)
+ // Into this: SExt32(Trunc(value))
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ SExt32, m_value->origin(),
+ m_insertionSet.insert<Value>(
+ m_index, Trunc, m_value->origin(),
+ m_value->child(0)->child(0))));
+ }
+ break;
+
+ // FIXME: Add cases for 48 and 56, but that would translate to SExt32(SExt8) or
+ // SExt32(SExt16), which we don't currently lower efficiently.
+
+ default:
+ break;
+ }
+
+ if (m_value->opcode() != SShr)
+ break;
+ }
+
+ handleShiftAmount();
+ break;
+
+ case ZShr:
+ // Turn this: ZShr(constant1, constant2)
+ // Into this: (unsigned)constant1 >> constant2
+ if (Value* constant = m_value->child(0)->zShrConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ handleShiftAmount();
+ break;
+
+ case RotR:
+ // Turn this: RotR(constant1, constant2)
+        // Into this: (constant1 >> constant2) | (constant1 << (sizeof(constant1) * 8 - constant2))
+ if (Value* constant = m_value->child(0)->rotRConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ handleShiftAmount();
+ break;
+
+ case RotL:
+ // Turn this: RotL(constant1, constant2)
+        // Into this: (constant1 << constant2) | (constant1 >> (sizeof(constant1) * 8 - constant2))
+ if (Value* constant = m_value->child(0)->rotLConstant(m_proc, m_value->child(1))) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ handleShiftAmount();
+ break;
+
+ case Abs:
+ // Turn this: Abs(constant)
+ // Into this: fabs<value->type()>(constant)
+ if (Value* constant = m_value->child(0)->absConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ // Turn this: Abs(Abs(value))
+ // Into this: Abs(value)
+ if (m_value->child(0)->opcode() == Abs) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: Abs(BitwiseCast(value))
+ // Into this: BitwiseCast(And(value, mask-top-bit))
+ if (m_value->child(0)->opcode() == BitwiseCast) {
+ Value* mask;
+ if (m_value->type() == Double)
+ mask = m_insertionSet.insert<Const64Value>(m_index, m_value->origin(), ~(1ll << 63));
+ else
+ mask = m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), ~(1l << 31));
+
+ Value* bitAnd = m_insertionSet.insert<Value>(m_index, BitAnd, m_value->origin(),
+ m_value->child(0)->child(0),
+ mask);
+ Value* cast = m_insertionSet.insert<Value>(m_index, BitwiseCast, m_value->origin(), bitAnd);
+ replaceWithIdentity(cast);
+ break;
+ }
+ break;
+
+ case Ceil:
+ // Turn this: Ceil(constant)
+ // Into this: ceil<value->type()>(constant)
+ if (Value* constant = m_value->child(0)->ceilConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ // Turn this: Ceil(roundedValue)
+ // Into this: roundedValue
+ if (m_value->child(0)->isRounded()) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+ break;
+
+ case Floor:
+ // Turn this: Floor(constant)
+ // Into this: floor<value->type()>(constant)
+ if (Value* constant = m_value->child(0)->floorConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ // Turn this: Floor(roundedValue)
+ // Into this: roundedValue
+ if (m_value->child(0)->isRounded()) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+ break;
+
+ case Sqrt:
+ // Turn this: Sqrt(constant)
+ // Into this: sqrt<value->type()>(constant)
+ if (Value* constant = m_value->child(0)->sqrtConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+ break;
+
+ case BitwiseCast:
+ // Turn this: BitwiseCast(constant)
+ // Into this: bitwise_cast<value->type()>(constant)
+ if (Value* constant = m_value->child(0)->bitwiseCastConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+
+ // Turn this: BitwiseCast(BitwiseCast(value))
+ // Into this: value
+ if (m_value->child(0)->opcode() == BitwiseCast) {
+ replaceWithIdentity(m_value->child(0)->child(0));
+ break;
+ }
+ break;
+
+ case SExt8:
+ // Turn this: SExt8(constant)
+ // Into this: static_cast<int8_t>(constant)
+ if (m_value->child(0)->hasInt32()) {
+ int32_t result = static_cast<int8_t>(m_value->child(0)->asInt32());
+ replaceWithNewValue(m_proc.addIntConstant(m_value, result));
+ break;
+ }
+
+ // Turn this: SExt8(SExt8(value))
+ // or this: SExt8(SExt16(value))
+ // Into this: SExt8(value)
+ if (m_value->child(0)->opcode() == SExt8 || m_value->child(0)->opcode() == SExt16) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ }
+
+ if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()) {
+ Value* input = m_value->child(0)->child(0);
+ int32_t mask = m_value->child(0)->child(1)->asInt32();
+
+ // Turn this: SExt8(BitAnd(input, mask)) where (mask & 0xff) == 0xff
+ // Into this: SExt8(input)
+ if ((mask & 0xff) == 0xff) {
+ m_value->child(0) = input;
+ m_changed = true;
+ break;
+ }
+
+ // Turn this: SExt8(BitAnd(input, mask)) where (mask & 0x80) == 0
+            // Into this: BitAnd(input, mask & 0x7f)
+ if (!(mask & 0x80)) {
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ BitAnd, m_value->origin(), input,
+ m_insertionSet.insert<Const32Value>(
+ m_index, m_value->origin(), mask & 0x7f)));
+ break;
+ }
+ }
+ break;
+
+ case SExt16:
+ // Turn this: SExt16(constant)
+ // Into this: static_cast<int16_t>(constant)
+ if (m_value->child(0)->hasInt32()) {
+ int32_t result = static_cast<int16_t>(m_value->child(0)->asInt32());
+ replaceWithNewValue(m_proc.addIntConstant(m_value, result));
+ break;
+ }
+
+ // Turn this: SExt16(SExt16(value))
+ // Into this: SExt16(value)
+ if (m_value->child(0)->opcode() == SExt16) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ }
+
+ // Turn this: SExt16(SExt8(value))
+ // Into this: SExt8(value)
+ if (m_value->child(0)->opcode() == SExt8) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()) {
+ Value* input = m_value->child(0)->child(0);
+ int32_t mask = m_value->child(0)->child(1)->asInt32();
+
+ // Turn this: SExt16(BitAnd(input, mask)) where (mask & 0xffff) == 0xffff
+ // Into this: SExt16(input)
+ if ((mask & 0xffff) == 0xffff) {
+ m_value->child(0) = input;
+ m_changed = true;
+ break;
+ }
+
+ // Turn this: SExt16(BitAnd(input, mask)) where (mask & 0x8000) == 0
+            // Into this: BitAnd(input, mask & 0x7fff)
+ if (!(mask & 0x8000)) {
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ BitAnd, m_value->origin(), input,
+ m_insertionSet.insert<Const32Value>(
+ m_index, m_value->origin(), mask & 0x7fff)));
+ break;
+ }
+ }
+ break;
+
+ case SExt32:
+ // Turn this: SExt32(constant)
+ // Into this: static_cast<int64_t>(constant)
+ if (m_value->child(0)->hasInt32()) {
+ replaceWithNewValue(m_proc.addIntConstant(m_value, m_value->child(0)->asInt32()));
+ break;
+ }
+
+ // Turn this: SExt32(BitAnd(input, mask)) where (mask & 0x80000000) == 0
+ // Into this: ZExt32(BitAnd(input, mask))
+ if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()
+ && !(m_value->child(0)->child(1)->asInt32() & 0x80000000)) {
+ replaceWithNewValue(
+ m_proc.add<Value>(
+ ZExt32, m_value->origin(), m_value->child(0)));
+ break;
+ }
+ break;
+
+ case ZExt32:
+ // Turn this: ZExt32(constant)
+ // Into this: static_cast<uint64_t>(static_cast<uint32_t>(constant))
+ if (m_value->child(0)->hasInt32()) {
+ replaceWithNewValue(
+ m_proc.addIntConstant(
+ m_value,
+ static_cast<uint64_t>(static_cast<uint32_t>(m_value->child(0)->asInt32()))));
+ break;
+ }
+ break;
+
+ case Trunc:
+ // Turn this: Trunc(constant)
+ // Into this: static_cast<int32_t>(constant)
+ if (m_value->child(0)->hasInt64() || m_value->child(0)->hasDouble()) {
+ replaceWithNewValue(
+ m_proc.addIntConstant(m_value, static_cast<int32_t>(m_value->child(0)->asInt64())));
+ break;
+ }
+
+ // Turn this: Trunc(SExt32(value)) or Trunc(ZExt32(value))
+ // Into this: value
+ if (m_value->child(0)->opcode() == SExt32 || m_value->child(0)->opcode() == ZExt32) {
+ replaceWithIdentity(m_value->child(0)->child(0));
+ break;
+ }
+
+ // Turn this: Trunc(Op(value, constant))
+ // where !(constant & 0xffffffff)
+ // and Op is Add, Sub, BitOr, or BitXor
+ // into this: Trunc(value)
+ switch (m_value->child(0)->opcode()) {
+ case Add:
+ case Sub:
+ case BitOr:
+ case BitXor:
+ if (m_value->child(0)->child(1)->hasInt64()
+ && !(m_value->child(0)->child(1)->asInt64() & 0xffffffffll)) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case IToD:
+ // Turn this: IToD(constant)
+ // Into this: ConstDouble(constant)
+ if (Value* constant = m_value->child(0)->iToDConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+ break;
+
+ case IToF:
+ // Turn this: IToF(constant)
+ // Into this: ConstFloat(constant)
+ if (Value* constant = m_value->child(0)->iToFConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+ break;
+
+ case FloatToDouble:
+ // Turn this: FloatToDouble(constant)
+ // Into this: ConstDouble(constant)
+ if (Value* constant = m_value->child(0)->floatToDoubleConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+ break;
+
+ case DoubleToFloat:
+ // Turn this: DoubleToFloat(FloatToDouble(value))
+ // Into this: value
+ if (m_value->child(0)->opcode() == FloatToDouble) {
+ replaceWithIdentity(m_value->child(0)->child(0));
+ break;
+ }
+
+ // Turn this: DoubleToFloat(constant)
+ // Into this: ConstFloat(constant)
+ if (Value* constant = m_value->child(0)->doubleToFloatConstant(m_proc)) {
+ replaceWithNewValue(constant);
+ break;
+ }
+ break;
+
+ case Select:
+ // Turn this: Select(constant, a, b)
+ // Into this: constant ? a : b
+ if (m_value->child(0)->hasInt32()) {
+ replaceWithIdentity(
+ m_value->child(0)->asInt32() ? m_value->child(1) : m_value->child(2));
+ break;
+ }
+
+ // Turn this: Select(Equal(x, 0), a, b)
+ // Into this: Select(x, b, a)
+ if (m_value->child(0)->opcode() == Equal && m_value->child(0)->child(1)->isInt(0)) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ std::swap(m_value->child(1), m_value->child(2));
+ m_changed = true;
+ break;
+ }
+
+ // Turn this: Select(BitXor(bool, 1), a, b)
+ // Into this: Select(bool, b, a)
+ if (m_value->child(0)->opcode() == BitXor
+ && m_value->child(0)->child(1)->isInt32(1)
+ && m_value->child(0)->child(0)->returnsBool()) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ std::swap(m_value->child(1), m_value->child(2));
+ m_changed = true;
+ break;
+ }
+
+ // Turn this: Select(BitAnd(bool, xyz1), a, b)
+ // Into this: Select(bool, a, b)
+ if (m_value->child(0)->opcode() == BitAnd
+ && m_value->child(0)->child(1)->hasInt()
+ && m_value->child(0)->child(1)->asInt() & 1
+ && m_value->child(0)->child(0)->returnsBool()) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ break;
+ }
+
+ // Turn this: Select(stuff, x, x)
+ // Into this: x
+ if (m_value->child(1) == m_value->child(2)) {
+ replaceWithIdentity(m_value->child(1));
+ break;
+ }
+ break;
+
+ case Load8Z:
+ case Load8S:
+ case Load16Z:
+ case Load16S:
+ case Load:
+ case Store8:
+ case Store16:
+ case Store: {
+ Value* address = m_value->lastChild();
+ MemoryValue* memory = m_value->as<MemoryValue>();
+
+ // Turn this: Load(Add(address, offset1), offset = offset2)
+ // Into this: Load(address, offset = offset1 + offset2)
+ //
+ // Also turns this: Store(value, Add(address, offset1), offset = offset2)
+ // Into this: Store(value, address, offset = offset1 + offset2)
+ if (address->opcode() == Add && address->child(1)->hasIntPtr()) {
+ intptr_t offset = address->child(1)->asIntPtr();
+ if (!sumOverflows<intptr_t>(offset, memory->offset())) {
+ offset += memory->offset();
+ int32_t smallOffset = static_cast<int32_t>(offset);
+ if (smallOffset == offset) {
+ address = address->child(0);
+ memory->lastChild() = address;
+ memory->setOffset(smallOffset);
+ m_changed = true;
+ }
+ }
+ }
+
+ // Turn this: Load(constant1, offset = constant2)
+ // Into this: Load(constant1 + constant2)
+ //
+ // This is a fun canonicalization. It purely regresses naively generated code. We rely
+ // on constant materialization to be smart enough to materialize this constant the smart
+ // way. We want this canonicalization because we want to know if two memory accesses see
+ // the same address.
+ if (memory->offset()) {
+ if (Value* newAddress = address->addConstant(m_proc, memory->offset())) {
+ m_insertionSet.insertValue(m_index, newAddress);
+ address = newAddress;
+ memory->lastChild() = newAddress;
+ memory->setOffset(0);
+ m_changed = true;
+ }
+ }
+
+ break;
+ }
+
+ case CCall: {
+ // Turn this: Call(fmod, constant1, constant2)
+ // Into this: fcall-constant(constant1, constant2)
+ double(*fmodDouble)(double, double) = fmod;
+ if (m_value->type() == Double
+ && m_value->numChildren() == 3
+ && m_value->child(0)->isIntPtr(reinterpret_cast<intptr_t>(fmodDouble))
+ && m_value->child(1)->type() == Double
+ && m_value->child(2)->type() == Double) {
+ replaceWithNewValue(m_value->child(1)->modConstant(m_proc, m_value->child(2)));
+ }
+ break;
+ }
+ case Equal:
+ handleCommutativity();
+
+ // Turn this: Equal(bool, 0)
+ // Into this: BitXor(bool, 1)
+ if (m_value->child(0)->returnsBool() && m_value->child(1)->isInt32(0)) {
+ replaceWithNew<Value>(
+ BitXor, m_value->origin(), m_value->child(0),
+ m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), 1));
+ break;
+ }
+
+        // Turn this: Equal(bool, 1)
+ // Into this: bool
+ if (m_value->child(0)->returnsBool() && m_value->child(1)->isInt32(1)) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: Equal(const1, const2)
+ // Into this: const1 == const2
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->equalConstant(m_value->child(1))));
+ break;
+
+ case NotEqual:
+ handleCommutativity();
+
+ if (m_value->child(0)->returnsBool()) {
+ // Turn this: NotEqual(bool, 0)
+ // Into this: bool
+ if (m_value->child(1)->isInt32(0)) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ // Turn this: NotEqual(bool, 1)
+ // Into this: Equal(bool, 0)
+ if (m_value->child(1)->isInt32(1)) {
+ replaceWithNew<Value>(
+ Equal, m_value->origin(), m_value->child(0),
+ m_insertionSet.insertIntConstant(m_index, m_value->origin(), Int32, 0));
+ break;
+ }
+ }
+
+ // Turn this: NotEqual(const1, const2)
+ // Into this: const1 != const2
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->notEqualConstant(m_value->child(1))));
+ break;
+
+ case LessThan:
+ // FIXME: We could do a better job of canonicalizing integer comparisons.
+ // https://bugs.webkit.org/show_bug.cgi?id=150958
+
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->lessThanConstant(m_value->child(1))));
+ break;
+
+ case GreaterThan:
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->greaterThanConstant(m_value->child(1))));
+ break;
+
+ case LessEqual:
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->lessEqualConstant(m_value->child(1))));
+ break;
+
+ case GreaterEqual:
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->greaterEqualConstant(m_value->child(1))));
+ break;
+
+ case Above:
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->aboveConstant(m_value->child(1))));
+ break;
+
+ case Below:
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->belowConstant(m_value->child(1))));
+ break;
+
+ case AboveEqual:
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->aboveEqualConstant(m_value->child(1))));
+ break;
+
+ case BelowEqual:
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(0)->belowEqualConstant(m_value->child(1))));
+ break;
+
+ case EqualOrUnordered:
+ handleCommutativity();
+
+        // Turn this: EqualOrUnordered(const1, const2)
+        // Into this: isunordered(const1, const2) || const1 == const2.
+        // Turn this: EqualOrUnordered(value, const_NaN)
+        // Into this: 1.
+ replaceWithNewValue(
+ m_proc.addBoolConstant(
+ m_value->origin(),
+ m_value->child(1)->equalOrUnorderedConstant(m_value->child(0))));
+ break;
+
+ case CheckAdd: {
+ if (replaceWithNewValue(m_value->child(0)->checkAddConstant(m_proc, m_value->child(1))))
+ break;
+
+ handleCommutativity();
+
+ if (m_value->child(1)->isInt(0)) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
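+        // If the operands' value ranges prove the addition cannot overflow, the checked
+        // add degrades to a plain Add.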
+ IntRange leftRange = rangeFor(m_value->child(0));
+ IntRange rightRange = rangeFor(m_value->child(1));
+ if (!leftRange.couldOverflowAdd(rightRange, m_value->type())) {
+ replaceWithNewValue(
+ m_proc.add<Value>(Add, m_value->origin(), m_value->child(0), m_value->child(1)));
+ break;
+ }
+ break;
+ }
+
+ case CheckSub: {
+ if (replaceWithNewValue(m_value->child(0)->checkSubConstant(m_proc, m_value->child(1))))
+ break;
+
+ if (m_value->child(1)->isInt(0)) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+
+ if (Value* negatedConstant = m_value->child(1)->checkNegConstant(m_proc)) {
+ m_insertionSet.insertValue(m_index, negatedConstant);
+ m_value->as<CheckValue>()->convertToAdd();
+ m_value->child(1) = negatedConstant;
+ m_changed = true;
+ break;
+ }
+
+ IntRange leftRange = rangeFor(m_value->child(0));
+ IntRange rightRange = rangeFor(m_value->child(1));
+ if (!leftRange.couldOverflowSub(rightRange, m_value->type())) {
+ replaceWithNewValue(
+ m_proc.add<Value>(Sub, m_value->origin(), m_value->child(0), m_value->child(1)));
+ break;
+ }
+ break;
+ }
+
+ case CheckMul: {
+ if (replaceWithNewValue(m_value->child(0)->checkMulConstant(m_proc, m_value->child(1))))
+ break;
+
+ handleCommutativity();
+
+ if (m_value->child(1)->hasInt()) {
+ bool modified = true;
+ switch (m_value->child(1)->asInt()) {
+ case 0:
+ replaceWithNewValue(m_proc.addIntConstant(m_value, 0));
+ break;
+ case 1:
+ replaceWithIdentity(m_value->child(0));
+ break;
+ case 2:
+ m_value->as<CheckValue>()->convertToAdd();
+ m_value->child(1) = m_value->child(0);
+ m_changed = true;
+ break;
+ default:
+ modified = false;
+ break;
+ }
+ if (modified)
+ break;
+ }
+
+ IntRange leftRange = rangeFor(m_value->child(0));
+ IntRange rightRange = rangeFor(m_value->child(1));
+ if (!leftRange.couldOverflowMul(rightRange, m_value->type())) {
+ replaceWithNewValue(
+ m_proc.add<Value>(Mul, m_value->origin(), m_value->child(0), m_value->child(1)));
+ break;
+ }
+ break;
+ }
+
+ case Check: {
+ CheckValue* checkValue = m_value->as<CheckValue>();
+
+ if (checkValue->child(0)->isLikeZero()) {
+ checkValue->replaceWithNop();
+ m_changed = true;
+ break;
+ }
+
+ if (checkValue->child(0)->isLikeNonZero()) {
+ PatchpointValue* patchpoint =
+ m_insertionSet.insert<PatchpointValue>(m_index, Void, checkValue->origin());
+
+ patchpoint->effects = Effects();
+ patchpoint->effects.reads = HeapRange::top();
+ patchpoint->effects.exitsSideways = true;
+
+ for (unsigned i = 1; i < checkValue->numChildren(); ++i)
+ patchpoint->append(checkValue->constrainedChild(i));
+
+ patchpoint->setGenerator(checkValue->generator());
+
+ // Replace the rest of the block with an Oops.
+ for (unsigned i = m_index + 1; i < m_block->size() - 1; ++i)
+ m_block->at(i)->replaceWithBottom(m_insertionSet, m_index);
+ m_block->last()->replaceWithOops(m_block);
+ m_block->last()->setOrigin(checkValue->origin());
+
+ // Replace ourselves last.
+ checkValue->replaceWithNop();
+ m_changedCFG = true;
+ break;
+ }
+
+ if (checkValue->child(0)->opcode() == NotEqual
+ && checkValue->child(0)->child(1)->isInt(0)) {
+ checkValue->child(0) = checkValue->child(0)->child(0);
+ m_changed = true;
+ }
+
+ // If we are checking some bounded-size SSA expression that leads to a Select that
+ // has a constant as one of its results, then turn the Select into a Branch and split
+ // the code between the Check and the Branch. For example, this:
+ //
+ // @a = Select(@p, @x, 42)
+ // @b = Add(@a, 35)
+ // Check(@b)
+ //
+ // becomes this:
+ //
+ // Branch(@p, #truecase, #falsecase)
+ //
+ // BB#truecase:
+ // @b_truecase = Add(@x, 35)
+ // Check(@b_truecase)
+ // Upsilon(@x, ^a)
+ // Upsilon(@b_truecase, ^b)
+ // Jump(#continuation)
+ //
+ // BB#falsecase:
+ // @b_falsecase = Add(42, 35)
+ // Check(@b_falsecase)
+ // Upsilon(42, ^a)
+ // Upsilon(@b_falsecase, ^b)
+ // Jump(#continuation)
+ //
+ // BB#continuation:
+ // @a = Phi()
+ // @b = Phi()
+ //
+ // The goal of this optimization is to kill a lot of code in one of those basic
+ // blocks. This is pretty much guaranteed since one of those blocks will replace all
+ // uses of the Select with a constant, and that constant will be transitively used
+ // from the check.
+ static const unsigned selectSpecializationBound = 3;
+ Value* select = findRecentNodeMatching(
+ m_value->child(0), selectSpecializationBound,
+ [&] (Value* value) -> bool {
+ return value->opcode() == Select
+ && (value->child(1)->isConstant() && value->child(2)->isConstant());
+ });
+
+ if (select) {
+ specializeSelect(select);
+ break;
+ }
+ break;
+ }
+
+ case Branch: {
+ // Turn this: Branch(NotEqual(x, 0))
+ // Into this: Branch(x)
+ if (m_value->child(0)->opcode() == NotEqual && m_value->child(0)->child(1)->isInt(0)) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ }
+
+ // Turn this: Branch(Equal(x, 0), then, else)
+ // Into this: Branch(x, else, then)
+ if (m_value->child(0)->opcode() == Equal && m_value->child(0)->child(1)->isInt(0)) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ std::swap(m_block->taken(), m_block->notTaken());
+ m_changed = true;
+ }
+
+ // Turn this: Branch(BitXor(bool, 1), then, else)
+ // Into this: Branch(bool, else, then)
+ if (m_value->child(0)->opcode() == BitXor
+ && m_value->child(0)->child(1)->isInt32(1)
+ && m_value->child(0)->child(0)->returnsBool()) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ std::swap(m_block->taken(), m_block->notTaken());
+ m_changed = true;
+ }
+
+        // Turn this: Branch(BitAnd(bool, xyz1), then, else)
+ // Into this: Branch(bool, then, else)
+ if (m_value->child(0)->opcode() == BitAnd
+ && m_value->child(0)->child(1)->hasInt()
+ && m_value->child(0)->child(1)->asInt() & 1
+ && m_value->child(0)->child(0)->returnsBool()) {
+ m_value->child(0) = m_value->child(0)->child(0);
+ m_changed = true;
+ }
+
+ TriState triState = m_value->child(0)->asTriState();
+
+ // Turn this: Branch(0, then, else)
+ // Into this: Jump(else)
+ if (triState == FalseTriState) {
+ m_block->taken().block()->removePredecessor(m_block);
+ m_value->replaceWithJump(m_block, m_block->notTaken());
+ m_changedCFG = true;
+ break;
+ }
+
+ // Turn this: Branch(not 0, then, else)
+ // Into this: Jump(then)
+ if (triState == TrueTriState) {
+ m_block->notTaken().block()->removePredecessor(m_block);
+ m_value->replaceWithJump(m_block, m_block->taken());
+ m_changedCFG = true;
+ break;
+ }
+
+ // If a check for the same property dominates us, we can kill the branch. This sort
+ // of makes sense here because it's cheap, but hacks like this show that we're going
+ // to need SCCP.
+ Value* check = m_pureCSE.findMatch(
+ ValueKey(Check, Void, m_value->child(0)), m_block, *m_dominators);
+ if (check) {
+ // The Check would have side-exited if child(0) was non-zero. So, it must be
+ // zero here.
+ m_block->taken().block()->removePredecessor(m_block);
+ m_value->replaceWithJump(m_block, m_block->notTaken());
+ m_changedCFG = true;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ // Find a node that:
+ // - functor(node) returns true.
+ // - it's reachable from the given node via children.
+ // - it's in the last "bound" slots in the current basic block.
+ // This algorithm is optimized under the assumption that the bound is small.
+ template<typename Functor>
+ Value* findRecentNodeMatching(Value* start, unsigned bound, const Functor& functor)
+ {
+ unsigned startIndex = bound < m_index ? m_index - bound : 0;
+ Value* result = nullptr;
+ start->walk(
+ [&] (Value* value) -> Value::WalkStatus {
+ bool found = false;
+ for (unsigned i = startIndex; i <= m_index; ++i) {
+ if (m_block->at(i) == value)
+ found = true;
+ }
+ if (!found)
+ return Value::IgnoreChildren;
+
+ if (functor(value)) {
+ result = value;
+ return Value::Stop;
+ }
+
+ return Value::Continue;
+ });
+ return result;
+ }
+
+ // This specializes a sequence of code up to a Select. This doesn't work when we're at a
+ // terminal. It would be cool to fix that eventually. The main problem is that instead of
+ // splitting the block, we should just insert the then/else blocks. We'll have to create
+ // double the Phis and double the Upsilons. It'll probably be the sort of optimization that
+ // we want to do only after we've done loop optimizations, since this will *definitely*
+ // obscure things. In fact, even this simpler form of select specialization will possibly
+ // obscure other optimizations. It would be great to have two modes of strength reduction,
+ // one that does obscuring optimizations and runs late, and another that does not do
+ // obscuring optimizations and runs early.
+ // FIXME: Make select specialization handle branches.
+ // FIXME: Have a form of strength reduction that does no obscuring optimizations and runs
+ // early.
+ void specializeSelect(Value* source)
+ {
+ if (verbose)
+ dataLog("Specializing select: ", deepDump(m_proc, source), "\n");
+
+ // This mutates startIndex to account for the fact that m_block got the front of it
+ // chopped off.
+ BasicBlock* predecessor =
+ m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+
+ // Splitting will commit the insertion set, which changes the exact position of the
+ // source. That's why we do the search after splitting.
+ unsigned startIndex = UINT_MAX;
+ for (unsigned i = predecessor->size(); i--;) {
+ if (predecessor->at(i) == source) {
+ startIndex = i;
+ break;
+ }
+ }
+
+ RELEASE_ASSERT(startIndex != UINT_MAX);
+
+ // By BasicBlock convention, caseIndex == 0 => then, caseIndex == 1 => else.
+ static const unsigned numCases = 2;
+ BasicBlock* cases[numCases];
+ for (unsigned i = 0; i < numCases; ++i)
+ cases[i] = m_blockInsertionSet.insertBefore(m_block);
+
+ HashMap<Value*, Value*> mappings[2];
+
+ // Save things we want to know about the source.
+ Value* predicate = source->child(0);
+
+ for (unsigned i = 0; i < numCases; ++i)
+ mappings[i].add(source, source->child(1 + i));
+
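+        // cloneValue copies a value into both case blocks, remapping its children through
+        // that case's substitution map, and then turns the original value into a Phi fed
+        // by Upsilons appended to each clone's block.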
+ auto cloneValue = [&] (Value* value) {
+ ASSERT(value != source);
+
+ for (unsigned i = 0; i < numCases; ++i) {
+ Value* clone = m_proc.clone(value);
+ for (Value*& child : clone->children()) {
+ if (Value* newChild = mappings[i].get(child))
+ child = newChild;
+ }
+ if (value->type() != Void)
+ mappings[i].add(value, clone);
+
+ cases[i]->append(clone);
+ if (value->type() != Void)
+ cases[i]->appendNew<UpsilonValue>(m_proc, value->origin(), clone, value);
+ }
+
+ value->replaceWithPhi();
+ };
+
+ // The jump that the splitter inserted is of no use to us.
+ predecessor->removeLast(m_proc);
+
+        // Handle the source; it's special.
+ for (unsigned i = 0; i < numCases; ++i) {
+ cases[i]->appendNew<UpsilonValue>(
+ m_proc, source->origin(), source->child(1 + i), source);
+ }
+ source->replaceWithPhi();
+ m_insertionSet.insertValue(m_index, source);
+
+ // Now handle all values between the source and the check.
+ for (unsigned i = startIndex + 1; i < predecessor->size(); ++i) {
+ Value* value = predecessor->at(i);
+ value->owner = nullptr;
+
+ cloneValue(value);
+
+ if (value->type() != Void)
+ m_insertionSet.insertValue(m_index, value);
+ else
+ m_proc.deleteValue(value);
+ }
+
+ // Finally, deal with the check.
+ cloneValue(m_value);
+
+ // Remove the values from the predecessor.
+ predecessor->values().resize(startIndex);
+
+ predecessor->appendNew<Value>(m_proc, Branch, source->origin(), predicate);
+ predecessor->setSuccessors(FrequentedBlock(cases[0]), FrequentedBlock(cases[1]));
+
+ for (unsigned i = 0; i < numCases; ++i) {
+ cases[i]->appendNew<Value>(m_proc, Jump, m_value->origin());
+ cases[i]->setSuccessors(FrequentedBlock(m_block));
+ }
+
+ m_changed = true;
+
+ predecessor->updatePredecessorsAfter();
+ }
+
+ // Turn this: Add(constant, value)
+ // Into this: Add(value, constant)
+ //
+ // Also:
+ // Turn this: Add(value1, value2)
+ // Into this: Add(value2, value1)
+ // If we decide that value2 coming first is the canonical ordering.
+ void handleCommutativity()
+ {
+ // Note that we have commutative operations that take more than two children. Those operations may
+ // commute their first two children while leaving the rest unaffected.
+ ASSERT(m_value->numChildren() >= 2);
+
+ // Leave it alone if the right child is a constant.
+ if (m_value->child(1)->isConstant())
+ return;
+
+ if (m_value->child(0)->isConstant()) {
+ std::swap(m_value->child(0), m_value->child(1));
+ m_changed = true;
+ return;
+ }
+
+ // Sort the operands. This is an important canonicalization. We use the index instead of
+ // the address to make this at least slightly deterministic.
+ if (m_value->child(0)->index() > m_value->child(1)->index()) {
+ std::swap(m_value->child(0), m_value->child(1));
+ m_changed = true;
+ return;
+ }
+ }
+
+    // FIXME: This should really be a forward analysis. Instead, we use a bounded-search backwards
+ // analysis.
+ IntRange rangeFor(Value* value, unsigned timeToLive = 5)
+ {
+ if (!timeToLive)
+ return IntRange::top(value->type());
+
+ switch (value->opcode()) {
+ case Const32:
+ case Const64: {
+ int64_t intValue = value->asInt();
+ return IntRange(intValue, intValue);
+ }
+
+ case BitAnd:
+ if (value->child(1)->hasInt())
+ return IntRange::rangeForMask(value->child(1)->asInt(), value->type());
+ break;
+
+ case SShr:
+ if (value->child(1)->hasInt32()) {
+ return rangeFor(value->child(0), timeToLive - 1).sShr(
+ value->child(1)->asInt32(), value->type());
+ }
+ break;
+
+ case ZShr:
+ if (value->child(1)->hasInt32()) {
+ return rangeFor(value->child(0), timeToLive - 1).zShr(
+ value->child(1)->asInt32(), value->type());
+ }
+ break;
+
+ case Shl:
+ if (value->child(1)->hasInt32()) {
+ return rangeFor(value->child(0), timeToLive - 1).shl(
+ value->child(1)->asInt32(), value->type());
+ }
+ break;
+
+ case Add:
+ return rangeFor(value->child(0), timeToLive - 1).add(
+ rangeFor(value->child(1), timeToLive - 1), value->type());
+
+ case Sub:
+ return rangeFor(value->child(0), timeToLive - 1).sub(
+ rangeFor(value->child(1), timeToLive - 1), value->type());
+
+ case Mul:
+ return rangeFor(value->child(0), timeToLive - 1).mul(
+ rangeFor(value->child(1), timeToLive - 1), value->type());
+
+ default:
+ break;
+ }
+
+ return IntRange::top(value->type());
+ }
+
+ template<typename ValueType, typename... Arguments>
+ void replaceWithNew(Arguments... arguments)
+ {
+ replaceWithNewValue(m_proc.add<ValueType>(arguments...));
+ }
+
+ bool replaceWithNewValue(Value* newValue)
+ {
+ if (!newValue)
+ return false;
+ m_insertionSet.insertValue(m_index, newValue);
+ m_value->replaceWithIdentity(newValue);
+ m_changed = true;
+ return true;
+ }
+
+ void replaceWithIdentity(Value* newValue)
+ {
+ m_value->replaceWithIdentity(newValue);
+ m_changed = true;
+ }
+
+ void handleShiftAmount()
+ {
+        // Shifting anything by zero is the identity.
+ if (m_value->child(1)->isInt32(0)) {
+ replaceWithIdentity(m_value->child(0));
+ return;
+ }
+
+ // The shift already masks its shift amount. If the shift amount is being masked by a
+ // redundant amount, then remove the mask. For example,
+ // Turn this: Shl(@x, BitAnd(@y, 63))
+ // Into this: Shl(@x, @y)
+ unsigned mask = sizeofType(m_value->type()) * 8 - 1;
+ if (m_value->child(1)->opcode() == BitAnd
+ && m_value->child(1)->child(1)->hasInt32()
+ && (m_value->child(1)->child(1)->asInt32() & mask) == mask) {
+ m_value->child(1) = m_value->child(1)->child(0);
+ m_changed = true;
+ }
+ }
+
+ void replaceIfRedundant()
+ {
+ m_changed |= m_pureCSE.process(m_value, *m_dominators);
+ }
+
+ void simplifyCFG()
+ {
+ if (verbose) {
+ dataLog("Before simplifyCFG:\n");
+ dataLog(m_proc);
+ }
+
+ // We have three easy simplification rules:
+ //
+ // 1) If a successor is a block that just jumps to another block, then jump directly to
+ // that block.
+ //
+ // 2) If all successors are the same and the operation has no effects, then use a jump
+ // instead.
+ //
+ // 3) If you jump to a block that is not you and has one predecessor, then merge.
+ //
+ // Note that because of the first rule, this phase may introduce critical edges. That's fine.
+ // If you need broken critical edges, then you have to break them yourself.
+
+ // Note that this relies on predecessors being at least conservatively correct. It's fine for
+ // predecessors to mention a block that isn't actually a predecessor. It's *not* fine for a
+ // predecessor to be omitted. We assert as much in the loop. In practice, we precisely preserve
+ // predecessors during strength reduction since that minimizes the total number of fixpoint
+ // iterations needed to kill a lot of code.
+
+ for (BasicBlock* block : m_proc) {
+ if (verbose)
+ dataLog("Considering block ", *block, ":\n");
+
+ checkPredecessorValidity();
+
+ // We don't care about blocks that don't have successors.
+ if (!block->numSuccessors())
+ continue;
+
+ // First check if any of the successors of this block can be forwarded over.
+ for (BasicBlock*& successor : block->successorBlocks()) {
+ if (successor != block
+ && successor->size() == 1
+ && successor->last()->opcode() == Jump) {
+ BasicBlock* newSuccessor = successor->successorBlock(0);
+ if (newSuccessor != successor) {
+ if (verbose) {
+ dataLog(
+ "Replacing ", pointerDump(block), "->", pointerDump(successor),
+ " with ", pointerDump(block), "->", pointerDump(newSuccessor),
+ "\n");
+ }
+ // Note that we do not do replacePredecessor() because the block we're
+ // skipping will still have newSuccessor as its successor.
+ newSuccessor->addPredecessor(block);
+ successor = newSuccessor;
+ m_changedCFG = true;
+ }
+ }
+ }
+
+ // Now check if the block's terminal can be replaced with a jump.
+ if (block->numSuccessors() > 1) {
+ // The terminal must not have weird effects.
+ Effects effects = block->last()->effects();
+ effects.terminal = false;
+ if (!effects.mustExecute()) {
+ // All of the successors must be the same.
+ bool allSame = true;
+ BasicBlock* firstSuccessor = block->successorBlock(0);
+ for (unsigned i = 1; i < block->numSuccessors(); ++i) {
+ if (block->successorBlock(i) != firstSuccessor) {
+ allSame = false;
+ break;
+ }
+ }
+ if (allSame) {
+ if (verbose) {
+ dataLog(
+ "Changing ", pointerDump(block), "'s terminal to a Jump.\n");
+ }
+ block->last()->replaceWithJump(block, FrequentedBlock(firstSuccessor));
+ m_changedCFG = true;
+ }
+ }
+ }
+
+ // Finally handle jumps to a block with one predecessor.
+ if (block->numSuccessors() == 1) {
+ BasicBlock* successor = block->successorBlock(0);
+ if (successor != block && successor->numPredecessors() == 1) {
+ RELEASE_ASSERT(successor->predecessor(0) == block);
+
+ // We can merge the two blocks, because the predecessor only jumps to the successor
+ // and the successor is only reachable from the predecessor.
+
+ // Remove the terminal.
+ Value* value = block->values().takeLast();
+ Origin jumpOrigin = value->origin();
+ RELEASE_ASSERT(value->effects().terminal);
+ m_proc.deleteValue(value);
+
+ // Append the full contents of the successor to the predecessor.
+ block->values().appendVector(successor->values());
+ block->successors() = successor->successors();
+
+ // Make sure that the successor has nothing left in it. Make sure that the block
+ // has a terminal so that nobody chokes when they look at it.
+ successor->values().resize(0);
+ successor->appendNew<Value>(m_proc, Oops, jumpOrigin);
+ successor->clearSuccessors();
+
+ // Ensure that predecessors of block's new successors know what's up.
+ for (BasicBlock* newSuccessor : block->successorBlocks())
+ newSuccessor->replacePredecessor(successor, block);
+
+ if (verbose) {
+ dataLog(
+ "Merged ", pointerDump(block), "->", pointerDump(successor), "\n");
+ }
+
+ m_changedCFG = true;
+ }
+ }
+ }
+
+ if (m_changedCFG && verbose) {
+ dataLog("B3 after simplifyCFG:\n");
+ dataLog(m_proc);
+ }
+ }
+
+ void checkPredecessorValidity()
+ {
+ if (!shouldValidateIRAtEachPhase())
+ return;
+
+ for (BasicBlock* block : m_proc) {
+ for (BasicBlock* successor : block->successorBlocks())
+ RELEASE_ASSERT(successor->containsPredecessor(block));
+ }
+ }
+
+ void killDeadCode()
+ {
+ GraphNodeWorklist<Value*, IndexSet<Value>> worklist;
+ Vector<UpsilonValue*, 64> upsilons;
+ for (BasicBlock* block : m_proc) {
+ for (Value* value : *block) {
+ Effects effects;
+ // We don't care about effects of SSA operations, since we model them more
+ // accurately than the effects() method does.
+ if (value->opcode() != Phi && value->opcode() != Upsilon)
+ effects = value->effects();
+
+ if (effects.mustExecute())
+ worklist.push(value);
+
+ if (UpsilonValue* upsilon = value->as<UpsilonValue>())
+ upsilons.append(upsilon);
+ }
+ }
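+        // An Upsilon is live only if its Phi is live, so Upsilons are pushed lazily:
+        // each pass re-scans the pending Upsilons and enqueues those whose Phi the
+        // worklist has now seen, until no more can be added.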
+ for (;;) {
+ while (Value* value = worklist.pop()) {
+ for (Value* child : value->children())
+ worklist.push(child);
+ }
+
+ bool didPush = false;
+ for (size_t upsilonIndex = 0; upsilonIndex < upsilons.size(); ++upsilonIndex) {
+ UpsilonValue* upsilon = upsilons[upsilonIndex];
+ if (worklist.saw(upsilon->phi())) {
+ worklist.push(upsilon);
+ upsilons[upsilonIndex--] = upsilons.last();
+ upsilons.takeLast();
+ didPush = true;
+ }
+ }
+ if (!didPush)
+ break;
+ }
+
+ IndexSet<Variable> liveVariables;
+
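+        // Compact each block in place, keeping only values the worklist reached, and
+        // record which Variables are still referenced so unreferenced ones can be
+        // deleted afterwards.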
+ for (BasicBlock* block : m_proc) {
+ size_t sourceIndex = 0;
+ size_t targetIndex = 0;
+ while (sourceIndex < block->size()) {
+ Value* value = block->at(sourceIndex++);
+ if (worklist.saw(value)) {
+ if (VariableValue* variableValue = value->as<VariableValue>())
+ liveVariables.add(variableValue->variable());
+ block->at(targetIndex++) = value;
+ } else {
+ m_proc.deleteValue(value);
+ m_changed = true;
+ }
+ }
+ block->values().resize(targetIndex);
+ }
+
+ for (Variable* variable : m_proc.variables()) {
+ if (!liveVariables.contains(variable))
+ m_proc.deleteVariable(variable);
+ }
+ }
+
+ void simplifySSA()
+ {
+ // This runs Aycock and Horspool's algorithm on our Phi functions [1]. For most CFG patterns,
+ // this can take a suboptimal arrangement of Phi functions and make it optimal, as if you had
+ // run Cytron, Ferrante, Rosen, Wegman, and Zadeck. It's only suboptimal for irreducible
+ // CFGs. In practice, that doesn't matter, since we expect clients of B3 to run their own SSA
+ // conversion before lowering to B3, and in the case of the DFG, that conversion uses Cytron
+ // et al. In that context, this algorithm is intended to simplify Phi functions that were
+ // made redundant by prior CFG simplification. But according to Aycock and Horspool's paper,
+ // this algorithm is good enough that a B3 client could just give us maximal Phi's (i.e. Phi
+ // for each variable at each basic block) and we will make them optimal.
+ // [1] http://pages.cpsc.ucalgary.ca/~aycock/papers/ssa.ps
+
+ // Aycock and Horspool prescribe two rules that are to be run to fixpoint:
+ //
+ // 1) If all of the Phi's children are the same (i.e. it's one child referenced from one or
+ // more Upsilons), then replace all uses of the Phi with the one child.
+ //
+ // 2) If all of the Phi's children are either the Phi itself or exactly one other child, then
+ // replace all uses of the Phi with the one other child.
+ //
+ // Rule (2) subsumes rule (1), so we can just run (2). We only run one fixpoint iteration
+        // here. The premise is that in common cases, this will only find optimization opportunities
+ // as a result of CFG simplification and usually CFG simplification will only do one round
+ // of block merging per ReduceStrength fixpoint iteration, so it's OK for this to only do one
+ // round of Phi merging - since Phis are the value analogue of blocks.
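+        //
+        // For example, if a Phi's Upsilons shepherd only the Phi itself and one other
+        // value @x, then every path gives the Phi the value @x, so the Phi can be
+        // replaced with @x; the loop below detects exactly that shape.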
+
+ PhiChildren phiChildren(m_proc);
+
+ for (Value* phi : phiChildren.phis()) {
+ Value* otherChild = nullptr;
+ bool ok = true;
+ for (Value* child : phiChildren[phi].values()) {
+ if (child == phi)
+ continue;
+ if (child == otherChild)
+ continue;
+ if (!otherChild) {
+ otherChild = child;
+ continue;
+ }
+ ok = false;
+ break;
+ }
+ if (!ok)
+ continue;
+ if (!otherChild) {
+ // Wow, this would be super weird. It probably won't happen, except that things could
+ // get weird as a consequence of stepwise simplifications in the strength reduction
+ // fixpoint.
+ continue;
+ }
+
+ // Turn the Phi into an Identity and turn the Upsilons into Nops.
+ m_changed = true;
+ for (Value* upsilon : phiChildren[phi])
+ upsilon->replaceWithNop();
+ phi->replaceWithIdentity(otherChild);
+ }
+ }
+
+ Procedure& m_proc;
+ InsertionSet m_insertionSet;
+ BlockInsertionSet m_blockInsertionSet;
+ BasicBlock* m_block { nullptr };
+ unsigned m_index { 0 };
+ Value* m_value { nullptr };
+ Dominators* m_dominators { nullptr };
+ PureCSE m_pureCSE;
+ bool m_changed { false };
+ bool m_changedCFG { false };
+};
+
+} // anonymous namespace
+
+bool reduceStrength(Procedure& proc)
+{
+ PhaseScope phaseScope(proc, "reduceStrength");
+ ReduceStrength reduceStrength(proc);
+ return reduceStrength.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3ReduceStrength.h b/Source/JavaScriptCore/b3/B3ReduceStrength.h
new file mode 100644
index 000000000..1abb80f64
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ReduceStrength.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Does strength reduction, constant folding, canonicalization, CFG simplification, DCE, and very
+// simple CSE. This phase runs those optimizations to fixpoint. The goal of the phase is to
+// dramatically reduce the complexity of the code. When adding optimizations in the future, it's
+// preferable to add them to this phase rather than creating a new phase, so that they can
+// participate in the fixpoint. However, because of the many interlocking optimizations, it can be
+// difficult to add sophisticated optimizations here. For that reason we have full CSE in a separate
+// phase, for example.
+
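+// Illustrative usage (not part of this patch): the phase takes a Procedure and returns true if it
+// changed anything, e.g.
+//
+//     bool changed = reduceStrength(proc);
+//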
+JS_EXPORT_PRIVATE bool reduceStrength(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SSACalculator.cpp b/Source/JavaScriptCore/b3/B3SSACalculator.cpp
new file mode 100644
index 000000000..30692a997
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SSACalculator.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3SSACalculator.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+void SSACalculator::Variable::dump(PrintStream& out) const
+{
+ out.print("var", m_index);
+}
+
+void SSACalculator::Variable::dumpVerbose(PrintStream& out) const
+{
+ dump(out);
+ if (!m_blocksWithDefs.isEmpty()) {
+ out.print("(defs: ");
+ CommaPrinter comma;
+ for (BasicBlock* block : m_blocksWithDefs)
+ out.print(comma, *block);
+ out.print(")");
+ }
+}
+
+void SSACalculator::Def::dump(PrintStream& out) const
+{
+ out.print("def(", *m_variable, ", ", *m_block, ", ", pointerDump(m_value), ")");
+}
+
+SSACalculator::SSACalculator(Procedure& proc)
+ : m_data(proc.size())
+ , m_proc(proc)
+{
+}
+
+SSACalculator::~SSACalculator()
+{
+}
+
+void SSACalculator::reset()
+{
+ m_variables.clear();
+ m_defs.clear();
+ m_phis.clear();
+ for (unsigned blockIndex = m_data.size(); blockIndex--;) {
+ m_data[blockIndex].m_defs.clear();
+ m_data[blockIndex].m_phis.clear();
+ }
+}
+
+SSACalculator::Variable* SSACalculator::newVariable()
+{
+ return &m_variables.alloc(Variable(m_variables.size()));
+}
+
+SSACalculator::Def* SSACalculator::newDef(Variable* variable, BasicBlock* block, Value* value)
+{
+ Def* def = m_defs.add(Def(variable, block, value));
+ auto result = m_data[block].m_defs.add(variable, def);
+ if (result.isNewEntry)
+ variable->m_blocksWithDefs.append(block);
+ else
+ result.iterator->value = def;
+ return def;
+}
+
+SSACalculator::Def* SSACalculator::nonLocalReachingDef(BasicBlock* block, Variable* variable)
+{
+ return reachingDefAtTail(m_dominators->idom(block), variable);
+}
+
+SSACalculator::Def* SSACalculator::reachingDefAtTail(BasicBlock* block, Variable* variable)
+{
+ for (; block; block = m_dominators->idom(block)) {
+ if (Def* def = m_data[block].m_defs.get(variable))
+ return def;
+ }
+ return nullptr;
+}
+
+void SSACalculator::dump(PrintStream& out) const
+{
+ out.print("<Variables: [");
+ CommaPrinter comma;
+ for (unsigned i = 0; i < m_variables.size(); ++i) {
+ out.print(comma);
+ m_variables[i].dumpVerbose(out);
+ }
+ out.print("], Defs: [");
+ comma = CommaPrinter();
+ for (Def* def : const_cast<SSACalculator*>(this)->m_defs)
+ out.print(comma, *def);
+ out.print("], Phis: [");
+ comma = CommaPrinter();
+ for (Def* def : const_cast<SSACalculator*>(this)->m_phis)
+ out.print(comma, *def);
+ out.print("], Block data: [");
+ comma = CommaPrinter();
+ for (unsigned blockIndex = 0; blockIndex < m_proc.size(); ++blockIndex) {
+ BasicBlock* block = m_proc[blockIndex];
+ if (!block)
+ continue;
+
+ out.print(comma, *block, "=>(");
+ out.print("Defs: {");
+ CommaPrinter innerComma;
+ for (auto entry : m_data[block].m_defs)
+ out.print(innerComma, *entry.key, "->", *entry.value);
+ out.print("}, Phis: {");
+ innerComma = CommaPrinter();
+ for (Def* def : m_data[block].m_phis)
+ out.print(innerComma, *def);
+ out.print("})");
+ }
+ out.print("]>");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3SSACalculator.h b/Source/JavaScriptCore/b3/B3SSACalculator.h
new file mode 100644
index 000000000..be9a0648f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SSACalculator.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Dominators.h"
+#include "B3ProcedureInlines.h"
+#include <wtf/Bag.h>
+#include <wtf/IndexMap.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC { namespace B3 {
+
+// SSACalculator provides a reusable tool for building SSA form. It's modeled after
+// DFG::SSACalculator.
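+//
+// A typical use (illustrative sketch, not part of this patch) has three steps: declare variables and
+// defs, let computePhis() place Phis at pruned iterated dominance frontiers, and then wire up the
+// uses. The names blocksThatWrite and valueWrittenIn() below are hypothetical client-side helpers:
+//
+//     SSACalculator ssa(proc);
+//     SSACalculator::Variable* var = ssa.newVariable();
+//     for (BasicBlock* block : blocksThatWrite)
+//         ssa.newDef(var, block, valueWrittenIn(block));
+//     ssa.computePhis(
+//         [&] (SSACalculator::Variable*, BasicBlock* block) -> Value* {
+//             // Create a Phi destined for the head of 'block'; returning nullptr refuses the Phi.
+//             return proc.add<Value>(Phi, Int32, block->at(0)->origin());
+//         });
+//     // Then, for each block, use phisForBlock(), reachingDefAtHead(), and reachingDefAtTail() to
+//     // insert the Phis, add Upsilons in predecessors, and rewrite the uses of the variable.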
+
+class SSACalculator {
+public:
+ SSACalculator(Procedure&);
+ ~SSACalculator();
+
+ void reset();
+
+ class Variable {
+ public:
+ unsigned index() const { return m_index; }
+
+ void dump(PrintStream&) const;
+ void dumpVerbose(PrintStream&) const;
+
+ private:
+ friend class SSACalculator;
+
+ Variable()
+ : m_index(UINT_MAX)
+ {
+ }
+
+ Variable(unsigned index)
+ : m_index(index)
+ {
+ }
+
+ Vector<BasicBlock*, 4> m_blocksWithDefs;
+ unsigned m_index;
+ };
+
+ class Def {
+ public:
+ Variable* variable() const { return m_variable; }
+ BasicBlock* block() const { return m_block; }
+
+ Value* value() const { return m_value; }
+
+ void dump(PrintStream&) const;
+
+ private:
+ friend class SSACalculator;
+
+ Def()
+ : m_variable(nullptr)
+ , m_block(nullptr)
+ , m_value(nullptr)
+ {
+ }
+
+ Def(Variable* variable, BasicBlock* block, Value* value)
+ : m_variable(variable)
+ , m_block(block)
+ , m_value(value)
+ {
+ }
+
+ Variable* m_variable;
+ BasicBlock* m_block;
+ Value* m_value;
+ };
+
+ Variable* newVariable();
+ Def* newDef(Variable*, BasicBlock*, Value*);
+
+ Variable* variable(unsigned index) { return &m_variables[index]; }
+
+ template<typename Functor>
+ void computePhis(const Functor& functor)
+ {
+ m_dominators = &m_proc.dominators();
+ for (Variable& variable : m_variables) {
+ m_dominators->forAllBlocksInPrunedIteratedDominanceFrontierOf(
+ variable.m_blocksWithDefs,
+ [&] (BasicBlock* block) -> bool {
+ Value* phi = functor(&variable, block);
+ if (!phi)
+ return false;
+
+ BlockData& data = m_data[block];
+ Def* phiDef = m_phis.add(Def(&variable, block, phi));
+ data.m_phis.append(phiDef);
+
+ data.m_defs.add(&variable, phiDef);
+ return true;
+ });
+ }
+ }
+
+ const Vector<Def*>& phisForBlock(BasicBlock* block)
+ {
+ return m_data[block].m_phis;
+ }
+
+ // Ignores defs within the given block; it assumes that you've taken care of those
+ // yourself.
+ Def* nonLocalReachingDef(BasicBlock*, Variable*);
+ Def* reachingDefAtHead(BasicBlock* block, Variable* variable)
+ {
+ return nonLocalReachingDef(block, variable);
+ }
+
+ // Considers the def within the given block, but only works at the tail of the block.
+ Def* reachingDefAtTail(BasicBlock*, Variable*);
+
+ void dump(PrintStream&) const;
+
+private:
+ SegmentedVector<Variable> m_variables;
+ Bag<Def> m_defs;
+
+ Bag<Def> m_phis;
+
+ struct BlockData {
+ HashMap<Variable*, Def*> m_defs;
+ Vector<Def*> m_phis;
+ };
+
+ IndexMap<BasicBlock, BlockData> m_data;
+
+ Dominators* m_dominators { nullptr };
+ Procedure& m_proc;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SlotBaseValue.cpp b/Source/JavaScriptCore/b3/B3SlotBaseValue.cpp
new file mode 100644
index 000000000..b5fd69bc8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SlotBaseValue.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3SlotBaseValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackSlot.h"
+
+namespace JSC { namespace B3 {
+
+SlotBaseValue::~SlotBaseValue()
+{
+}
+
+void SlotBaseValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma, pointerDump(m_slot));
+}
+
+Value* SlotBaseValue::cloneImpl() const
+{
+ return new SlotBaseValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SlotBaseValue.h b/Source/JavaScriptCore/b3/B3SlotBaseValue.h
new file mode 100644
index 000000000..19392ea02
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SlotBaseValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class StackSlot;
+
+class JS_EXPORT_PRIVATE SlotBaseValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == SlotBase; }
+
+ ~SlotBaseValue();
+
+ StackSlot* slot() const { return m_slot; }
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ SlotBaseValue(Origin origin, StackSlot* slot)
+ : Value(CheckedOpcode, SlotBase, pointerType(), origin)
+ , m_slot(slot)
+ {
+ }
+
+ StackSlot* m_slot;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SparseCollection.h b/Source/JavaScriptCore/b3/B3SparseCollection.h
new file mode 100644
index 000000000..46c33a930
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SparseCollection.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+// B3::Procedure and Air::Code have a lot of collections of indexed things. This class has all of
+// the shared logic: it owns the entries, hands out stable indices, and recycles an index once its
+// entry is removed.
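+//
+// Illustrative usage (not part of this patch), assuming a type Thing that has an unsigned m_index
+// member and declares SparseCollection<Thing> as a friend (use() is a hypothetical consumer):
+//
+//     SparseCollection<Thing> things;
+//     Thing* a = things.addNew();   // constructs a Thing and assigns its m_index
+//     Thing* b = things.addNew();
+//     things.remove(a);             // the slot becomes null and the index goes on the free list
+//     for (Thing* thing : things)   // iteration skips null slots, so only 'b' is visited here
+//         use(thing);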
+
+template<typename T>
+class SparseCollection {
+ typedef Vector<std::unique_ptr<T>> VectorType;
+
+public:
+ SparseCollection()
+ {
+ }
+
+ T* add(std::unique_ptr<T> value)
+ {
+ T* result = value.get();
+
+ size_t index;
+ if (m_indexFreeList.isEmpty()) {
+ index = m_vector.size();
+ m_vector.append(nullptr);
+ } else
+ index = m_indexFreeList.takeLast();
+
+ value->m_index = index;
+ ASSERT(!m_vector[index]);
+ new (NotNull, &m_vector[index]) std::unique_ptr<T>(WTFMove(value));
+
+ return result;
+ }
+
+ template<typename... Arguments>
+ T* addNew(Arguments&&... arguments)
+ {
+ return add(std::unique_ptr<T>(new T(std::forward<Arguments>(arguments)...)));
+ }
+
+ void remove(T* value)
+ {
+ RELEASE_ASSERT(m_vector[value->m_index].get() == value);
+ m_indexFreeList.append(value->m_index);
+ m_vector[value->m_index] = nullptr;
+ }
+
+ unsigned size() const { return m_vector.size(); }
+ bool isEmpty() const { return m_vector.isEmpty(); }
+
+ T* at(unsigned index) const { return m_vector[index].get(); }
+ T* operator[](unsigned index) const { return at(index); }
+
+ class iterator {
+ public:
+ iterator()
+ : m_collection(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(const SparseCollection& collection, unsigned index)
+ : m_collection(&collection)
+ , m_index(findNext(index))
+ {
+ }
+
+ T* operator*()
+ {
+ return m_collection->at(m_index);
+ }
+
+ iterator& operator++()
+ {
+ m_index = findNext(m_index + 1);
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ ASSERT(m_collection == other.m_collection);
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ unsigned findNext(unsigned index)
+ {
+ while (index < m_collection->size() && !m_collection->at(index))
+ index++;
+ return index;
+ }
+
+ const SparseCollection* m_collection;
+ unsigned m_index;
+ };
+
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, size()); }
+
+private:
+ Vector<std::unique_ptr<T>, 0, UnsafeVectorOverflow> m_vector;
+ Vector<size_t, 0, UnsafeVectorOverflow> m_indexFreeList;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackSlot.cpp b/Source/JavaScriptCore/b3/B3StackSlot.cpp
new file mode 100644
index 000000000..4e22014a4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackSlot.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3StackSlot.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+StackSlot::~StackSlot()
+{
+}
+
+void StackSlot::dump(PrintStream& out) const
+{
+ out.print("stack", m_index);
+}
+
+void StackSlot::deepDump(PrintStream& out) const
+{
+ out.print("byteSize = ", m_byteSize, ", offsetFromFP = ", m_offsetFromFP);
+}
+
+StackSlot::StackSlot(unsigned byteSize)
+ : m_byteSize(byteSize)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3StackSlot.h b/Source/JavaScriptCore/b3/B3StackSlot.h
new file mode 100644
index 000000000..4a475099b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackSlot.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SparseCollection.h"
+#include <limits.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+namespace Air {
+class StackSlot;
+} // namespace Air
+
+class StackSlot {
+ WTF_MAKE_NONCOPYABLE(StackSlot);
+ WTF_MAKE_FAST_ALLOCATED;
+
+public:
+ ~StackSlot();
+
+ unsigned byteSize() const { return m_byteSize; }
+ unsigned index() const { return m_index; }
+
+ // This gets assigned at the end of compilation. But, you can totally pin stack slots. Use
+ // setOffsetFromFP() to do that.
+ intptr_t offsetFromFP() const { return m_offsetFromFP; }
+
+ // Note that this is meaningless unless the stack slot has been pinned.
+ void setOffsetFromFP(intptr_t value)
+ {
+ m_offsetFromFP = value;
+ }
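+
+ // For example (illustrative, not from this patch), pinning a slot 16 bytes below the frame
+ // pointer looks like: slot->setOffsetFromFP(-16);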
+
+ void dump(PrintStream&) const;
+ void deepDump(PrintStream&) const;
+
+private:
+ friend class Air::StackSlot;
+ friend class Procedure;
+ friend class SparseCollection<StackSlot>;
+
+ StackSlot(unsigned byteSize);
+
+ unsigned m_index { UINT_MAX };
+ unsigned m_byteSize { 0 };
+ intptr_t m_offsetFromFP { 0 };
+};
+
+class DeepStackSlotDump {
+public:
+ DeepStackSlotDump(const StackSlot* slot)
+ : m_slot(slot)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_slot)
+ m_slot->deepDump(out);
+ else
+ out.print("<null>");
+ }
+
+private:
+ const StackSlot* m_slot;
+};
+
+inline DeepStackSlotDump deepDump(const StackSlot* slot)
+{
+ return DeepStackSlotDump(slot);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackmapGenerationParams.cpp b/Source/JavaScriptCore/b3/B3StackmapGenerationParams.cpp
new file mode 100644
index 000000000..0a07e4e08
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapGenerationParams.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3StackmapGenerationParams.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "B3StackmapValue.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+const RegisterSet& StackmapGenerationParams::usedRegisters() const
+{
+ return m_value->m_usedRegisters;
+}
+
+RegisterSet StackmapGenerationParams::unavailableRegisters() const
+{
+ RegisterSet result = usedRegisters();
+
+ RegisterSet unsavedCalleeSaves = RegisterSet::vmCalleeSaveRegisters();
+ for (const RegisterAtOffset& regAtOffset : m_context.code->calleeSaveRegisters())
+ unsavedCalleeSaves.clear(regAtOffset.reg());
+
+ result.merge(unsavedCalleeSaves);
+
+ for (GPRReg gpr : m_gpScratch)
+ result.clear(gpr);
+ for (FPRReg fpr : m_fpScratch)
+ result.clear(fpr);
+
+ return result;
+}
+
+Vector<Box<CCallHelpers::Label>> StackmapGenerationParams::successorLabels() const
+{
+ RELEASE_ASSERT(m_context.indexInBlock == m_context.currentBlock->size() - 1);
+ RELEASE_ASSERT(m_value->effects().terminal);
+
+ Vector<Box<CCallHelpers::Label>> result(m_context.currentBlock->numSuccessors());
+ for (unsigned i = m_context.currentBlock->numSuccessors(); i--;)
+ result[i] = m_context.blockLabels[m_context.currentBlock->successorBlock(i)];
+ return result;
+}
+
+bool StackmapGenerationParams::fallsThroughToSuccessor(unsigned successorIndex) const
+{
+ RELEASE_ASSERT(m_context.indexInBlock == m_context.currentBlock->size() - 1);
+ RELEASE_ASSERT(m_value->effects().terminal);
+
+ Air::BasicBlock* successor = m_context.currentBlock->successorBlock(successorIndex);
+ Air::BasicBlock* nextBlock = m_context.code->findNextBlock(m_context.currentBlock);
+ return successor == nextBlock;
+}
+
+Procedure& StackmapGenerationParams::proc() const
+{
+ return m_context.code->proc();
+}
+
+StackmapGenerationParams::StackmapGenerationParams(
+ StackmapValue* value, const Vector<ValueRep>& reps, Air::GenerationContext& context)
+ : m_value(value)
+ , m_reps(reps)
+ , m_context(context)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3StackmapGenerationParams.h b/Source/JavaScriptCore/b3/B3StackmapGenerationParams.h
new file mode 100644
index 000000000..31d19edb9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapGenerationParams.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirGenerationContext.h"
+#include "B3ValueRep.h"
+#include "CCallHelpers.h"
+#include "RegisterSet.h"
+#include <wtf/Box.h>
+
+namespace JSC { namespace B3 {
+
+class CheckSpecial;
+class PatchpointSpecial;
+class Procedure;
+class StackmapValue;
+
+// NOTE: It's possible to capture StackmapGenerationParams by value, but not all of the methods will
+// work if you do that.
+class StackmapGenerationParams {
+public:
+ // This is the stackmap value that we're generating.
+ StackmapValue* value() const { return m_value; }
+
+ // This tells you the actual value representations that were chosen. This is usually different
+ // from the constraints we supplied.
+ const Vector<ValueRep>& reps() const { return m_reps; };
+
+ // Usually we wish to access the reps. We make this easy by making ourselves appear to be a
+ // collection of reps.
+ unsigned size() const { return m_reps.size(); }
+ const ValueRep& at(unsigned index) const { return m_reps[index]; }
+ const ValueRep& operator[](unsigned index) const { return at(index); }
+ Vector<ValueRep>::const_iterator begin() const { return m_reps.begin(); }
+ Vector<ValueRep>::const_iterator end() const { return m_reps.end(); }
+
+ // This tells you the registers that were used.
+ const RegisterSet& usedRegisters() const;
+
+ // This is a useful helper if you want to do register allocation inside of a patchpoint. The
+ // usedRegisters() set is not directly useful for this purpose because:
+ //
+ // - You can only use callee-save registers for scratch if they were saved in the prologue. So,
+ // if a register is callee-save, it's not enough that it's not in usedRegisters().
+ //
+ // - Scratch registers are going to be in usedRegisters() at the patchpoint. So, if you want to
+ // find one of your requested scratch registers using usedRegisters(), you'll have a bad time.
+ //
+ // This gives you the used register set that's useful for allocating scratch registers. This set
+ // is defined as:
+ //
+ // (usedRegisters() | (RegisterSet::calleeSaveRegisters() - proc.calleeSaveRegisters()))
+ // - gpScratchRegisters - fpScratchRegisters
+ //
+ // I.e. it is like usedRegisters() but also includes unsaved callee-saves and excludes scratch
+ // registers.
+ JS_EXPORT_PRIVATE RegisterSet unavailableRegisters() const;
+
+ GPRReg gpScratch(unsigned index) const { return m_gpScratch[index]; }
+ FPRReg fpScratch(unsigned index) const { return m_fpScratch[index]; }
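+
+ // Illustrative sketch (not from this patch): a patchpoint that requested one GP scratch register
+ // (for example by setting numGPScratchRegisters on its PatchpointValue) picks it up in its
+ // generator as
+ //
+ //     GPRReg scratch = params.gpScratch(0);
+ //
+ // whereas unavailableRegisters() is what you consult when choosing a register by hand.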
+
+ // This is computed lazily, so it won't work if you capture StackmapGenerationParams by value.
+ // These labels will get populated before any late paths or link tasks execute.
+ JS_EXPORT_PRIVATE Vector<Box<CCallHelpers::Label>> successorLabels() const;
+
+ // This is computed lazily, so it won't work if you capture StackmapGenerationParams by value.
+ // Returns true if the successor at the given index is going to be emitted right after the
+ // patchpoint.
+ JS_EXPORT_PRIVATE bool fallsThroughToSuccessor(unsigned successorIndex) const;
+
+ // This is provided for convenience; it means that you don't have to capture it if you don't want to.
+ JS_EXPORT_PRIVATE Procedure& proc() const;
+
+ // The Air::GenerationContext gives you even more power.
+ Air::GenerationContext& context() const { return m_context; };
+
+ template<typename Functor>
+ void addLatePath(const Functor& functor) const
+ {
+ context().latePaths.append(
+ createSharedTask<Air::GenerationContext::LatePathFunction>(
+ [=] (CCallHelpers& jit, Air::GenerationContext&) {
+ functor(jit);
+ }));
+ }
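+
+ // Illustrative sketch (not from this patch): late paths are typically used to emit out-of-line
+ // code from inside a generator. Here someGPR is a hypothetical register holding a value to test:
+ //
+ //     CCallHelpers::Jump slowCase = jit.branchTest32(CCallHelpers::NonZero, someGPR);
+ //     params.addLatePath(
+ //         [=] (CCallHelpers& jit) {
+ //             slowCase.link(&jit);
+ //             // ... emit the slow case here, then jump back to the fast path ...
+ //         });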
+
+private:
+ friend class CheckSpecial;
+ friend class PatchpointSpecial;
+
+ StackmapGenerationParams(StackmapValue*, const Vector<ValueRep>& reps, Air::GenerationContext&);
+
+ StackmapValue* m_value;
+ Vector<ValueRep> m_reps;
+ Vector<GPRReg> m_gpScratch;
+ Vector<FPRReg> m_fpScratch;
+ Air::GenerationContext& m_context;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp b/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp
new file mode 100644
index 000000000..b5aa6c3ff
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3StackmapSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+StackmapSpecial::StackmapSpecial()
+{
+}
+
+StackmapSpecial::~StackmapSpecial()
+{
+}
+
+void StackmapSpecial::reportUsedRegisters(Inst& inst, const RegisterSet& usedRegisters)
+{
+ StackmapValue* value = inst.origin->as<StackmapValue>();
+ ASSERT(value);
+
+ // FIXME: If the Inst that uses the StackmapSpecial gets duplicated, then we end up merging used
+ // register sets from multiple places. This currently won't happen since Air doesn't have taildup
+ // or things like that. But maybe eventually it could be a problem.
+ value->m_usedRegisters.merge(usedRegisters);
+}
+
+RegisterSet StackmapSpecial::extraClobberedRegs(Inst& inst)
+{
+ StackmapValue* value = inst.origin->as<StackmapValue>();
+ ASSERT(value);
+
+ return value->lateClobbered();
+}
+
+RegisterSet StackmapSpecial::extraEarlyClobberedRegs(Inst& inst)
+{
+ StackmapValue* value = inst.origin->as<StackmapValue>();
+ ASSERT(value);
+
+ return value->earlyClobbered();
+}
+
+void StackmapSpecial::forEachArgImpl(
+ unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+ Inst& inst, RoleMode roleMode, std::optional<unsigned> firstRecoverableIndex,
+ const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+ StackmapValue* value = inst.origin->as<StackmapValue>();
+ ASSERT(value);
+
+ // Check that insane things have not happened.
+ ASSERT(inst.args.size() >= numIgnoredAirArgs);
+ ASSERT(value->children().size() >= numIgnoredB3Args);
+ ASSERT(inst.args.size() - numIgnoredAirArgs >= value->children().size() - numIgnoredB3Args);
+
+ for (unsigned i = 0; i < value->children().size() - numIgnoredB3Args; ++i) {
+ Arg& arg = inst.args[i + numIgnoredAirArgs];
+ ConstrainedValue child = value->constrainedChild(i + numIgnoredB3Args);
+
+ Arg::Role role;
+ switch (roleMode) {
+ case ForceLateUseUnlessRecoverable:
+ ASSERT(firstRecoverableIndex);
+ if (arg != inst.args[*firstRecoverableIndex] && arg != inst.args[*firstRecoverableIndex + 1]) {
+ role = Arg::LateColdUse;
+ break;
+ }
+ FALLTHROUGH;
+ case SameAsRep:
+ switch (child.rep().kind()) {
+ case ValueRep::WarmAny:
+ case ValueRep::SomeRegister:
+ case ValueRep::Register:
+ case ValueRep::Stack:
+ case ValueRep::StackArgument:
+ case ValueRep::Constant:
+ role = Arg::Use;
+ break;
+ case ValueRep::LateRegister:
+ role = Arg::LateUse;
+ break;
+ case ValueRep::ColdAny:
+ role = Arg::ColdUse;
+ break;
+ case ValueRep::LateColdAny:
+ role = Arg::LateColdUse;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ break;
+ case ForceLateUse:
+ role = Arg::LateColdUse;
+ break;
+ }
+
+ Type type = child.value()->type();
+ callback(arg, role, Arg::typeForB3Type(type), Arg::widthForB3Type(type));
+ }
+}
+
+bool StackmapSpecial::isValidImpl(
+ unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+ Inst& inst)
+{
+ StackmapValue* value = inst.origin->as<StackmapValue>();
+ ASSERT(value);
+
+ // Check that insane things have not happened.
+ ASSERT(inst.args.size() >= numIgnoredAirArgs);
+ ASSERT(value->children().size() >= numIgnoredB3Args);
+
+ // For the Inst to be valid, it needs to have the right number of arguments.
+ if (inst.args.size() - numIgnoredAirArgs < value->children().size() - numIgnoredB3Args)
+ return false;
+
+ // Regardless of constraints, stackmaps have some basic requirements for their arguments. For
+ // example, you can't have a non-FP-offset address. This verifies those conditions as well as the
+ // argument types.
+ for (unsigned i = 0; i < value->children().size() - numIgnoredB3Args; ++i) {
+ Value* child = value->child(i + numIgnoredB3Args);
+ Arg& arg = inst.args[i + numIgnoredAirArgs];
+
+ if (!isArgValidForValue(arg, child))
+ return false;
+ }
+
+ // The number of constraints has to be no greater than the number of B3 children.
+ ASSERT(value->m_reps.size() <= value->children().size());
+
+ // Verify any explicitly supplied constraints.
+ for (unsigned i = numIgnoredB3Args; i < value->m_reps.size(); ++i) {
+ ValueRep& rep = value->m_reps[i];
+ Arg& arg = inst.args[i - numIgnoredB3Args + numIgnoredAirArgs];
+
+ if (!isArgValidForRep(code(), arg, rep))
+ return false;
+ }
+
+ return true;
+}
+
+bool StackmapSpecial::admitsStackImpl(
+ unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+ Inst& inst, unsigned argIndex)
+{
+ StackmapValue* value = inst.origin->as<StackmapValue>();
+ ASSERT(value);
+
+ unsigned stackmapArgIndex = argIndex - numIgnoredAirArgs + numIgnoredB3Args;
+
+ if (stackmapArgIndex >= value->numChildren()) {
+ // It's not a stackmap argument, so as far as we are concerned, it doesn't admit stack.
+ return false;
+ }
+
+ if (stackmapArgIndex >= value->m_reps.size()) {
+ // This means that there was no constraint.
+ return true;
+ }
+
+ // We only admit stack for Any's, since Stack is not a valid input constraint, and StackArgument
+ // translates to a CallArg in Air.
+ if (value->m_reps[stackmapArgIndex].isAny())
+ return true;
+
+ return false;
+}
+
+Vector<ValueRep> StackmapSpecial::repsImpl(
+ GenerationContext& context, unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs, Inst& inst)
+{
+ Vector<ValueRep> result;
+ for (unsigned i = 0; i < inst.origin->numChildren() - numIgnoredB3Args; ++i)
+ result.append(repForArg(*context.code, inst.args[i + numIgnoredAirArgs]));
+ return result;
+}
+
+bool StackmapSpecial::isArgValidForValue(const Air::Arg& arg, Value* value)
+{
+ switch (arg.kind()) {
+ case Arg::Tmp:
+ case Arg::Imm:
+ case Arg::BigImm:
+ break;
+ default:
+ if (!arg.isStackMemory())
+ return false;
+ break;
+ }
+
+ return arg.canRepresent(value);
+}
+
+bool StackmapSpecial::isArgValidForRep(Air::Code& code, const Air::Arg& arg, const ValueRep& rep)
+{
+ switch (rep.kind()) {
+ case ValueRep::WarmAny:
+ case ValueRep::ColdAny:
+ case ValueRep::LateColdAny:
+ // We already verified by isArgValidForValue().
+ return true;
+ case ValueRep::SomeRegister:
+ case ValueRep::SomeEarlyRegister:
+ return arg.isTmp();
+ case ValueRep::LateRegister:
+ case ValueRep::Register:
+ return arg == Tmp(rep.reg());
+ case ValueRep::StackArgument:
+ if (arg == Arg::callArg(rep.offsetFromSP()))
+ return true;
+ if (arg.isAddr() && code.frameSize()) {
+ if (arg.base() == Tmp(GPRInfo::callFrameRegister)
+ && arg.offset() == rep.offsetFromSP() - code.frameSize())
+ return true;
+ if (arg.base() == Tmp(MacroAssembler::stackPointerRegister)
+ && arg.offset() == rep.offsetFromSP())
+ return true;
+ }
+ return false;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+}
+
+ValueRep StackmapSpecial::repForArg(Code& code, const Arg& arg)
+{
+ switch (arg.kind()) {
+ case Arg::Tmp:
+ return ValueRep::reg(arg.reg());
+ break;
+ case Arg::Imm:
+ case Arg::BigImm:
+ return ValueRep::constant(arg.value());
+ break;
+ case Arg::Addr:
+ if (arg.base() == Tmp(GPRInfo::callFrameRegister))
+ return ValueRep::stack(arg.offset());
+ ASSERT(arg.base() == Tmp(MacroAssembler::stackPointerRegister));
+ return ValueRep::stack(arg.offset() - static_cast<int32_t>(code.frameSize()));
+ default:
+ ASSERT_NOT_REACHED();
+ return ValueRep();
+ }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, StackmapSpecial::RoleMode mode)
+{
+ switch (mode) {
+ case StackmapSpecial::SameAsRep:
+ out.print("SameAsRep");
+ return;
+ case StackmapSpecial::ForceLateUseUnlessRecoverable:
+ out.print("ForceLateUseUnlessRecoverable");
+ return;
+ case StackmapSpecial::ForceLateUse:
+ out.print("ForceLateUse");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackmapSpecial.h b/Source/JavaScriptCore/b3/B3StackmapSpecial.h
new file mode 100644
index 000000000..97a0813d1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapSpecial.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirSpecial.h"
+#include "B3ValueRep.h"
+
+namespace JSC { namespace B3 {
+
+namespace Air { class Code; }
+
+// This is a base class for specials that have stackmaps. Note that it can find the Stackmap by
+// asking for the Inst's origin. Hence, these objects don't even need to hold a reference to the
+// Stackmap.
+
+class StackmapSpecial : public Air::Special {
+public:
+ StackmapSpecial();
+ virtual ~StackmapSpecial();
+
+ enum RoleMode : int8_t {
+ SameAsRep,
+ ForceLateUseUnlessRecoverable,
+ ForceLateUse
+ };
+
+protected:
+ void reportUsedRegisters(Air::Inst&, const RegisterSet&) override;
+ RegisterSet extraEarlyClobberedRegs(Air::Inst&) override;
+ RegisterSet extraClobberedRegs(Air::Inst&) override;
+
+ // Note that this does not override generate() or dumpImpl()/deepDumpImpl(). We have subclasses
+ // that implement those.
+ void forEachArgImpl(
+ unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+ Air::Inst&, RoleMode, std::optional<unsigned> firstRecoverableIndex,
+ const ScopedLambda<Air::Inst::EachArgCallback>&);
+
+ bool isValidImpl(
+ unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+ Air::Inst&);
+ bool admitsStackImpl(
+ unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+ Air::Inst&, unsigned argIndex);
+
+ // Appends the reps for the Inst's args, starting with numIgnoredArgs, to the given vector.
+ Vector<ValueRep> repsImpl(
+ Air::GenerationContext&, unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs, Air::Inst&);
+
+ static bool isArgValidForValue(const Air::Arg&, Value*);
+ static bool isArgValidForRep(Air::Code&, const Air::Arg&, const ValueRep&);
+ static ValueRep repForArg(Air::Code&, const Air::Arg&);
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::B3::StackmapSpecial::RoleMode);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3StackmapValue.cpp b/Source/JavaScriptCore/b3/B3StackmapValue.cpp
new file mode 100644
index 000000000..9b0db2f46
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapValue.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3StackmapValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+StackmapValue::~StackmapValue()
+{
+}
+
+void StackmapValue::append(Value* value, const ValueRep& rep)
+{
+ if (rep == ValueRep::ColdAny) {
+ children().append(value);
+ return;
+ }
+
+ while (m_reps.size() < numChildren())
+ m_reps.append(ValueRep::ColdAny);
+
+ children().append(value);
+ m_reps.append(rep);
+}
+
+void StackmapValue::appendSomeRegister(Value* value)
+{
+ append(ConstrainedValue(value, ValueRep::SomeRegister));
+}
+
+void StackmapValue::setConstrainedChild(unsigned index, const ConstrainedValue& constrainedValue)
+{
+ child(index) = constrainedValue.value();
+ setConstraint(index, constrainedValue.rep());
+}
+
+void StackmapValue::setConstraint(unsigned index, const ValueRep& rep)
+{
+ if (rep == ValueRep(ValueRep::ColdAny))
+ return;
+
+ while (m_reps.size() <= index)
+ m_reps.append(ValueRep::ColdAny);
+
+ m_reps[index] = rep;
+}
+
+void StackmapValue::dumpChildren(CommaPrinter& comma, PrintStream& out) const
+{
+ for (ConstrainedValue value : constrainedChildren())
+ out.print(comma, value);
+}
+
+void StackmapValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(
+ comma, "generator = ", RawPointer(m_generator.get()), ", earlyClobbered = ", m_earlyClobbered,
+ ", lateClobbered = ", m_lateClobbered, ", usedRegisters = ", m_usedRegisters);
+}
+
+StackmapValue::StackmapValue(CheckedOpcodeTag, Kind kind, Type type, Origin origin)
+ : Value(CheckedOpcode, kind, type, origin)
+{
+ ASSERT(accepts(kind));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3StackmapValue.h b/Source/JavaScriptCore/b3/B3StackmapValue.h
new file mode 100644
index 000000000..66fc644b1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3StackmapValue.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstrainedValue.h"
+#include "B3Value.h"
+#include "B3ValueRep.h"
+#include "CCallHelpers.h"
+#include "RegisterSet.h"
+#include <wtf/SharedTask.h>
+
+namespace JSC { namespace B3 {
+
+class StackmapGenerationParams;
+
+typedef void StackmapGeneratorFunction(CCallHelpers&, const StackmapGenerationParams&);
+typedef SharedTask<StackmapGeneratorFunction> StackmapGenerator;
+
+class JS_EXPORT_PRIVATE StackmapValue : public Value {
+public:
+ static bool accepts(Kind kind)
+ {
+ // This needs to include opcodes of all subclasses.
+ switch (kind.opcode()) {
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ case Check:
+ case Patchpoint:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ ~StackmapValue();
+
+ // Use this to add children. Note that you could also add children by doing
+ // children().append(). That will work fine, but it's not recommended.
+ void append(const ConstrainedValue& value)
+ {
+ append(value.value(), value.rep());
+ }
+
+ void append(Value*, const ValueRep&);
+
+ template<typename VectorType>
+ void appendVector(const VectorType& vector)
+ {
+ for (const auto& value : vector)
+ append(value);
+ }
+
+ // Helper for appending a bunch of values with some ValueRep.
+ template<typename VectorType>
+ void appendVectorWithRep(const VectorType& vector, const ValueRep& rep)
+ {
+ for (Value* value : vector)
+ append(value, rep);
+ }
+
+ // Helpers for appending cold Anys (ColdAny and LateColdAny). These are often used by clients to
+ // implement OSR.
+ template<typename VectorType>
+ void appendColdAnys(const VectorType& vector)
+ {
+ appendVectorWithRep(vector, ValueRep::ColdAny);
+ }
+ template<typename VectorType>
+ void appendLateColdAnys(const VectorType& vector)
+ {
+ appendVectorWithRep(vector, ValueRep::LateColdAny);
+ }
+
+ // This is a helper for something you might do a lot of: append a value that should be constrained
+ // to SomeRegister.
+ void appendSomeRegister(Value*);
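+
+ // Illustrative usage (not from this patch), where 'patchpoint' is some StackmapValue and 'index'
+ // is some child Value* (both hypothetical):
+ //
+ //     patchpoint->appendSomeRegister(index);              // same constraint as the next line
+ //     patchpoint->append(index, ValueRep::SomeRegister);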
+
+ const Vector<ValueRep>& reps() const { return m_reps; }
+
+ // Stackmaps allow you to specify that the operation may clobber some registers. Clobbering a register
+ // means that the operation appears to store a value into the register, but the compiler makes no
+ // assumptions about what value might have been stored. In B3's model of
+ // execution, registers are read or written at instruction boundaries rather than inside the
+ // instructions themselves. A register could be read or written immediately before the instruction
+ // executes, or immediately after. Note that at a boundary between instruction A and instruction B we
+ // simultaneously look at what A does after it executes and what B does before it executes. This is
+ // because when the compiler considers what happens to registers, it views the boundary between two
+ // instructions as a kind of atomic point where the late effects of A happen at the same time as the
+ // early effects of B.
+ //
+ // The compiler views a stackmap as a single instruction, even though of course the stackmap may be
+ // composed of any number of instructions (if it's a Patchpoint). You can claim that a stackmap value
+ // clobbers a set of registers before the stackmap's instruction or after. Clobbering before is called
+ // early clobber, while clobbering after is called late clobber.
+ //
+ // This is quite flexible but it has its limitations. Any register listed as an early clobber will
+ // interfere with all uses of the stackmap. Any register listed as a late clobber will interfere with
+ // all defs of the stackmap (i.e. the result). This means that it's currently not possible to claim
+ // to clobber a register while still allowing that register to be used for both an input and an output
+ // of the instruction. It just so happens that B3's sole client (the FTL) currently never wants to
+ // convey such a constraint, but it will want it eventually (FIXME:
+ // https://bugs.webkit.org/show_bug.cgi?id=151823).
+ //
+ // Note that a common use case of early clobber sets is to indicate that this is the set of registers
+ // that shall not be used for inputs to the value. But B3 supports two different ways of specifying
+ // this, the other being LateUse in combination with late clobber (not yet available to stackmaps
+ // directly, FIXME: https://bugs.webkit.org/show_bug.cgi?id=151335). A late use makes the use of that
+ // value appear to happen after the instruction. This means that a late use cannot use the same
+ // register as the result and it cannot use the same register as either early or late clobbered
+ // registers. Late uses are usually a better way of saying that a clobbered register cannot be used
+ // for an input. Early clobber means that some register(s) interfere with *all* inputs, while LateUse
+ // means that some value interferes with whatever is live after the instruction. Below is a list of
+ // examples of how the FTL can handle its various kinds of scenarios using a combination of early
+ // clobber, late clobber, and late use. These examples are for X86_64, w.l.o.g.
+ //
+ // Basic ById patchpoint: Early and late clobber of r11. Early clobber prevents any inputs from using
+ // r11 since that would mess with the MacroAssembler's assumptions when we
+ // AllowMacroScratchRegisterUsage. Late clobber tells B3 that the patchpoint may overwrite r11.
+ //
+ // ById patchpoint in a try block with some live state: This might throw an exception after already
+ // assigning to the result. So, this should LateUse all stackmap values to ensure that the stackmap
+ // values don't interfere with the result. Note that we do not LateUse the non-OSR inputs of the ById
+ // since LateUse implies that the use is cold: the register allocator will assume that the use is not
+ // important for the critical path. Also, early and late clobber of r11.
+ //
+ // Basic ByIdFlush patchpoint: We could do Flush the same way we did it with LLVM: ignore it and let
+ // PolymorphicAccess figure it out. Or, we could add internal clobber support (FIXME:
+ // https://bugs.webkit.org/show_bug.cgi?id=151823). Or, we could do it by early clobbering r11, late
+ // clobbering all volatile registers, and constraining the result to some register. Or, we could do
+ // that but leave the result constrained to SomeRegister, which will cause it to use a callee-save
+ // register. Internal clobber support would allow us to use SomeRegister while getting the result into
+ // a volatile register.
+ //
+ // ByIdFlush patchpoint in a try block with some live state: LateUse all for-OSR stackmap values,
+ // early clobber of r11 to prevent the other inputs from using r11, and late clobber of all volatile
+ // registers to make way for the call. To handle the result, we could do any of what is listed in the
+ // previous paragraph.
+ //
+ // Basic JS call: Force all non-OSR inputs into specific locations (register, stack, whatever).
+ // All volatile registers are late-clobbered. The output is constrained to a register as well.
+ //
+ // JS call in a try block with some live state: LateUse all for-OSR stackmap values, fully constrain
+ // all non-OSR inputs and the result, and late clobber all volatile registers.
+ //
+ // JS tail call: Pass all inputs as a warm variant of Any (FIXME:
+ // https://bugs.webkit.org/show_bug.cgi?id=151811).
+ //
+ // Note that we cannot yet do all of these things because although Air already supports all of these
+ // various forms of uses (LateUse and warm unconstrained use), B3 doesn't yet expose all of it. The
+ // bugs are:
+ // https://bugs.webkit.org/show_bug.cgi?id=151335 (LateUse)
+ // https://bugs.webkit.org/show_bug.cgi?id=151811 (warm Any)
+ void clobberEarly(const RegisterSet& set)
+ {
+ m_earlyClobbered.merge(set);
+ }
+
+ void clobberLate(const RegisterSet& set)
+ {
+ m_lateClobbered.merge(set);
+ }
+
+ void clobber(const RegisterSet& set)
+ {
+ clobberEarly(set);
+ clobberLate(set);
+ }
+
+ RegisterSet& earlyClobbered() { return m_earlyClobbered; }
+ RegisterSet& lateClobbered() { return m_lateClobbered; }
+ const RegisterSet& earlyClobbered() const { return m_earlyClobbered; }
+ const RegisterSet& lateClobbered() const { return m_lateClobbered; }
+
+ void setGenerator(RefPtr<StackmapGenerator> generator)
+ {
+ m_generator = generator;
+ }
+
+ template<typename Functor>
+ void setGenerator(const Functor& functor)
+ {
+ m_generator = createSharedTask<StackmapGeneratorFunction>(functor);
+ }
+
+ RefPtr<StackmapGenerator> generator() const { return m_generator; }
+
+ ConstrainedValue constrainedChild(unsigned index) const
+ {
+ return ConstrainedValue(child(index), index < m_reps.size() ? m_reps[index] : ValueRep::ColdAny);
+ }
+
+ void setConstrainedChild(unsigned index, const ConstrainedValue&);
+
+ void setConstraint(unsigned index, const ValueRep&);
+
+ class ConstrainedValueCollection {
+ public:
+ ConstrainedValueCollection(const StackmapValue& value)
+ : m_value(value)
+ {
+ }
+
+ unsigned size() const { return m_value.numChildren(); }
+
+ ConstrainedValue at(unsigned index) const { return m_value.constrainedChild(index); }
+
+ ConstrainedValue operator[](unsigned index) const { return at(index); }
+
+ class iterator {
+ public:
+ iterator()
+ : m_collection(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(const ConstrainedValueCollection& collection, unsigned index)
+ : m_collection(&collection)
+ , m_index(index)
+ {
+ }
+
+ ConstrainedValue operator*() const
+ {
+ return m_collection->at(m_index);
+ }
+
+ iterator& operator++()
+ {
+ m_index++;
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ ASSERT(m_collection == other.m_collection);
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ const ConstrainedValueCollection* m_collection;
+ unsigned m_index;
+ };
+
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, size()); }
+
+ private:
+ const StackmapValue& m_value;
+ };
+
+ ConstrainedValueCollection constrainedChildren() const
+ {
+ return ConstrainedValueCollection(*this);
+ }
+
+protected:
+ void dumpChildren(CommaPrinter&, PrintStream&) const override;
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ StackmapValue(CheckedOpcodeTag, Kind, Type, Origin);
+
+private:
+ friend class CheckSpecial;
+ friend class PatchpointSpecial;
+ friend class StackmapGenerationParams;
+ friend class StackmapSpecial;
+
+ Vector<ValueRep> m_reps;
+ RefPtr<StackmapGenerator> m_generator;
+ RegisterSet m_earlyClobbered;
+ RegisterSet m_lateClobbered;
+ RegisterSet m_usedRegisters; // Stackmaps could be further duplicated by Air, but that's unlikely, so we just merge the used registers sets if that were to happen.
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
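
A minimal usage sketch for the clobber and constraint interface above, assuming a PatchpointValue* named patchpoint (PatchpointValue derives from StackmapValue), a scratch GPR named scratchGPR, and a generator signature of (CCallHelpers&, const StackmapGenerationParams&); all of those names come from the surrounding client code rather than from this header:

    RegisterSet scratch;
    scratch.set(scratchGPR);
    patchpoint->clobberEarly(scratch); // inputs must stay out of the scratch register
    patchpoint->clobberLate(scratch);  // the emitted snippet may overwrite it
    patchpoint->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams& params) {
        // Emit the patchpoint's code here; params describes where B3 placed the result
        // and each child.
        UNUSED_PARAM(jit);
        UNUSED_PARAM(params);
    });
    for (ConstrainedValue constrainedChild : patchpoint->constrainedChildren())
        dataLog("constrained child: ", constrainedChild, "\n");
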
diff --git a/Source/JavaScriptCore/b3/B3SuccessorCollection.h b/Source/JavaScriptCore/b3/B3SuccessorCollection.h
new file mode 100644
index 000000000..0a7df247b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SuccessorCollection.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+// This is a generic wrapper around lists of frequented blocks, which gives you just the blocks.
+
+template<typename BasicBlock, typename SuccessorList>
+class SuccessorCollection {
+public:
+ SuccessorCollection(SuccessorList& list)
+ : m_list(list)
+ {
+ }
+
+ size_t size() const { return m_list.size(); }
+ BasicBlock* at(size_t index) const { return m_list[index].block(); }
+ BasicBlock*& at(size_t index) { return m_list[index].block(); }
+ BasicBlock* operator[](size_t index) const { return at(index); }
+ BasicBlock*& operator[](size_t index) { return at(index); }
+
+ class iterator {
+ public:
+ iterator()
+ : m_collection(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(SuccessorCollection& collection, size_t index)
+ : m_collection(&collection)
+ , m_index(index)
+ {
+ }
+
+ BasicBlock*& operator*() const
+ {
+ return m_collection->at(m_index);
+ }
+
+ iterator& operator++()
+ {
+ m_index++;
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ ASSERT(m_collection == other.m_collection);
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ SuccessorCollection* m_collection;
+ size_t m_index;
+ };
+
+ iterator begin() { return iterator(*this, 0); }
+ iterator end() { return iterator(*this, size()); }
+
+ class const_iterator {
+ public:
+ const_iterator()
+ : m_collection(nullptr)
+ , m_index(0)
+ {
+ }
+
+ const_iterator(const SuccessorCollection& collection, size_t index)
+ : m_collection(&collection)
+ , m_index(index)
+ {
+ }
+
+ BasicBlock* operator*() const
+ {
+ return m_collection->at(m_index);
+ }
+
+ const_iterator& operator++()
+ {
+ m_index++;
+ return *this;
+ }
+
+ bool operator==(const const_iterator& other) const
+ {
+ ASSERT(m_collection == other.m_collection);
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const const_iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ const SuccessorCollection* m_collection;
+ size_t m_index;
+ };
+
+ const_iterator begin() const { return const_iterator(*this, 0); }
+ const_iterator end() const { return const_iterator(*this, size()); }
+
+private:
+ SuccessorList& m_list;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
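
This wrapper is what range-based loops over a block's successors iterate; a sketch, assuming a BasicBlock* named block whose successorBlocks() returns a SuccessorCollection, plus hypothetical oldTarget and newTarget blocks:

    // Read-only traversal: just the blocks, ignoring each FrequentedBlock's frequency.
    for (BasicBlock* successor : block->successorBlocks())
        dataLog("successor: ", *successor, "\n");

    // The non-const iterator yields BasicBlock*&, so edges can be retargeted in place.
    for (BasicBlock*& successor : block->successorBlocks()) {
        if (successor == oldTarget)
            successor = newTarget;
    }
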
diff --git a/Source/JavaScriptCore/b3/B3SwitchCase.cpp b/Source/JavaScriptCore/b3/B3SwitchCase.cpp
new file mode 100644
index 000000000..d05332bc6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SwitchCase.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3SwitchCase.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+
+namespace JSC { namespace B3 {
+
+void SwitchCase::dump(PrintStream& out) const
+{
+ out.print(m_caseValue, "->", m_target);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SwitchCase.h b/Source/JavaScriptCore/b3/B3SwitchCase.h
new file mode 100644
index 000000000..5ba6a484c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SwitchCase.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequentedBlock.h"
+#include <wtf/PrintStream.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC { namespace B3 {
+
+class SwitchCase {
+public:
+ SwitchCase()
+ {
+ }
+
+ SwitchCase(int64_t caseValue, const FrequentedBlock& target)
+ : m_caseValue(caseValue)
+ , m_target(target)
+ {
+ }
+
+ explicit operator bool() const { return !!m_target; }
+
+ int64_t caseValue() const { return m_caseValue; }
+ FrequentedBlock target() const { return m_target; }
+ BasicBlock* targetBlock() const { return m_target.block(); }
+
+ void dump(PrintStream& out) const;
+
+private:
+ int64_t m_caseValue;
+ FrequentedBlock m_target;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3SwitchValue.cpp b/Source/JavaScriptCore/b3/B3SwitchValue.cpp
new file mode 100644
index 000000000..8b880347a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SwitchValue.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3SwitchValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+SwitchValue::~SwitchValue()
+{
+}
+
+SwitchCase SwitchValue::removeCase(BasicBlock* block, unsigned index)
+{
+ FrequentedBlock resultBlock = block->successor(index);
+ int64_t resultValue = m_values[index];
+ block->successor(index) = block->successors().last();
+ block->successors().removeLast();
+ m_values[index] = m_values.last();
+ m_values.removeLast();
+ return SwitchCase(resultValue, resultBlock);
+}
+
+bool SwitchValue::hasFallThrough(const BasicBlock* block) const
+{
+ unsigned numSuccessors = block->numSuccessors();
+ unsigned numValues = m_values.size();
+ RELEASE_ASSERT(numValues == numSuccessors || numValues + 1 == numSuccessors);
+
+ return numValues + 1 == numSuccessors;
+}
+
+bool SwitchValue::hasFallThrough() const
+{
+ return hasFallThrough(owner);
+}
+
+void SwitchValue::setFallThrough(BasicBlock* block, const FrequentedBlock& target)
+{
+ if (!hasFallThrough())
+ block->successors().append(target);
+ else
+ block->successors().last() = target;
+ ASSERT(hasFallThrough(block));
+}
+
+void SwitchValue::appendCase(BasicBlock* block, const SwitchCase& switchCase)
+{
+ if (!hasFallThrough())
+ block->successors().append(switchCase.target());
+ else {
+ block->successors().append(block->successors().last());
+ block->successor(block->numSuccessors() - 2) = switchCase.target();
+ }
+ m_values.append(switchCase.caseValue());
+}
+
+void SwitchValue::setFallThrough(const FrequentedBlock& target)
+{
+ setFallThrough(owner, target);
+}
+
+void SwitchValue::appendCase(const SwitchCase& switchCase)
+{
+ appendCase(owner, switchCase);
+}
+
+void SwitchValue::dumpSuccessors(const BasicBlock* block, PrintStream& out) const
+{
+ // We must not crash due to a number-of-successors mismatch! Someone debugging a
+ // number-of-successors bug will want to dump IR!
+ if (numCaseValues() + 1 != block->numSuccessors()) {
+ Value::dumpSuccessors(block, out);
+ return;
+ }
+
+ out.print(cases(block));
+}
+
+void SwitchValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma, "cases = [", listDump(m_values), "]");
+}
+
+Value* SwitchValue::cloneImpl() const
+{
+ return new SwitchValue(*this);
+}
+
+SwitchValue::SwitchValue(Origin origin, Value* child)
+ : Value(CheckedOpcode, Switch, Void, origin, child)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
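
Because removeCase() fills the vacated slot with what was the last case, removing cases while iterating means revisiting the current index rather than advancing; a sketch, assuming a SwitchValue* named switchValue that terminates the BasicBlock* named block:

    for (unsigned i = 0; i < switchValue->numCaseValues();) {
        if (switchValue->caseValue(i) < 0) {
            SwitchCase removed = switchValue->removeCase(block, i);
            dataLog("removed case: ", removed, "\n");
            continue; // Slot i now holds a different case, so do not advance.
        }
        i++;
    }
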
diff --git a/Source/JavaScriptCore/b3/B3SwitchValue.h b/Source/JavaScriptCore/b3/B3SwitchValue.h
new file mode 100644
index 000000000..a1c27cd9d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3SwitchValue.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CaseCollection.h"
+#include "B3SwitchCase.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class SwitchValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == Switch; }
+
+ ~SwitchValue();
+
+ // numCaseValues() + 1 == numSuccessors().
+ unsigned numCaseValues() const { return m_values.size(); }
+
+ // The successor for this case value is at the same index.
+ int64_t caseValue(unsigned index) const { return m_values[index]; }
+
+ const Vector<int64_t>& caseValues() const { return m_values; }
+
+ CaseCollection cases(const BasicBlock* owner) const { return CaseCollection(this, owner); }
+ CaseCollection cases() const { return cases(owner); }
+
+ // This removes the case and reorders things a bit. If you're iterating the cases from 0 to N,
+ // then you can keep iterating after this so long as you revisit this same index (which will now
+ // contain some other case value). This returns the case that was removed.
+ SwitchCase removeCase(BasicBlock*, unsigned index);
+
+ bool hasFallThrough(const BasicBlock*) const;
+ bool hasFallThrough() const;
+
+ // These two functions can be called in any order.
+ void setFallThrough(BasicBlock*, const FrequentedBlock&);
+ void appendCase(BasicBlock*, const SwitchCase&);
+
+ JS_EXPORT_PRIVATE void setFallThrough(const FrequentedBlock&);
+ JS_EXPORT_PRIVATE void appendCase(const SwitchCase&);
+
+ void dumpSuccessors(const BasicBlock*, PrintStream&) const override;
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ JS_EXPORT_PRIVATE SwitchValue(Origin, Value* child);
+
+ Vector<int64_t> m_values;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
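
A sketch of building a Switch terminator through this interface, assuming a Procedure& proc, an Origin origin, an Int32 selector value, target blocks zeroBlock, oneBlock, and defaultBlock, and the usual BasicBlock::appendNew<> helper:

    SwitchValue* switchValue = block->appendNew<SwitchValue>(proc, origin, selector);
    switchValue->appendCase(block, SwitchCase(0, FrequentedBlock(zeroBlock)));
    switchValue->appendCase(block, SwitchCase(1, FrequentedBlock(oneBlock)));
    switchValue->setFallThrough(block, FrequentedBlock(defaultBlock));
    // Now numCaseValues() + 1 == block->numSuccessors(), and the fall-through stays
    // the last successor no matter how many more cases are appended.
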
diff --git a/Source/JavaScriptCore/b3/B3TimingScope.cpp b/Source/JavaScriptCore/b3/B3TimingScope.cpp
new file mode 100644
index 000000000..d8ad42133
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3TimingScope.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3TimingScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include <wtf/CurrentTime.h>
+#include <wtf/DataLog.h>
+
+namespace JSC { namespace B3 {
+
+TimingScope::TimingScope(const char* name)
+ : m_name(name)
+{
+ if (shouldMeasurePhaseTiming())
+ m_before = monotonicallyIncreasingTimeMS();
+}
+
+TimingScope::~TimingScope()
+{
+ if (shouldMeasurePhaseTiming()) {
+ double after = monotonicallyIncreasingTimeMS();
+ dataLog("[B3] ", m_name, " took: ", after - m_before, " ms.\n");
+ }
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3TimingScope.h b/Source/JavaScriptCore/b3/B3TimingScope.h
new file mode 100644
index 000000000..a957a0eb0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3TimingScope.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 {
+
+class TimingScope {
+ WTF_MAKE_NONCOPYABLE(TimingScope);
+public:
+ TimingScope(const char* name);
+ ~TimingScope();
+
+private:
+ const char* m_name;
+ double m_before;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
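
TimingScope is a plain RAII timer: the constructor records the start time when shouldMeasurePhaseTiming() is set, and the destructor logs the elapsed milliseconds. A sketch, where runMyPhase is a hypothetical phase:

    void runMyPhase(Procedure& procedure)
    {
        TimingScope timingScope("myPhase"); // logs "[B3] myPhase took: ... ms." when the scope exits
        // ... the actual work of the phase, operating on procedure ...
    }
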
diff --git a/Source/JavaScriptCore/b3/B3Type.cpp b/Source/JavaScriptCore/b3/B3Type.cpp
new file mode 100644
index 000000000..0057eaf61
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Type.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Type.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Type type)
+{
+ switch (type) {
+ case Void:
+ out.print("Void");
+ return;
+ case Int32:
+ out.print("Int32");
+ return;
+ case Int64:
+ out.print("Int64");
+ return;
+ case Float:
+ out.print("Float");
+ return;
+ case Double:
+ out.print("Double");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Type.h b/Source/JavaScriptCore/b3/B3Type.h
new file mode 100644
index 000000000..4ceaa8a1d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Type.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include <wtf/StdLibExtras.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+enum Type : int8_t {
+ Void,
+ Int32,
+ Int64,
+ Float,
+ Double,
+};
+
+inline bool isInt(Type type)
+{
+ return type == Int32 || type == Int64;
+}
+
+inline bool isFloat(Type type)
+{
+ return type == Float || type == Double;
+}
+
+inline Type pointerType()
+{
+ if (is32Bit())
+ return Int32;
+ return Int64;
+}
+
+inline size_t sizeofType(Type type)
+{
+ switch (type) {
+ case Void:
+ return 0;
+ case Int32:
+ case Float:
+ return 4;
+ case Int64:
+ case Double:
+ return 8;
+ }
+ ASSERT_NOT_REACHED();
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::Type);
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
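
These helpers keep type-directed decisions to one-liners; for example, a hypothetical predicate built only from what this header provides:

    // Does a value of this type fit in a general-purpose register on the current target?
    bool fitsInGPR(Type type)
    {
        return isInt(type) && sizeofType(type) <= sizeofType(pointerType());
    }
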
diff --git a/Source/JavaScriptCore/b3/B3TypeMap.h b/Source/JavaScriptCore/b3/B3TypeMap.h
new file mode 100644
index 000000000..c0ea41304
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3TypeMap.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Type.h"
+#include <wtf/PrintStream.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+template<typename T>
+class TypeMap {
+public:
+ TypeMap()
+ : m_void()
+ , m_int32()
+ , m_int64()
+ , m_float()
+ , m_double()
+ {
+ }
+
+ T& at(Type type)
+ {
+ switch (type) {
+ case Void:
+ return m_void;
+ case Int32:
+ return m_int32;
+ case Int64:
+ return m_int64;
+ case Float:
+ return m_float;
+ case Double:
+ return m_double;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ const T& at(Type type) const
+ {
+ return bitwise_cast<TypeMap*>(this)->at(type);
+ }
+
+ T& operator[](Type type)
+ {
+ return at(type);
+ }
+
+ const T& operator[](Type type) const
+ {
+ return at(type);
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(
+ "{void = ", m_void,
+ ", int32 = ", m_int32,
+ ", int64 = ", m_int64,
+ ", float = ", m_float,
+ ", double = ", m_double, "}");
+ }
+
+private:
+ T m_void;
+ T m_int32;
+ T m_int64;
+ T m_float;
+ T m_double;
+};
+
+} } // namespace JSC::B3
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
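
TypeMap is a dense one-slot-per-Type table, so per-type tallies need no hashing; a sketch, assuming a Procedure& proc:

    TypeMap<unsigned> countByType;
    for (Value* value : proc.values())
        countByType[value->type()]++;
    dataLog("values by type: ", countByType, "\n"); // prints through TypeMap::dump
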
diff --git a/Source/JavaScriptCore/b3/B3UpsilonValue.cpp b/Source/JavaScriptCore/b3/B3UpsilonValue.cpp
new file mode 100644
index 000000000..c87432fb7
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3UpsilonValue.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3UpsilonValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+UpsilonValue::~UpsilonValue()
+{
+}
+
+void UpsilonValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ if (m_phi)
+ out.print(comma, "^", m_phi->index());
+ else {
+ // We want to have a dump for when the Phi isn't set yet, since although such IR won't pass
+ // validation, we may have such IR as an intermediate step.
+ out.print(comma, "^(null)");
+ }
+}
+
+Value* UpsilonValue::cloneImpl() const
+{
+ return new UpsilonValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3UpsilonValue.h b/Source/JavaScriptCore/b3/B3UpsilonValue.h
new file mode 100644
index 000000000..4c479e419
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3UpsilonValue.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE UpsilonValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == Upsilon; }
+
+ ~UpsilonValue();
+
+ Value* phi() const { return m_phi; }
+ void setPhi(Value* phi)
+ {
+ ASSERT(child(0)->type() == phi->type());
+ ASSERT(phi->opcode() == Phi);
+ m_phi = phi;
+ }
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ // Note that passing the Phi during construction is optional. A valid pattern is to first create
+ // the Upsilons without the Phi, then create the Phi, then go back and tell the Upsilons about
+ // the Phi. This allows you to emit code in its natural order.
+ UpsilonValue(Origin origin, Value* value, Value* phi = nullptr)
+ : Value(CheckedOpcode, Upsilon, Void, origin, value)
+ , m_phi(phi)
+ {
+ if (phi)
+ ASSERT(value->type() == phi->type());
+ }
+
+ Value* m_phi;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
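
The constructor comment above describes the usual emission order: Upsilons first, the Phi afterwards, then wire them together. A sketch, assuming a Procedure& proc, an Origin origin, Int32 values thenValue and elseValue, and blocks thenBlock, elseBlock, and mergeBlock:

    UpsilonValue* upsilonFromThen = thenBlock->appendNew<UpsilonValue>(proc, origin, thenValue);
    UpsilonValue* upsilonFromElse = elseBlock->appendNew<UpsilonValue>(proc, origin, elseValue);
    Value* phi = mergeBlock->appendNew<Value>(proc, Phi, Int32, origin);
    upsilonFromThen->setPhi(phi); // setPhi() asserts the types match and that phi is a Phi
    upsilonFromElse->setPhi(phi);
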
diff --git a/Source/JavaScriptCore/b3/B3UseCounts.cpp b/Source/JavaScriptCore/b3/B3UseCounts.cpp
new file mode 100644
index 000000000..5fe18d4ff
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3UseCounts.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3UseCounts.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
+UseCounts::UseCounts(Procedure& procedure)
+ : m_counts(procedure.values().size())
+{
+ Vector<Value*, 64> children;
+ for (Value* value : procedure.values()) {
+ children.resize(0);
+ for (Value* child : value->children()) {
+ m_counts[child].numUses++;
+ children.append(child);
+ }
+ std::sort(children.begin(), children.end());
+ Value* last = nullptr;
+ for (Value* child : children) {
+ if (child == last)
+ continue;
+
+ m_counts[child].numUsingInstructions++;
+ last = child;
+ }
+ }
+}
+
+UseCounts::~UseCounts()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3UseCounts.h b/Source/JavaScriptCore/b3/B3UseCounts.h
new file mode 100644
index 000000000..f5a0492a9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3UseCounts.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class UseCounts {
+public:
+ JS_EXPORT_PRIVATE UseCounts(Procedure&);
+ JS_EXPORT_PRIVATE ~UseCounts();
+
+ unsigned numUses(Value* value) const { return m_counts[value].numUses; }
+ unsigned numUsingInstructions(Value* value) const { return m_counts[value].numUsingInstructions; }
+
+private:
+ struct Counts {
+ unsigned numUses { 0 };
+ unsigned numUsingInstructions { 0 };
+ };
+
+ IndexMap<Value, Counts> m_counts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
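
The two counters diverge when one value uses the same child more than once; a sketch, assuming a Procedure& proc, a BasicBlock* root, and an Origin origin:

    Value* x = root->appendNew<Const32Value>(proc, origin, 42);
    Value* sum = root->appendNew<Value>(proc, Add, origin, x, x);

    UseCounts useCounts(proc);
    ASSERT(useCounts.numUses(x) == 2);              // two child edges reference x
    ASSERT(useCounts.numUsingInstructions(x) == 1); // but only one value, sum, does the referencing
    UNUSED_PARAM(sum);
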
diff --git a/Source/JavaScriptCore/b3/B3Validate.cpp b/Source/JavaScriptCore/b3/B3Validate.cpp
new file mode 100644
index 000000000..8df8ace8f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Validate.cpp
@@ -0,0 +1,595 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Validate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3Dominators.h"
+#include "B3MemoryValue.h"
+#include "B3Procedure.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "B3WasmBoundsCheckValue.h"
+#include <wtf/HashSet.h>
+#include <wtf/StringPrintStream.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class Validater {
+public:
+ Validater(Procedure& procedure, const char* dumpBefore)
+ : m_procedure(procedure)
+ , m_dumpBefore(dumpBefore)
+ {
+ }
+
+#define VALIDATE(condition, message) do { \
+ if (condition) \
+ break; \
+ fail(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #condition, toCString message); \
+ } while (false)
+
+ void run()
+ {
+ HashSet<BasicBlock*> blocks;
+ HashSet<Value*> valueInProc;
+ HashMap<Value*, unsigned> valueInBlock;
+ HashMap<Value*, BasicBlock*> valueOwner;
+ HashMap<Value*, unsigned> valueIndex;
+
+ for (BasicBlock* block : m_procedure) {
+ blocks.add(block);
+ for (unsigned i = 0; i < block->size(); ++i) {
+ Value* value = block->at(i);
+ valueInBlock.add(value, 0).iterator->value++;
+ valueOwner.add(value, block);
+ valueIndex.add(value, i);
+ }
+ }
+
+ for (Value* value : m_procedure.values())
+ valueInProc.add(value);
+
+ for (Value* value : valueInProc)
+ VALIDATE(valueInBlock.contains(value), ("At ", *value));
+ for (auto& entry : valueInBlock) {
+ VALIDATE(valueInProc.contains(entry.key), ("At ", *entry.key));
+ VALIDATE(entry.value == 1, ("At ", *entry.key));
+ }
+
+ // Compute dominators ourselves to avoid perturbing Procedure.
+ Dominators dominators(m_procedure);
+
+ for (Value* value : valueInProc) {
+ for (Value* child : value->children()) {
+ VALIDATE(child, ("At ", *value));
+ VALIDATE(valueInProc.contains(child), ("At ", *value, "->", pointerDump(child)));
+ if (valueOwner.get(child) == valueOwner.get(value))
+ VALIDATE(valueIndex.get(value) > valueIndex.get(child), ("At ", *value, "->", pointerDump(child)));
+ else
+ VALIDATE(dominators.dominates(valueOwner.get(child), valueOwner.get(value)), ("At ", *value, "->", pointerDump(child)));
+ }
+ }
+
+ HashMap<BasicBlock*, HashSet<BasicBlock*>> allPredecessors;
+ for (BasicBlock* block : blocks) {
+ VALIDATE(block->size() >= 1, ("At ", *block));
+ for (unsigned i = 0; i < block->size() - 1; ++i)
+ VALIDATE(!block->at(i)->effects().terminal, ("At ", *block->at(i)));
+ VALIDATE(block->last()->effects().terminal, ("At ", *block->last()));
+
+ for (BasicBlock* successor : block->successorBlocks()) {
+ allPredecessors.add(successor, HashSet<BasicBlock*>()).iterator->value.add(block);
+ VALIDATE(
+ blocks.contains(successor), ("At ", *block, "->", pointerDump(successor)));
+ }
+ }
+
+ // Note that this totally allows dead code.
+ for (auto& entry : allPredecessors) {
+ BasicBlock* successor = entry.key;
+ HashSet<BasicBlock*>& predecessors = entry.value;
+ VALIDATE(predecessors == successor->predecessors(), ("At ", *successor));
+ }
+
+ for (Value* value : m_procedure.values()) {
+ for (Value* child : value->children())
+ VALIDATE(child->type() != Void, ("At ", *value, "->", *child));
+ switch (value->opcode()) {
+ case Nop:
+ case Fence:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ break;
+ case Identity:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+ VALIDATE(value->type() != Void, ("At ", *value));
+ break;
+ case Const32:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == Int32, ("At ", *value));
+ break;
+ case Const64:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == Int64, ("At ", *value));
+ break;
+ case ConstDouble:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == Double, ("At ", *value));
+ break;
+ case ConstFloat:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == Float, ("At ", *value));
+ break;
+ case Set:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == value->as<VariableValue>()->variable()->type(), ("At ", *value));
+ break;
+ case Get:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == value->as<VariableValue>()->variable()->type(), ("At ", *value));
+ break;
+ case SlotBase:
+ case FramePointer:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == pointerType(), ("At ", *value));
+ break;
+ case ArgumentReg:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(
+ (value->as<ArgumentRegValue>()->argumentReg().isGPR() ? pointerType() : Double)
+ == value->type(), ("At ", *value));
+ break;
+ case Add:
+ case Sub:
+ case Mul:
+ case Div:
+ case UDiv:
+ case Mod:
+ case UMod:
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ VALIDATE(!value->kind().traps(), ("At ", *value));
+ switch (value->opcode()) {
+ case Div:
+ case Mod:
+ if (value->isChill()) {
+ VALIDATE(value->opcode() == Div || value->opcode() == Mod, ("At ", *value));
+ VALIDATE(isInt(value->type()), ("At ", *value));
+ }
+ break;
+ default:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ break;
+ }
+ VALIDATE(value->numChildren() == 2, ("At ", *value));
+ VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+ VALIDATE(value->type() == value->child(1)->type(), ("At ", *value));
+ VALIDATE(value->type() != Void, ("At ", *value));
+ break;
+ case Neg:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+ VALIDATE(value->type() != Void, ("At ", *value));
+ break;
+ case Shl:
+ case SShr:
+ case ZShr:
+ case RotR:
+ case RotL:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 2, ("At ", *value));
+ VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+ VALIDATE(value->child(1)->type() == Int32, ("At ", *value));
+ VALIDATE(isInt(value->type()), ("At ", *value));
+ break;
+ case BitwiseCast:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->type() != value->child(0)->type(), ("At ", *value));
+ VALIDATE(
+ (value->type() == Int64 && value->child(0)->type() == Double)
+ || (value->type() == Double && value->child(0)->type() == Int64)
+ || (value->type() == Float && value->child(0)->type() == Int32)
+ || (value->type() == Int32 && value->child(0)->type() == Float),
+ ("At ", *value));
+ break;
+ case SExt8:
+ case SExt16:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+ VALIDATE(value->type() == Int32, ("At ", *value));
+ break;
+ case SExt32:
+ case ZExt32:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+ VALIDATE(value->type() == Int64, ("At ", *value));
+ break;
+ case Clz:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(isInt(value->type()), ("At ", *value));
+ break;
+ case Trunc:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(
+ (value->type() == Int32 && value->child(0)->type() == Int64)
+ || (value->type() == Float && value->child(0)->type() == Double),
+ ("At ", *value));
+ break;
+ case Abs:
+ case Ceil:
+ case Floor:
+ case Sqrt:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(isFloat(value->child(0)->type()), ("At ", *value));
+ VALIDATE(isFloat(value->type()), ("At ", *value));
+ break;
+ case IToD:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(value->type() == Double, ("At ", *value));
+ break;
+ case IToF:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(value->type() == Float, ("At ", *value));
+ break;
+ case FloatToDouble:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == Float, ("At ", *value));
+ VALIDATE(value->type() == Double, ("At ", *value));
+ break;
+ case DoubleToFloat:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == Double, ("At ", *value));
+ VALIDATE(value->type() == Float, ("At ", *value));
+ break;
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case LessEqual:
+ case GreaterEqual:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 2, ("At ", *value));
+ VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+ VALIDATE(value->type() == Int32, ("At ", *value));
+ break;
+ case Above:
+ case Below:
+ case AboveEqual:
+ case BelowEqual:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 2, ("At ", *value));
+ VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(value->type() == Int32, ("At ", *value));
+ break;
+ case EqualOrUnordered:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 2, ("At ", *value));
+ VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+ VALIDATE(isFloat(value->child(0)->type()), ("At ", *value));
+ VALIDATE(value->type() == Int32, ("At ", *value));
+ break;
+ case Select:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 3, ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(value->type() == value->child(1)->type(), ("At ", *value));
+ VALIDATE(value->type() == value->child(2)->type(), ("At ", *value));
+ break;
+ case Load8Z:
+ case Load8S:
+ case Load16Z:
+ case Load16S:
+ VALIDATE(!value->kind().isChill(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+ VALIDATE(value->type() == Int32, ("At ", *value));
+ validateStackAccess(value);
+ break;
+ case Load:
+ VALIDATE(!value->kind().isChill(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+ VALIDATE(value->type() != Void, ("At ", *value));
+ validateStackAccess(value);
+ break;
+ case Store8:
+ case Store16:
+ VALIDATE(!value->kind().isChill(), ("At ", *value));
+ VALIDATE(value->numChildren() == 2, ("At ", *value));
+ VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+ VALIDATE(value->child(1)->type() == pointerType(), ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ validateStackAccess(value);
+ break;
+ case Store:
+ VALIDATE(!value->kind().isChill(), ("At ", *value));
+ VALIDATE(value->numChildren() == 2, ("At ", *value));
+ VALIDATE(value->child(1)->type() == pointerType(), ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ validateStackAccess(value);
+ break;
+ case WasmAddress:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+ VALIDATE(value->type() == pointerType(), ("At ", *value));
+ break;
+ case CCall:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() >= 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+ break;
+ case Patchpoint:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ if (value->type() == Void)
+ VALIDATE(value->as<PatchpointValue>()->resultConstraint == ValueRep::WarmAny, ("At ", *value));
+ else {
+ switch (value->as<PatchpointValue>()->resultConstraint.kind()) {
+ case ValueRep::WarmAny:
+ case ValueRep::SomeRegister:
+ case ValueRep::SomeEarlyRegister:
+ case ValueRep::Register:
+ case ValueRep::StackArgument:
+ break;
+ default:
+ VALIDATE(false, ("At ", *value));
+ break;
+ }
+
+ validateStackmapConstraint(value, ConstrainedValue(value, value->as<PatchpointValue>()->resultConstraint), ConstraintRole::Def);
+ }
+ validateStackmap(value);
+ break;
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() >= 2, ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(isInt(value->child(1)->type()), ("At ", *value));
+ VALIDATE(value->as<StackmapValue>()->constrainedChild(0).rep() == ValueRep::WarmAny, ("At ", *value));
+ VALIDATE(value->as<StackmapValue>()->constrainedChild(1).rep() == ValueRep::WarmAny, ("At ", *value));
+ validateStackmap(value);
+ break;
+ case Check:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() >= 1, ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(value->as<StackmapValue>()->constrainedChild(0).rep() == ValueRep::WarmAny, ("At ", *value));
+ validateStackmap(value);
+ break;
+ case WasmBoundsCheck:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+ VALIDATE(m_procedure.code().isPinned(value->as<WasmBoundsCheckValue>()->pinnedGPR()), ("At ", *value));
+ VALIDATE(m_procedure.code().wasmBoundsCheckGenerator(), ("At ", *value));
+ break;
+ case Upsilon:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(value->as<UpsilonValue>()->phi(), ("At ", *value));
+ VALIDATE(value->as<UpsilonValue>()->phi()->opcode() == Phi, ("At ", *value));
+ VALIDATE(value->child(0)->type() == value->as<UpsilonValue>()->phi()->type(), ("At ", *value));
+ VALIDATE(valueInProc.contains(value->as<UpsilonValue>()->phi()), ("At ", *value));
+ break;
+ case Phi:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() != Void, ("At ", *value));
+ break;
+ case Jump:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ VALIDATE(valueOwner.get(value)->numSuccessors() == 1, ("At ", *value));
+ break;
+ case Oops:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ VALIDATE(!valueOwner.get(value)->numSuccessors(), ("At ", *value));
+ break;
+ case Return:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() <= 1, ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ VALIDATE(!valueOwner.get(value)->numSuccessors(), ("At ", *value));
+ break;
+ case Branch:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ VALIDATE(valueOwner.get(value)->numSuccessors() == 2, ("At ", *value));
+ break;
+ case Switch: {
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(value->numChildren() == 1, ("At ", *value));
+ VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ VALIDATE(value->as<SwitchValue>()->hasFallThrough(valueOwner.get(value)), ("At ", *value));
+ // This validates the same thing as hasFallThrough, but more explicitly. We want to
+ // make sure that if anyone tries to change the definition of hasFallThrough, they
+ // will feel some pain here, since this is fundamental.
+ VALIDATE(valueOwner.get(value)->numSuccessors() == value->as<SwitchValue>()->numCaseValues() + 1, ("At ", *value));
+
+ // Check that there are no duplicate cases.
+ Vector<int64_t> caseValues = value->as<SwitchValue>()->caseValues();
+ std::sort(caseValues.begin(), caseValues.end());
+ for (unsigned i = 1; i < caseValues.size(); ++i)
+ VALIDATE(caseValues[i - 1] != caseValues[i], ("At ", *value, ", caseValue = ", caseValues[i]));
+ break;
+ }
+ case EntrySwitch:
+ VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+ VALIDATE(!value->numChildren(), ("At ", *value));
+ VALIDATE(value->type() == Void, ("At ", *value));
+ VALIDATE(valueOwner.get(value)->numSuccessors() == m_procedure.numEntrypoints(), ("At ", *value));
+ break;
+ }
+
+ VALIDATE(!(value->effects().writes && value->key()), ("At ", *value));
+ }
+
+ for (Variable* variable : m_procedure.variables())
+ VALIDATE(variable->type() != Void, ("At ", *variable));
+ }
+
+private:
+ void validateStackmap(Value* value)
+ {
+ StackmapValue* stackmap = value->as<StackmapValue>();
+ VALIDATE(stackmap, ("At ", *value));
+ VALIDATE(stackmap->numChildren() >= stackmap->reps().size(), ("At ", *stackmap));
+ for (ConstrainedValue child : stackmap->constrainedChildren())
+ validateStackmapConstraint(stackmap, child);
+ }
+
+ enum class ConstraintRole {
+ Use,
+ Def
+ };
+ void validateStackmapConstraint(Value* context, const ConstrainedValue& value, ConstraintRole role = ConstraintRole::Use)
+ {
+ switch (value.rep().kind()) {
+ case ValueRep::WarmAny:
+ case ValueRep::ColdAny:
+ case ValueRep::LateColdAny:
+ case ValueRep::SomeRegister:
+ case ValueRep::StackArgument:
+ break;
+ case ValueRep::SomeEarlyRegister:
+ VALIDATE(role == ConstraintRole::Def, ("At ", *context, ": ", value));
+ break;
+ case ValueRep::Register:
+ case ValueRep::LateRegister:
+ if (value.rep().reg().isGPR())
+ VALIDATE(isInt(value.value()->type()), ("At ", *context, ": ", value));
+ else
+ VALIDATE(isFloat(value.value()->type()), ("At ", *context, ": ", value));
+ break;
+ default:
+ VALIDATE(false, ("At ", *context, ": ", value));
+ break;
+ }
+ }
+
+ void validateStackAccess(Value* value)
+ {
+ MemoryValue* memory = value->as<MemoryValue>();
+ SlotBaseValue* slotBase = value->lastChild()->as<SlotBaseValue>();
+ if (!slotBase)
+ return;
+
+ StackSlot* stack = slotBase->slot();
+
+ VALIDATE(memory->offset() >= 0, ("At ", *value));
+ VALIDATE(memory->offset() + memory->accessByteSize() <= stack->byteSize(), ("At ", *value));
+ }
+
+ NO_RETURN_DUE_TO_CRASH void fail(
+ const char* filename, int lineNumber, const char* function, const char* condition,
+ CString message)
+ {
+ CString failureMessage;
+ {
+ StringPrintStream out;
+ out.print("B3 VALIDATION FAILURE\n");
+ out.print(" ", condition, " (", filename, ":", lineNumber, ")\n");
+ out.print(" ", message, "\n");
+ out.print(" After ", m_procedure.lastPhaseName(), "\n");
+ failureMessage = out.toCString();
+ }
+
+ dataLog(failureMessage);
+ if (m_dumpBefore) {
+ dataLog("Before ", m_procedure.lastPhaseName(), ":\n");
+ dataLog(m_dumpBefore);
+ }
+ dataLog("At time of failure:\n");
+ dataLog(m_procedure);
+
+ dataLog(failureMessage);
+ WTFReportAssertionFailure(filename, lineNumber, function, condition);
+ CRASH();
+ }
+
+ Procedure& m_procedure;
+ const char* m_dumpBefore;
+};
+
+} // anonymous namespace
+
+void validate(Procedure& procedure, const char* dumpBefore)
+{
+ Validater validater(procedure, dumpBefore);
+ validater.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Validate.h b/Source/JavaScriptCore/b3/B3Validate.h
new file mode 100644
index 000000000..d115e22e4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Validate.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+JS_EXPORT_PRIVATE void validate(Procedure&, const char* dumpBefore = nullptr);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Value.cpp b/Source/JavaScriptCore/b3/B3Value.cpp
new file mode 100644
index 000000000..b4fc43369
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Value.cpp
@@ -0,0 +1,870 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BottomProvider.h"
+#include "B3CCallValue.h"
+#include "B3FenceValue.h"
+#include "B3MemoryValue.h"
+#include "B3OriginDump.h"
+#include "B3ProcedureInlines.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+#include "B3VariableValue.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC { namespace B3 {
+
+const char* const Value::dumpPrefix = "@";
+
+Value::~Value()
+{
+}
+
+void Value::replaceWithIdentity(Value* value)
+{
+ // This is a bit crazy. It does an in-place replacement of whatever Value subclass this is with
+ // a plain Identity Value. We first collect all of the information we need, then we destruct the
+ // previous value in place, and then we construct the Identity Value in place.
+
+ ASSERT(m_type == value->m_type);
+
+ if (m_type == Void) {
+ replaceWithNopIgnoringType();
+ return;
+ }
+
+ unsigned index = m_index;
+ Type type = m_type;
+ Origin origin = m_origin;
+ BasicBlock* owner = this->owner;
+
+ RELEASE_ASSERT(type == value->type());
+
+ this->~Value();
+
+ new (this) Value(Identity, type, origin, value);
+
+ this->owner = owner;
+ this->m_index = index;
+}
+
+void Value::replaceWithBottom(InsertionSet& insertionSet, size_t index)
+{
+ replaceWithBottom(BottomProvider(insertionSet, index));
+}
+
+void Value::replaceWithNop()
+{
+ RELEASE_ASSERT(m_type == Void);
+ replaceWithNopIgnoringType();
+}
+
+void Value::replaceWithNopIgnoringType()
+{
+ unsigned index = m_index;
+ Origin origin = m_origin;
+ BasicBlock* owner = this->owner;
+
+ this->~Value();
+
+ new (this) Value(Nop, Void, origin);
+
+ this->owner = owner;
+ this->m_index = index;
+}
+
+void Value::replaceWithPhi()
+{
+ if (m_type == Void) {
+ replaceWithNop();
+ return;
+ }
+
+ unsigned index = m_index;
+ Origin origin = m_origin;
+ BasicBlock* owner = this->owner;
+ Type type = m_type;
+
+ this->~Value();
+
+ new (this) Value(Phi, type, origin);
+
+ this->owner = owner;
+ this->m_index = index;
+}
+
+void Value::replaceWithJump(BasicBlock* owner, FrequentedBlock target)
+{
+ RELEASE_ASSERT(owner->last() == this);
+
+ unsigned index = m_index;
+ Origin origin = m_origin;
+
+ this->~Value();
+
+ new (this) Value(Jump, Void, origin);
+
+ this->owner = owner;
+ this->m_index = index;
+
+ owner->setSuccessors(target);
+}
+
+void Value::replaceWithOops(BasicBlock* owner)
+{
+ RELEASE_ASSERT(owner->last() == this);
+
+ unsigned index = m_index;
+ Origin origin = m_origin;
+
+ this->~Value();
+
+ new (this) Value(Oops, Void, origin);
+
+ this->owner = owner;
+ this->m_index = index;
+
+ owner->clearSuccessors();
+}
+
+void Value::replaceWithJump(FrequentedBlock target)
+{
+ replaceWithJump(owner, target);
+}
+
+void Value::replaceWithOops()
+{
+ replaceWithOops(owner);
+}
+
+void Value::dump(PrintStream& out) const
+{
+ bool isConstant = false;
+
+ switch (opcode()) {
+ case Const32:
+ out.print("$", asInt32(), "(");
+ isConstant = true;
+ break;
+ case Const64:
+ out.print("$", asInt64(), "(");
+ isConstant = true;
+ break;
+ case ConstFloat:
+ out.print("$", asFloat(), "(");
+ isConstant = true;
+ break;
+ case ConstDouble:
+ out.print("$", asDouble(), "(");
+ isConstant = true;
+ break;
+ default:
+ break;
+ }
+
+ out.print(dumpPrefix, m_index);
+
+ if (isConstant)
+ out.print(")");
+}
+
+Value* Value::cloneImpl() const
+{
+ return new Value(*this);
+}
+
+void Value::dumpChildren(CommaPrinter& comma, PrintStream& out) const
+{
+ for (Value* child : children())
+ out.print(comma, pointerDump(child));
+}
+
+void Value::deepDump(const Procedure* proc, PrintStream& out) const
+{
+ out.print(m_type, " ", dumpPrefix, m_index, " = ", m_kind);
+
+ out.print("(");
+ CommaPrinter comma;
+ dumpChildren(comma, out);
+
+ if (m_origin)
+ out.print(comma, OriginDump(proc, m_origin));
+
+ dumpMeta(comma, out);
+
+ {
+ CString string = toCString(effects());
+ if (string.length())
+ out.print(comma, string);
+ }
+
+ out.print(")");
+}
+
+void Value::dumpSuccessors(const BasicBlock* block, PrintStream& out) const
+{
+ // Note that this must not crash if we have the wrong number of successors, since someone
+ // debugging a number-of-successors bug will probably want to dump IR!
+
+ if (opcode() == Branch && block->numSuccessors() == 2) {
+ out.print("Then:", block->taken(), ", Else:", block->notTaken());
+ return;
+ }
+
+ out.print(listDump(block->successors()));
+}
+
+Value* Value::negConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::addConstant(Procedure&, int32_t) const
+{
+ return nullptr;
+}
+
+Value* Value::addConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::subConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::mulConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::checkAddConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::checkSubConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::checkMulConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::checkNegConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::divConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::uDivConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::modConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::uModConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::bitAndConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::bitOrConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::bitXorConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::shlConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::sShrConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::zShrConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::rotRConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::rotLConstant(Procedure&, const Value*) const
+{
+ return nullptr;
+}
+
+Value* Value::bitwiseCastConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::iToDConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::iToFConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::doubleToFloatConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::floatToDoubleConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::absConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::ceilConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::floorConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+Value* Value::sqrtConstant(Procedure&) const
+{
+ return nullptr;
+}
+
+TriState Value::equalConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::notEqualConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::lessThanConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::greaterThanConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::lessEqualConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::greaterEqualConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::aboveConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::belowConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::aboveEqualConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::belowEqualConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+TriState Value::equalOrUnorderedConstant(const Value*) const
+{
+ return MixedTriState;
+}
+
+Value* Value::invertedCompare(Procedure& proc) const
+{
+ if (!numChildren())
+ return nullptr;
+ if (std::optional<Opcode> invertedOpcode = B3::invertedCompare(opcode(), child(0)->type())) {
+ ASSERT(!kind().hasExtraBits());
+ return proc.add<Value>(*invertedOpcode, type(), origin(), children());
+ }
+ return nullptr;
+}
+
+bool Value::isRounded() const
+{
+ ASSERT(isFloat(type()));
+ switch (opcode()) {
+ case Floor:
+ case Ceil:
+ case IToD:
+ case IToF:
+ return true;
+
+ case ConstDouble: {
+ double value = asDouble();
+ return std::isfinite(value) && value == ceil(value);
+ }
+
+ case ConstFloat: {
+ float value = asFloat();
+ return std::isfinite(value) && value == ceilf(value);
+ }
+
+ default:
+ return false;
+ }
+}
+
+bool Value::returnsBool() const
+{
+ if (type() != Int32)
+ return false;
+ switch (opcode()) {
+ case Const32:
+ return asInt32() == 0 || asInt32() == 1;
+ case BitAnd:
+ return child(1)->isInt32(1)
+ || (child(0)->returnsBool() && child(1)->hasInt() && child(1)->asInt() & 1);
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case LessEqual:
+ case GreaterEqual:
+ case Above:
+ case Below:
+ case AboveEqual:
+ case BelowEqual:
+ case EqualOrUnordered:
+ return true;
+ case Phi:
+ // FIXME: We should have a story here.
+ // https://bugs.webkit.org/show_bug.cgi?id=150725
+ return false;
+ default:
+ return false;
+ }
+}
+
+TriState Value::asTriState() const
+{
+ switch (opcode()) {
+ case Const32:
+ return triState(!!asInt32());
+ case Const64:
+ return triState(!!asInt64());
+ case ConstDouble:
+        // Use "!= 0" to really emphasize what this means with respect to NaN and such.
+ return triState(asDouble() != 0);
+ case ConstFloat:
+ return triState(asFloat() != 0.);
+ default:
+ return MixedTriState;
+ }
+}
+
+Effects Value::effects() const
+{
+ Effects result;
+ switch (opcode()) {
+ case Nop:
+ case Identity:
+ case Const32:
+ case Const64:
+ case ConstDouble:
+ case ConstFloat:
+ case SlotBase:
+ case ArgumentReg:
+ case FramePointer:
+ case Add:
+ case Sub:
+ case Mul:
+ case Neg:
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case Shl:
+ case SShr:
+ case ZShr:
+ case RotR:
+ case RotL:
+ case Clz:
+ case Abs:
+ case Ceil:
+ case Floor:
+ case Sqrt:
+ case BitwiseCast:
+ case SExt8:
+ case SExt16:
+ case SExt32:
+ case ZExt32:
+ case Trunc:
+ case IToD:
+ case IToF:
+ case FloatToDouble:
+ case DoubleToFloat:
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case LessEqual:
+ case GreaterEqual:
+ case Above:
+ case Below:
+ case AboveEqual:
+ case BelowEqual:
+ case EqualOrUnordered:
+ case Select:
+ break;
+ case Div:
+ case UDiv:
+ case Mod:
+ case UMod:
+ result.controlDependent = true;
+ break;
+ case Load8Z:
+ case Load8S:
+ case Load16Z:
+ case Load16S:
+ case Load:
+ result.reads = as<MemoryValue>()->range();
+ result.controlDependent = true;
+ break;
+ case Store8:
+ case Store16:
+ case Store:
+ result.writes = as<MemoryValue>()->range();
+ result.controlDependent = true;
+ break;
+ case WasmAddress:
+ result.readsPinned = true;
+ break;
+ case Fence: {
+ const FenceValue* fence = as<FenceValue>();
+ result.reads = fence->read;
+ result.writes = fence->write;
+
+        // Prevent killing of fences that claim not to write anything. It's a bit weird that we use
+        // local state as the way to do this, but it happens to work: we must assume that we cannot
+        // kill writesLocalState unless we understand exactly what the instruction is doing (like
+        // the way that fixSSA understands Set/Get and the way that reduceStrength and others
+        // understand Upsilon). This would only become a problem if we had some analysis that was
+        // looking to use the writesLocalState bit to invalidate a CSE over local state operations.
+        // Then a Fence would block, say, the elimination of a redundant Get. But it seems like
+        // that's not at all how our optimizations for Set/Get/Upsilon/Phi work - they grok their
+        // operations deeply enough that they have no need to check this bit - so this cheat is
+        // fine.
+ result.writesLocalState = true;
+ break;
+ }
+ case CCall:
+ result = as<CCallValue>()->effects;
+ break;
+ case Patchpoint:
+ result = as<PatchpointValue>()->effects;
+ break;
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ case Check:
+ result = Effects::forCheck();
+ break;
+ case WasmBoundsCheck:
+ result.readsPinned = true;
+ result.exitsSideways = true;
+ break;
+ case Upsilon:
+ case Set:
+ result.writesLocalState = true;
+ break;
+ case Phi:
+ case Get:
+ result.readsLocalState = true;
+ break;
+ case Jump:
+ case Branch:
+ case Switch:
+ case Return:
+ case Oops:
+ case EntrySwitch:
+ result.terminal = true;
+ break;
+ }
+ if (traps()) {
+ result.exitsSideways = true;
+ result.reads = HeapRange::top();
+ }
+ return result;
+}
+
+ValueKey Value::key() const
+{
+ switch (opcode()) {
+ case FramePointer:
+ return ValueKey(kind(), type());
+ case Identity:
+ case Abs:
+ case Ceil:
+ case Floor:
+ case Sqrt:
+ case SExt8:
+ case SExt16:
+ case SExt32:
+ case ZExt32:
+ case Clz:
+ case Trunc:
+ case IToD:
+ case IToF:
+ case FloatToDouble:
+ case DoubleToFloat:
+ case Check:
+ case BitwiseCast:
+ case Neg:
+ return ValueKey(kind(), type(), child(0));
+ case Add:
+ case Sub:
+ case Mul:
+ case Div:
+ case UDiv:
+ case Mod:
+ case UMod:
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case Shl:
+ case SShr:
+ case ZShr:
+ case RotR:
+ case RotL:
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case Above:
+ case Below:
+ case AboveEqual:
+ case BelowEqual:
+ case EqualOrUnordered:
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ return ValueKey(kind(), type(), child(0), child(1));
+ case Select:
+ return ValueKey(kind(), type(), child(0), child(1), child(2));
+ case Const32:
+ return ValueKey(Const32, type(), static_cast<int64_t>(asInt32()));
+ case Const64:
+ return ValueKey(Const64, type(), asInt64());
+ case ConstDouble:
+ return ValueKey(ConstDouble, type(), asDouble());
+ case ConstFloat:
+ return ValueKey(ConstFloat, type(), asFloat());
+ case ArgumentReg:
+ return ValueKey(
+ ArgumentReg, type(),
+ static_cast<int64_t>(as<ArgumentRegValue>()->argumentReg().index()));
+ case SlotBase:
+ return ValueKey(
+ SlotBase, type(),
+ static_cast<int64_t>(as<SlotBaseValue>()->slot()->index()));
+ default:
+ return ValueKey();
+ }
+}
+
+void Value::performSubstitution()
+{
+ for (Value*& child : children()) {
+ while (child->opcode() == Identity)
+ child = child->child(0);
+ }
+}
+
+bool Value::isFree() const
+{
+ switch (opcode()) {
+ case Const32:
+ case Const64:
+ case ConstDouble:
+ case ConstFloat:
+ case Identity:
+ case Nop:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void Value::dumpMeta(CommaPrinter&, PrintStream&) const
+{
+}
+
+Type Value::typeFor(Kind kind, Value* firstChild, Value* secondChild)
+{
+ switch (kind.opcode()) {
+ case Identity:
+ case Add:
+ case Sub:
+ case Mul:
+ case Div:
+ case UDiv:
+ case Mod:
+ case UMod:
+ case Neg:
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case Shl:
+ case SShr:
+ case ZShr:
+ case RotR:
+ case RotL:
+ case Clz:
+ case Abs:
+ case Ceil:
+ case Floor:
+ case Sqrt:
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ return firstChild->type();
+ case FramePointer:
+ return pointerType();
+ case SExt8:
+ case SExt16:
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case LessEqual:
+ case GreaterEqual:
+ case Above:
+ case Below:
+ case AboveEqual:
+ case BelowEqual:
+ case EqualOrUnordered:
+ return Int32;
+ case Trunc:
+ return firstChild->type() == Int64 ? Int32 : Float;
+ case SExt32:
+ case ZExt32:
+ return Int64;
+ case FloatToDouble:
+ case IToD:
+ return Double;
+ case DoubleToFloat:
+ case IToF:
+ return Float;
+ case BitwiseCast:
+ switch (firstChild->type()) {
+ case Int64:
+ return Double;
+ case Double:
+ return Int64;
+ case Int32:
+ return Float;
+ case Float:
+ return Int32;
+ case Void:
+ ASSERT_NOT_REACHED();
+ }
+ return Void;
+ case Nop:
+ case Jump:
+ case Branch:
+ case Return:
+ case Oops:
+ case EntrySwitch:
+ case WasmBoundsCheck:
+ return Void;
+ case Select:
+ ASSERT(secondChild);
+ return secondChild->type();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+void Value::badKind(Kind kind, unsigned numArgs)
+{
+ dataLog("Bad kind ", kind, " with ", numArgs, " args.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Value.h b/Source/JavaScriptCore/b3/B3Value.h
new file mode 100644
index 000000000..ebe52ad3f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Value.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "B3Effects.h"
+#include "B3FrequentedBlock.h"
+#include "B3Kind.h"
+#include "B3Origin.h"
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include "B3ValueKey.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class CheckValue;
+class InsertionSet;
+class PhiChildren;
+class Procedure;
+
+class JS_EXPORT_PRIVATE Value {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ typedef Vector<Value*, 3> AdjacencyList;
+
+ static const char* const dumpPrefix;
+
+ static bool accepts(Kind) { return true; }
+
+ virtual ~Value();
+
+ unsigned index() const { return m_index; }
+
+ // Note that the kind is immutable, except for replacing values with:
+ // Identity, Nop, Oops, Jump, and Phi. See below for replaceWithXXX() methods.
+ Kind kind() const { return m_kind; }
+
+ Opcode opcode() const { return kind().opcode(); }
+
+ // It's good practice to mirror Kind methods here, so you can say value->isBlah()
+ // instead of value->kind().isBlah().
+ bool isChill() const { return kind().isChill(); }
+ bool traps() const { return kind().traps(); }
+
+ Origin origin() const { return m_origin; }
+ void setOrigin(Origin origin) { m_origin = origin; }
+
+ Value*& child(unsigned index) { return m_children[index]; }
+ Value* child(unsigned index) const { return m_children[index]; }
+
+ Value*& lastChild() { return m_children.last(); }
+ Value* lastChild() const { return m_children.last(); }
+
+ unsigned numChildren() const { return m_children.size(); }
+
+ Type type() const { return m_type; }
+ void setType(Type type) { m_type = type; }
+
+ // This is useful when lowering. Note that this is only valid for non-void values.
+ Air::Arg::Type airType() const { return Air::Arg::typeForB3Type(type()); }
+ Air::Arg::Width airWidth() const { return Air::Arg::widthForB3Type(type()); }
+
+ AdjacencyList& children() { return m_children; }
+ const AdjacencyList& children() const { return m_children; }
+
+ // If you want to replace all uses of this value with a different value, then replace this
+ // value with Identity. Then do a pass of performSubstitution() on all of the values that use
+ // this one. Usually we do all of this in one pass in pre-order, which ensures that the
+ // X->replaceWithIdentity() calls happen before the performSubstitution() calls on X's users.
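+    //
+    // As a rough illustration (the names below are hypothetical), the usual pattern looks
+    // something like:
+    //
+    //     oldValue->replaceWithIdentity(newValue);
+    //     for (BasicBlock* block : proc) {
+    //         for (Value* value : *block)
+    //             value->performSubstitution();
+    //     }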
+ void replaceWithIdentity(Value*);
+
+ // It's often necessary to kill a value. It's tempting to replace the value with Nop or to
+ // just remove it. But unless you are sure that the value is Void, you will probably still
+ // have other values that use this one. Sure, you may kill those later, or you might not. This
+ // method lets you kill a value safely. It will replace Void values with Nop and non-Void
+ // values with Identities on bottom constants. For this reason, this takes a callback that is
+ // responsible for creating bottoms. There's a utility for this, see B3BottomProvider.h. You
+ // can also access that utility using replaceWithBottom(InsertionSet&, size_t).
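+    //
+    // Purely as a sketch (value and variable names are hypothetical), killing a possibly-used
+    // value during a per-block pass tends to look like:
+    //
+    //     deadValue->replaceWithBottom(insertionSet, indexInBlock);
+    //     // ... and once the loop over the block is done:
+    //     insertionSet.execute(block);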
+ template<typename BottomProvider>
+ void replaceWithBottom(const BottomProvider&);
+
+ void replaceWithBottom(InsertionSet&, size_t index);
+
+ // Use this if you want to kill a value and you are sure that the value is Void.
+ void replaceWithNop();
+
+ // Use this if you want to kill a value and you are sure that nobody is using it anymore.
+ void replaceWithNopIgnoringType();
+
+ void replaceWithPhi();
+
+ // These transformations are only valid for terminals.
+ void replaceWithJump(BasicBlock* owner, FrequentedBlock);
+ void replaceWithOops(BasicBlock* owner);
+
+ // You can use this form if owners are valid. They're usually not valid.
+ void replaceWithJump(FrequentedBlock);
+ void replaceWithOops();
+
+ void dump(PrintStream&) const;
+ void deepDump(const Procedure*, PrintStream&) const;
+
+ virtual void dumpSuccessors(const BasicBlock*, PrintStream&) const;
+
+    // This is how you cast Values. For example, if you want to do something provided that we have
+    // an ArgumentRegValue, you can do:
+ //
+ // if (ArgumentRegValue* argumentReg = value->as<ArgumentRegValue>()) {
+ // things
+ // }
+ //
+    // This will return null if this value's kind() is not ArgumentReg. It works because as<T>()
+    // returns nullptr if T::accepts(kind()) returns false.
+ template<typename T>
+ T* as();
+ template<typename T>
+ const T* as() const;
+
+ // What follows are a bunch of helpers for inspecting and modifying values. Note that we have a
+ // bunch of different idioms for implementing such helpers. You can use virtual methods, and
+ // override from the various Value subclasses. You can put the method inside Value and make it
+ // non-virtual, and the implementation can switch on kind. The method could be inline or not.
+ // If a method is specific to some Value subclass, you could put it in the subclass, or you could
+ // put it on Value anyway. It's fine to pick whatever feels right, and we shouldn't restrict
+ // ourselves to any particular idiom.
+
+ bool isConstant() const;
+ bool isInteger() const;
+
+ virtual Value* negConstant(Procedure&) const;
+ virtual Value* addConstant(Procedure&, int32_t other) const;
+ virtual Value* addConstant(Procedure&, const Value* other) const;
+ virtual Value* subConstant(Procedure&, const Value* other) const;
+ virtual Value* mulConstant(Procedure&, const Value* other) const;
+ virtual Value* checkAddConstant(Procedure&, const Value* other) const;
+ virtual Value* checkSubConstant(Procedure&, const Value* other) const;
+ virtual Value* checkMulConstant(Procedure&, const Value* other) const;
+ virtual Value* checkNegConstant(Procedure&) const;
+ virtual Value* divConstant(Procedure&, const Value* other) const; // This chooses Div<Chill> semantics for integers.
+ virtual Value* uDivConstant(Procedure&, const Value* other) const;
+ virtual Value* modConstant(Procedure&, const Value* other) const; // This chooses Mod<Chill> semantics.
+ virtual Value* uModConstant(Procedure&, const Value* other) const;
+ virtual Value* bitAndConstant(Procedure&, const Value* other) const;
+ virtual Value* bitOrConstant(Procedure&, const Value* other) const;
+ virtual Value* bitXorConstant(Procedure&, const Value* other) const;
+ virtual Value* shlConstant(Procedure&, const Value* other) const;
+ virtual Value* sShrConstant(Procedure&, const Value* other) const;
+ virtual Value* zShrConstant(Procedure&, const Value* other) const;
+ virtual Value* rotRConstant(Procedure&, const Value* other) const;
+ virtual Value* rotLConstant(Procedure&, const Value* other) const;
+ virtual Value* bitwiseCastConstant(Procedure&) const;
+ virtual Value* iToDConstant(Procedure&) const;
+ virtual Value* iToFConstant(Procedure&) const;
+ virtual Value* doubleToFloatConstant(Procedure&) const;
+ virtual Value* floatToDoubleConstant(Procedure&) const;
+ virtual Value* absConstant(Procedure&) const;
+ virtual Value* ceilConstant(Procedure&) const;
+ virtual Value* floorConstant(Procedure&) const;
+ virtual Value* sqrtConstant(Procedure&) const;
+
+ virtual TriState equalConstant(const Value* other) const;
+ virtual TriState notEqualConstant(const Value* other) const;
+ virtual TriState lessThanConstant(const Value* other) const;
+ virtual TriState greaterThanConstant(const Value* other) const;
+ virtual TriState lessEqualConstant(const Value* other) const;
+ virtual TriState greaterEqualConstant(const Value* other) const;
+ virtual TriState aboveConstant(const Value* other) const;
+ virtual TriState belowConstant(const Value* other) const;
+ virtual TriState aboveEqualConstant(const Value* other) const;
+ virtual TriState belowEqualConstant(const Value* other) const;
+ virtual TriState equalOrUnorderedConstant(const Value* other) const;
+
+ // If the value is a comparison then this returns the inverted form of that comparison, if
+ // possible. It can be impossible for double comparisons, where for example LessThan and
+ // GreaterEqual behave differently. If this returns a value, it is a new value, which must be
+ // either inserted into some block or deleted.
+ Value* invertedCompare(Procedure&) const;
+
+ bool hasInt32() const;
+ int32_t asInt32() const;
+ bool isInt32(int32_t) const;
+
+ bool hasInt64() const;
+ int64_t asInt64() const;
+ bool isInt64(int64_t) const;
+
+ bool hasInt() const;
+ int64_t asInt() const;
+ bool isInt(int64_t value) const;
+
+ bool hasIntPtr() const;
+ intptr_t asIntPtr() const;
+ bool isIntPtr(intptr_t) const;
+
+ bool hasDouble() const;
+ double asDouble() const;
+ bool isEqualToDouble(double) const; // We say "isEqualToDouble" because "isDouble" would be a bit equality.
+
+ bool hasFloat() const;
+ float asFloat() const;
+
+ bool hasNumber() const;
+ template<typename T> bool isRepresentableAs() const;
+ template<typename T> T asNumber() const;
+
+ // Booleans in B3 are Const32(0) or Const32(1). So this is true if the type is Int32 and the only
+ // possible return values are 0 or 1. It's OK for this method to conservatively return false.
+ bool returnsBool() const;
+
+ bool isNegativeZero() const;
+
+ bool isRounded() const;
+
+ TriState asTriState() const;
+ bool isLikeZero() const { return asTriState() == FalseTriState; }
+ bool isLikeNonZero() const { return asTriState() == TrueTriState; }
+
+ Effects effects() const;
+
+    // This returns a ValueKey that describes what this Value returns when it executes. Returns an
+ // empty ValueKey if this Value is impure. Note that an operation that returns Void could still
+ // have a non-empty ValueKey. This happens for example with Check operations.
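+    //
+    // For instance (a hypothetical sketch), two pure values can be treated as interchangeable
+    // when:
+    //
+    //     a->key() && a->key() == b->key()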
+ ValueKey key() const;
+
+ // Makes sure that none of the children are Identity's. If a child points to Identity, this will
+ // repoint it at the Identity's child. For simplicity, this will follow arbitrarily long chains
+ // of Identity's.
+ void performSubstitution();
+
+ // Free values are those whose presence is guaranteed not to hurt code. We consider constants,
+ // Identities, and Nops to be free. Constants are free because we hoist them to an optimal place.
+ // Identities and Nops are free because we remove them.
+ bool isFree() const;
+
+ // Walk the ancestors of this value (i.e. the graph of things it transitively uses). This
+ // either walks phis or not, depending on whether PhiChildren is null. Your callback gets
+ // called with the signature:
+ //
+ // (Value*) -> WalkStatus
+ enum WalkStatus {
+ Continue,
+ IgnoreChildren,
+ Stop
+ };
+ template<typename Functor>
+ void walk(const Functor& functor, PhiChildren* = nullptr);
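+
+    // As a rough illustration (the functor body here is hypothetical), counting the constants
+    // that feed a value might look like:
+    //
+    //     unsigned numConstants = 0;
+    //     value->walk([&] (Value* child) -> Value::WalkStatus {
+    //         if (child->isConstant())
+    //             numConstants++;
+    //         return Value::Continue;
+    //     });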
+
+protected:
+ virtual Value* cloneImpl() const;
+
+ virtual void dumpChildren(CommaPrinter&, PrintStream&) const;
+ virtual void dumpMeta(CommaPrinter&, PrintStream&) const;
+
+private:
+ friend class Procedure;
+ friend class SparseCollection<Value>;
+
+ // Checks that this kind is valid for use with B3::Value.
+ ALWAYS_INLINE static void checkKind(Kind kind, unsigned numArgs)
+ {
+ switch (kind.opcode()) {
+ case FramePointer:
+ case Nop:
+ case Phi:
+ case Jump:
+ case Oops:
+ case EntrySwitch:
+ if (UNLIKELY(numArgs))
+ badKind(kind, numArgs);
+ break;
+ case Return:
+ if (UNLIKELY(numArgs > 1))
+ badKind(kind, numArgs);
+ break;
+ case Identity:
+ case Neg:
+ case Clz:
+ case Abs:
+ case Ceil:
+ case Floor:
+ case Sqrt:
+ case SExt8:
+ case SExt16:
+ case Trunc:
+ case SExt32:
+ case ZExt32:
+ case FloatToDouble:
+ case IToD:
+ case DoubleToFloat:
+ case IToF:
+ case BitwiseCast:
+ case Branch:
+ if (UNLIKELY(numArgs != 1))
+ badKind(kind, numArgs);
+ break;
+ case Add:
+ case Sub:
+ case Mul:
+ case Div:
+ case UDiv:
+ case Mod:
+ case UMod:
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case Shl:
+ case SShr:
+ case ZShr:
+ case RotR:
+ case RotL:
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case LessEqual:
+ case GreaterEqual:
+ case Above:
+ case Below:
+ case AboveEqual:
+ case BelowEqual:
+ case EqualOrUnordered:
+ if (UNLIKELY(numArgs != 2))
+ badKind(kind, numArgs);
+ break;
+ case Select:
+ if (UNLIKELY(numArgs != 3))
+ badKind(kind, numArgs);
+ break;
+ default:
+ badKind(kind, numArgs);
+ break;
+ }
+ }
+
+protected:
+ enum CheckedOpcodeTag { CheckedOpcode };
+
+ Value(const Value&) = default;
+ Value& operator=(const Value&) = default;
+
+ // Instantiate values via Procedure.
+ // This form requires specifying the type explicitly:
+ template<typename... Arguments>
+ explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, Value* firstChild, Arguments... arguments)
+ : m_kind(kind)
+ , m_type(type)
+ , m_origin(origin)
+ , m_children{ firstChild, arguments... }
+ {
+ }
+ // This form is for specifying the type explicitly when the opcode has no children:
+ explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin)
+ : m_kind(kind)
+ , m_type(type)
+ , m_origin(origin)
+ {
+ }
+ // This form is for those opcodes that can infer their type from the opcode and first child:
+ template<typename... Arguments>
+ explicit Value(CheckedOpcodeTag, Kind kind, Origin origin, Value* firstChild)
+ : m_kind(kind)
+ , m_type(typeFor(kind, firstChild))
+ , m_origin(origin)
+ , m_children{ firstChild }
+ {
+ }
+ // This form is for those opcodes that can infer their type from the opcode and first and second child:
+ template<typename... Arguments>
+ explicit Value(CheckedOpcodeTag, Kind kind, Origin origin, Value* firstChild, Value* secondChild, Arguments... arguments)
+ : m_kind(kind)
+ , m_type(typeFor(kind, firstChild, secondChild))
+ , m_origin(origin)
+ , m_children{ firstChild, secondChild, arguments... }
+ {
+ }
+ // This form is for those opcodes that can infer their type from the opcode alone, and that don't
+ // take any arguments:
+ explicit Value(CheckedOpcodeTag, Kind kind, Origin origin)
+ : m_kind(kind)
+ , m_type(typeFor(kind, nullptr))
+ , m_origin(origin)
+ {
+ }
+ // Use this form for varargs.
+ explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, const AdjacencyList& children)
+ : m_kind(kind)
+ , m_type(type)
+ , m_origin(origin)
+ , m_children(children)
+ {
+ }
+ explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, AdjacencyList&& children)
+ : m_kind(kind)
+ , m_type(type)
+ , m_origin(origin)
+ , m_children(WTFMove(children))
+ {
+ }
+
+ // This is the constructor you end up actually calling, if you're instantiating Value
+ // directly.
+ template<typename... Arguments>
+ explicit Value(Kind kind, Type type, Origin origin)
+ : Value(CheckedOpcode, kind, type, origin)
+ {
+ checkKind(kind, 0);
+ }
+ template<typename... Arguments>
+ explicit Value(Kind kind, Type type, Origin origin, Value* firstChild, Arguments&&... arguments)
+ : Value(CheckedOpcode, kind, type, origin, firstChild, std::forward<Arguments>(arguments)...)
+ {
+ checkKind(kind, 1 + sizeof...(arguments));
+ }
+ template<typename... Arguments>
+ explicit Value(Kind kind, Type type, Origin origin, const AdjacencyList& children)
+ : Value(CheckedOpcode, kind, type, origin, children)
+ {
+ checkKind(kind, children.size());
+ }
+ template<typename... Arguments>
+ explicit Value(Kind kind, Type type, Origin origin, AdjacencyList&& children)
+ : Value(CheckedOpcode, kind, type, origin, WTFMove(children))
+ {
+ checkKind(kind, m_children.size());
+ }
+ template<typename... Arguments>
+ explicit Value(Kind kind, Origin origin, Arguments&&... arguments)
+ : Value(CheckedOpcode, kind, origin, std::forward<Arguments>(arguments)...)
+ {
+ checkKind(kind, sizeof...(arguments));
+ }
+
+private:
+ friend class CheckValue; // CheckValue::convertToAdd() modifies m_kind.
+
+ static Type typeFor(Kind, Value* firstChild, Value* secondChild = nullptr);
+
+ // This group of fields is arranged to fit in 64 bits.
+protected:
+ unsigned m_index { UINT_MAX };
+private:
+ Kind m_kind;
+ Type m_type;
+
+ Origin m_origin;
+ AdjacencyList m_children;
+
+ JS_EXPORT_PRIVATE NO_RETURN_DUE_TO_CRASH static void badKind(Kind, unsigned);
+
+public:
+ BasicBlock* owner { nullptr }; // computed by Procedure::resetValueOwners().
+};
+
+class DeepValueDump {
+public:
+ DeepValueDump(const Procedure* proc, const Value* value)
+ : m_proc(proc)
+ , m_value(value)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_value)
+ m_value->deepDump(m_proc, out);
+ else
+ out.print("<null>");
+ }
+
+private:
+ const Procedure* m_proc;
+ const Value* m_value;
+};
+
+inline DeepValueDump deepDump(const Procedure& proc, const Value* value)
+{
+ return DeepValueDump(&proc, value);
+}
+inline DeepValueDump deepDump(const Value* value)
+{
+ return DeepValueDump(nullptr, value);
+}
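+
+// For illustration (the call site below is hypothetical), these helpers compose with dataLog():
+//
+//     dataLog("Problematic value: ", deepDump(proc, value), "\n");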
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueInlines.h b/Source/JavaScriptCore/b3/B3ValueInlines.h
new file mode 100644
index 000000000..57f93d60a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueInlines.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CheckValue.h"
+#include "B3Const32Value.h"
+#include "B3Const64Value.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstFloatValue.h"
+#include "B3PatchpointValue.h"
+#include "B3PhiChildren.h"
+#include "B3Procedure.h"
+#include "B3Value.h"
+#include <wtf/GraphNodeWorklist.h>
+
+namespace JSC { namespace B3 {
+
+template<typename BottomProvider>
+void Value::replaceWithBottom(const BottomProvider& bottomProvider)
+{
+ if (m_type == Void) {
+ replaceWithNop();
+ return;
+ }
+
+ if (isConstant())
+ return;
+
+ replaceWithIdentity(bottomProvider(m_origin, m_type));
+}
+
+template<typename T>
+inline T* Value::as()
+{
+ if (T::accepts(kind()))
+ return static_cast<T*>(this);
+ return nullptr;
+}
+
+template<typename T>
+inline const T* Value::as() const
+{
+ return const_cast<Value*>(this)->as<T>();
+}
+
+inline bool Value::isConstant() const
+{
+ return B3::isConstant(opcode());
+}
+
+inline bool Value::isInteger() const
+{
+ return type() == Int32 || type() == Int64;
+}
+
+inline bool Value::hasInt32() const
+{
+ return !!as<Const32Value>();
+}
+
+inline int32_t Value::asInt32() const
+{
+ return as<Const32Value>()->value();
+}
+
+inline bool Value::isInt32(int32_t value) const
+{
+ return hasInt32() && asInt32() == value;
+}
+
+inline bool Value::hasInt64() const
+{
+ return !!as<Const64Value>();
+}
+
+inline int64_t Value::asInt64() const
+{
+ return as<Const64Value>()->value();
+}
+
+inline bool Value::isInt64(int64_t value) const
+{
+ return hasInt64() && asInt64() == value;
+}
+
+inline bool Value::hasInt() const
+{
+ return hasInt32() || hasInt64();
+}
+
+inline int64_t Value::asInt() const
+{
+ return hasInt32() ? asInt32() : asInt64();
+}
+
+inline bool Value::isInt(int64_t value) const
+{
+ return hasInt() && asInt() == value;
+}
+
+inline bool Value::hasIntPtr() const
+{
+ if (is64Bit())
+ return hasInt64();
+ return hasInt32();
+}
+
+inline intptr_t Value::asIntPtr() const
+{
+ if (is64Bit())
+ return asInt64();
+ return asInt32();
+}
+
+inline bool Value::isIntPtr(intptr_t value) const
+{
+ return hasIntPtr() && asIntPtr() == value;
+}
+
+inline bool Value::hasDouble() const
+{
+ return !!as<ConstDoubleValue>();
+}
+
+inline double Value::asDouble() const
+{
+ return as<ConstDoubleValue>()->value();
+}
+
+inline bool Value::isEqualToDouble(double value) const
+{
+ return hasDouble() && asDouble() == value;
+}
+
+inline bool Value::hasFloat() const
+{
+ return !!as<ConstFloatValue>();
+}
+
+inline float Value::asFloat() const
+{
+ return as<ConstFloatValue>()->value();
+}
+
+inline bool Value::hasNumber() const
+{
+ return hasInt() || hasDouble() || hasFloat();
+}
+
+inline bool Value::isNegativeZero() const
+{
+ if (hasDouble()) {
+ double value = asDouble();
+ return !value && std::signbit(value);
+ }
+ if (hasFloat()) {
+ float value = asFloat();
+ return !value && std::signbit(value);
+ }
+ return false;
+}
+
+template<typename T>
+inline bool Value::isRepresentableAs() const
+{
+ switch (opcode()) {
+ case Const32:
+ return B3::isRepresentableAs<T>(asInt32());
+ case Const64:
+ return B3::isRepresentableAs<T>(asInt64());
+ case ConstDouble:
+ return B3::isRepresentableAs<T>(asDouble());
+ case ConstFloat:
+ return B3::isRepresentableAs<T>(asFloat());
+ default:
+ return false;
+ }
+}
+
+template<typename T>
+inline T Value::asNumber() const
+{
+ switch (opcode()) {
+ case Const32:
+ return static_cast<T>(asInt32());
+ case Const64:
+ return static_cast<T>(asInt64());
+ case ConstDouble:
+ return static_cast<T>(asDouble());
+ case ConstFloat:
+ return static_cast<T>(asFloat());
+ default:
+ return T();
+ }
+}
+
+template<typename Functor>
+void Value::walk(const Functor& functor, PhiChildren* phiChildren)
+{
+ GraphNodeWorklist<Value*> worklist;
+ worklist.push(this);
+ while (Value* value = worklist.pop()) {
+ WalkStatus status = functor(value);
+ switch (status) {
+ case Continue:
+ if (value->opcode() == Phi) {
+ if (phiChildren)
+ worklist.pushAll(phiChildren->at(value).values());
+ } else
+ worklist.pushAll(value->children());
+ break;
+ case IgnoreChildren:
+ break;
+ case Stop:
+ return;
+ }
+ }
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueKey.cpp b/Source/JavaScriptCore/b3/B3ValueKey.cpp
new file mode 100644
index 000000000..10edff3c4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueKey.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3ValueKey.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ArgumentRegValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3SlotBaseValue.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+
+namespace JSC { namespace B3 {
+
+ValueKey ValueKey::intConstant(Type type, int64_t value)
+{
+ switch (type) {
+ case Int32:
+ return ValueKey(Const32, Int32, value);
+ case Int64:
+ return ValueKey(Const64, Int64, value);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return ValueKey();
+ }
+}
+
+void ValueKey::dump(PrintStream& out) const
+{
+ out.print(m_type, " ", m_kind, "(", u.indices[0], ", ", u.indices[1], ", ", u.indices[2], ")");
+}
+
+Value* ValueKey::materialize(Procedure& proc, Origin origin) const
+{
+ switch (opcode()) {
+ case FramePointer:
+ return proc.add<Value>(kind(), type(), origin);
+ case Identity:
+ case Sqrt:
+ case SExt8:
+ case SExt16:
+ case SExt32:
+ case ZExt32:
+ case Clz:
+ case Trunc:
+ case IToD:
+ case IToF:
+ case FloatToDouble:
+ case DoubleToFloat:
+ case Check:
+ return proc.add<Value>(kind(), type(), origin, child(proc, 0));
+ case Add:
+ case Sub:
+ case Mul:
+ case Div:
+ case UDiv:
+ case Mod:
+ case UMod:
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case Shl:
+ case SShr:
+ case ZShr:
+ case RotR:
+ case RotL:
+ case Equal:
+ case NotEqual:
+ case LessThan:
+ case GreaterThan:
+ case Above:
+ case Below:
+ case AboveEqual:
+ case BelowEqual:
+ return proc.add<Value>(kind(), type(), origin, child(proc, 0), child(proc, 1));
+ case Select:
+ return proc.add<Value>(kind(), type(), origin, child(proc, 0), child(proc, 1), child(proc, 2));
+ case Const32:
+ return proc.add<Const32Value>(origin, static_cast<int32_t>(value()));
+ case Const64:
+ return proc.add<Const64Value>(origin, value());
+ case ConstDouble:
+ return proc.add<ConstDoubleValue>(origin, doubleValue());
+ case ConstFloat:
+ return proc.add<ConstFloatValue>(origin, floatValue());
+ case ArgumentReg:
+ return proc.add<ArgumentRegValue>(origin, Reg::fromIndex(static_cast<unsigned>(value())));
+ case SlotBase:
+ return proc.add<SlotBaseValue>(origin, proc.stackSlots()[value()]);
+ default:
+ return nullptr;
+ }
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3ValueKey.h b/Source/JavaScriptCore/b3/B3ValueKey.h
new file mode 100644
index 000000000..18b092c59
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueKey.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Kind.h"
+#include "B3Origin.h"
+#include "B3Type.h"
+#include <wtf/HashTable.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+class Value;
+
+// ValueKeys are useful for CSE. They abstractly describe the value that a Value returns when it
+// executes. Any two Values that have the same non-empty ValueKey are guaranteed to return the same
+// result. Operations that have effects, or that can have their behavior affected by other
+// operations' effects, will return an empty ValueKey. You have to use other mechanisms for doing
+// CSE for impure operations.
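+//
+// As a rough sketch (the pass and variable names are illustrative only), a CSE pass might key a
+// hash map on ValueKey and then run a performSubstitution() pass afterwards:
+//
+//     HashMap<ValueKey, Value*> pureValues;
+//     for (Value* value : *block) {
+//         ValueKey key = value->key();
+//         if (!key)
+//             continue;
+//         auto result = pureValues.add(key, value);
+//         if (!result.isNewEntry)
+//             value->replaceWithIdentity(result.iterator->value);
+//     }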
+
+class ValueKey {
+public:
+ ValueKey()
+ {
+ }
+
+ ValueKey(Kind kind, Type type)
+ : m_kind(kind)
+ , m_type(type)
+ {
+ }
+
+ ValueKey(Kind, Type, Value* child);
+
+ ValueKey(Kind, Type, Value* left, Value* right);
+
+ ValueKey(Kind, Type, Value* a, Value* b, Value* c);
+
+ ValueKey(Kind kind, Type type, int64_t value)
+ : m_kind(kind)
+ , m_type(type)
+ {
+ u.value = value;
+ }
+
+ ValueKey(Kind kind, Type type, double value)
+ : m_kind(kind)
+ , m_type(type)
+ {
+ u.doubleValue = value;
+ }
+
+ ValueKey(Kind kind, Type type, float value)
+ : m_kind(kind)
+ , m_type(type)
+ {
+ u.floatValue = value;
+ }
+
+ static ValueKey intConstant(Type type, int64_t value);
+
+ Kind kind() const { return m_kind; }
+ Opcode opcode() const { return kind().opcode(); }
+ Type type() const { return m_type; }
+ unsigned childIndex(unsigned index) const { return u.indices[index]; }
+ Value* child(Procedure&, unsigned index) const;
+ int64_t value() const { return u.value; }
+ double doubleValue() const { return u.doubleValue; }
+ float floatValue() const { return u.floatValue; }
+
+ bool operator==(const ValueKey& other) const
+ {
+ return m_kind == other.m_kind
+ && m_type == other.m_type
+ && u == other.u;
+ }
+
+ bool operator!=(const ValueKey& other) const
+ {
+ return !(*this == other);
+ }
+
+ unsigned hash() const
+ {
+ return m_kind.hash() + m_type + WTF::IntHash<int32_t>::hash(u.indices[0]) + u.indices[1] + u.indices[2];
+ }
+
+ explicit operator bool() const { return *this != ValueKey(); }
+
+ void dump(PrintStream&) const;
+
+ bool canMaterialize() const
+ {
+ if (!*this)
+ return false;
+ switch (opcode()) {
+ case CheckAdd:
+ case CheckSub:
+ case CheckMul:
+ return false;
+ default:
+ return true;
+ }
+ }
+
+ bool isConstant() const
+ {
+ return B3::isConstant(opcode());
+ }
+
+ // Attempts to materialize the Value for this ValueKey. May return nullptr if the value cannot
+ // be materialized. This happens for CheckAdd and friends. You can use canMaterialize() to check
+ // if your key is materializable.
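+ //
+ // Illustrative use only (`proc`, `origin`, `key`, and `block` are assumed to be in scope):
+ //
+ //     if (key.canMaterialize()) {
+ //         if (Value* rematerialized = key.materialize(proc, origin))
+ //             block->append(rematerialized);
+ //     }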
+ Value* materialize(Procedure&, Origin) const;
+
+ ValueKey(WTF::HashTableDeletedValueType)
+ : m_type { Int32 }
+ {
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return *this == ValueKey(WTF::HashTableDeletedValue);
+ }
+
+private:
+ Kind m_kind;
+ Type m_type { Void };
+ union U {
+ unsigned indices[3];
+ int64_t value;
+ double doubleValue;
+ float floatValue;
+
+ U()
+ {
+ indices[0] = 0;
+ indices[1] = 0;
+ indices[2] = 0;
+ }
+
+ bool operator==(const U& other) const
+ {
+ return indices[0] == other.indices[0]
+ && indices[1] == other.indices[1]
+ && indices[2] == other.indices[2];
+ }
+ } u;
+};
+
+struct ValueKeyHash {
+ static unsigned hash(const ValueKey& key) { return key.hash(); }
+ static bool equal(const ValueKey& a, const ValueKey& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::ValueKey> {
+ typedef JSC::B3::ValueKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::ValueKey> : public SimpleClassHashTraits<JSC::B3::ValueKey> {
+ static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueKeyInlines.h b/Source/JavaScriptCore/b3/B3ValueKeyInlines.h
new file mode 100644
index 000000000..14158d501
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueKeyInlines.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include "B3Value.h"
+#include "B3ValueKey.h"
+
+namespace JSC { namespace B3 {
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* child)
+ : m_kind(kind)
+ , m_type(type)
+{
+ u.indices[0] = child->index();
+}
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* left, Value* right)
+ : m_kind(kind)
+ , m_type(type)
+{
+ u.indices[0] = left->index();
+ u.indices[1] = right->index();
+}
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* a, Value* b, Value* c)
+ : m_kind(kind)
+ , m_type(type)
+{
+ u.indices[0] = a->index();
+ u.indices[1] = b->index();
+ u.indices[2] = c->index();
+}
+
+inline Value* ValueKey::child(Procedure& proc, unsigned index) const
+{
+ return proc.values()[index];
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueRep.cpp b/Source/JavaScriptCore/b3/B3ValueRep.cpp
new file mode 100644
index 000000000..9888d228f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueRep.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3ValueRep.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AssemblyHelpers.h"
+#include "JSCInlines.h"
+
+namespace JSC { namespace B3 {
+
+void ValueRep::addUsedRegistersTo(RegisterSet& set) const
+{
+ switch (m_kind) {
+ case WarmAny:
+ case ColdAny:
+ case LateColdAny:
+ case SomeRegister:
+ case SomeEarlyRegister:
+ case Constant:
+ return;
+ case LateRegister:
+ case Register:
+ set.set(reg());
+ return;
+ case Stack:
+ case StackArgument:
+ set.set(MacroAssembler::stackPointerRegister);
+ set.set(GPRInfo::callFrameRegister);
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+RegisterSet ValueRep::usedRegisters() const
+{
+ RegisterSet result;
+ addUsedRegistersTo(result);
+ return result;
+}
+
+void ValueRep::dump(PrintStream& out) const
+{
+ out.print(m_kind);
+ switch (m_kind) {
+ case WarmAny:
+ case ColdAny:
+ case LateColdAny:
+ case SomeRegister:
+ case SomeEarlyRegister:
+ return;
+ case LateRegister:
+ case Register:
+ out.print("(", reg(), ")");
+ return;
+ case Stack:
+ out.print("(", offsetFromFP(), ")");
+ return;
+ case StackArgument:
+ out.print("(", offsetFromSP(), ")");
+ return;
+ case Constant:
+ out.print("(", value(), ")");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void ValueRep::emitRestore(AssemblyHelpers& jit, Reg reg) const
+{
+ if (reg.isGPR()) {
+ switch (kind()) {
+ case LateRegister:
+ case Register:
+ if (isGPR())
+ jit.move(gpr(), reg.gpr());
+ else
+ jit.moveDoubleTo64(fpr(), reg.gpr());
+ break;
+ case Stack:
+ jit.load64(AssemblyHelpers::Address(GPRInfo::callFrameRegister, offsetFromFP()), reg.gpr());
+ break;
+ case Constant:
+ jit.move(AssemblyHelpers::TrustedImm64(value()), reg.gpr());
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ return;
+ }
+
+ switch (kind()) {
+ case LateRegister:
+ case Register:
+ if (isGPR())
+ jit.move64ToDouble(gpr(), reg.fpr());
+ else
+ jit.moveDouble(fpr(), reg.fpr());
+ break;
+ case Stack:
+ jit.loadDouble(AssemblyHelpers::Address(GPRInfo::callFrameRegister, offsetFromFP()), reg.fpr());
+ break;
+ case Constant:
+ jit.move(AssemblyHelpers::TrustedImm64(value()), jit.scratchRegister());
+ jit.move64ToDouble(jit.scratchRegister(), reg.fpr());
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+}
+
+ValueRecovery ValueRep::recoveryForJSValue() const
+{
+ switch (kind()) {
+ case LateRegister:
+ case Register:
+ return ValueRecovery::inGPR(gpr(), DataFormatJS);
+ case Stack:
+ RELEASE_ASSERT(!(offsetFromFP() % sizeof(EncodedJSValue)));
+ return ValueRecovery::displacedInJSStack(
+ VirtualRegister(offsetFromFP() / sizeof(EncodedJSValue)),
+ DataFormatJS);
+ case Constant:
+ return ValueRecovery::constant(JSValue::decode(value()));
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return { };
+ }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, ValueRep::Kind kind)
+{
+ switch (kind) {
+ case ValueRep::WarmAny:
+ out.print("WarmAny");
+ return;
+ case ValueRep::ColdAny:
+ out.print("ColdAny");
+ return;
+ case ValueRep::LateColdAny:
+ out.print("LateColdAny");
+ return;
+ case ValueRep::SomeRegister:
+ out.print("SomeRegister");
+ return;
+ case ValueRep::SomeEarlyRegister:
+ out.print("SomeEarlyRegister");
+ return;
+ case ValueRep::Register:
+ out.print("Register");
+ return;
+ case ValueRep::LateRegister:
+ out.print("LateRegister");
+ return;
+ case ValueRep::Stack:
+ out.print("Stack");
+ return;
+ case ValueRep::StackArgument:
+ out.print("StackArgument");
+ return;
+ case ValueRep::Constant:
+ out.print("Constant");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3ValueRep.h b/Source/JavaScriptCore/b3/B3ValueRep.h
new file mode 100644
index 000000000..5f9635e7a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3ValueRep.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "JSCJSValue.h"
+#include "Reg.h"
+#include "RegisterSet.h"
+#include "ValueRecovery.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class AssemblyHelpers;
+
+namespace B3 {
+
+// We use this class to describe value representations at stackmaps. It's used both to force a
+// representation and to get the representation. When the B3 client forces a representation, we say
+// that it's an input. When B3 tells the client what representation it picked, we say that it's an
+// output.
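+//
+// A sketch of how a client might force input representations on a patchpoint and then read the
+// output representations (names like `ptr` and `len` are assumed here; see B3StackmapValue.h and
+// B3PatchpointValue.h for the real constraint API):
+//
+//     patchpoint->append(ConstrainedValue(ptr, ValueRep::SomeRegister));        // input rep
+//     patchpoint->append(ConstrainedValue(len, ValueRep::reg(GPRInfo::regT1))); // input rep
+//     patchpoint->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+//         // params[0] and params[1] are the output reps that B3 actually picked.
+//     });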
+
+class ValueRep {
+public:
+ enum Kind {
+ // As an input representation, this means that B3 can pick any representation. As an output
+ // representation, this means that we don't know. This will only arise as an output
+ // representation for the active arguments of Check/CheckAdd/CheckSub/CheckMul.
+ WarmAny,
+
+ // Same as WarmAny, but implies that the use is cold. A cold use is not counted as a use for
+ // computing the priority of the used temporary.
+ ColdAny,
+
+ // Same as ColdAny, but also implies that the use occurs after all other effects of the stackmap
+ // value.
+ LateColdAny,
+
+ // As an input representation, this means that B3 should pick some register. It could be a
+ // register that this claims to clobber!
+ SomeRegister,
+
+ // As an input representation, this tells us that B3 should pick some register, but implies
+ // that the def happens before any of the effects of the stackmap. This is only valid for
+ // the result constraint of a Patchpoint.
+ SomeEarlyRegister,
+
+ // As an input representation, this forces a particular register. As an output
+ // representation, this tells us what register B3 picked.
+ Register,
+
+ // As an input representation, this forces a particular register and states that
+ // the register is used late. This means that the register is used after the result
+ // is defined (i.e., the result will interfere with this as an input).
+ // It's not a valid output representation.
+ LateRegister,
+
+ // As an output representation, this tells us what stack slot B3 picked. It's not a valid
+ // input representation.
+ Stack,
+
+ // As an input representation, this forces the value to end up in the argument area at some
+ // offset.
+ StackArgument,
+
+ // As an output representation, this tells us that B3 constant-folded the value.
+ Constant
+ };
+
+ ValueRep()
+ : m_kind(WarmAny)
+ {
+ }
+
+ explicit ValueRep(Reg reg)
+ : m_kind(Register)
+ {
+ u.reg = reg;
+ }
+
+ ValueRep(Kind kind)
+ : m_kind(kind)
+ {
+ ASSERT(kind == WarmAny || kind == ColdAny || kind == LateColdAny || kind == SomeRegister || kind == SomeEarlyRegister);
+ }
+
+ static ValueRep reg(Reg reg)
+ {
+ return ValueRep(reg);
+ }
+
+ static ValueRep lateReg(Reg reg)
+ {
+ ValueRep result(reg);
+ result.m_kind = LateRegister;
+ return result;
+ }
+
+ static ValueRep stack(intptr_t offsetFromFP)
+ {
+ ValueRep result;
+ result.m_kind = Stack;
+ result.u.offsetFromFP = offsetFromFP;
+ return result;
+ }
+
+ static ValueRep stackArgument(intptr_t offsetFromSP)
+ {
+ ValueRep result;
+ result.m_kind = StackArgument;
+ result.u.offsetFromSP = offsetFromSP;
+ return result;
+ }
+
+ static ValueRep constant(int64_t value)
+ {
+ ValueRep result;
+ result.m_kind = Constant;
+ result.u.value = value;
+ return result;
+ }
+
+ static ValueRep constantDouble(double value)
+ {
+ return ValueRep::constant(bitwise_cast<int64_t>(value));
+ }
+
+ Kind kind() const { return m_kind; }
+
+ bool operator==(const ValueRep& other) const
+ {
+ if (kind() != other.kind())
+ return false;
+ switch (kind()) {
+ case LateRegister:
+ case Register:
+ return u.reg == other.u.reg;
+ case Stack:
+ return u.offsetFromFP == other.u.offsetFromFP;
+ case StackArgument:
+ return u.offsetFromSP == other.u.offsetFromSP;
+ case Constant:
+ return u.value == other.u.value;
+ default:
+ return true;
+ }
+ }
+
+ bool operator!=(const ValueRep& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const { return kind() != WarmAny; }
+
+ bool isAny() const { return kind() == WarmAny || kind() == ColdAny || kind() == LateColdAny; }
+
+ bool isReg() const { return kind() == Register || kind() == LateRegister; }
+
+ Reg reg() const
+ {
+ ASSERT(isReg());
+ return u.reg;
+ }
+
+ bool isGPR() const { return isReg() && reg().isGPR(); }
+ bool isFPR() const { return isReg() && reg().isFPR(); }
+
+ GPRReg gpr() const { return reg().gpr(); }
+ FPRReg fpr() const { return reg().fpr(); }
+
+ bool isStack() const { return kind() == Stack; }
+
+ intptr_t offsetFromFP() const
+ {
+ ASSERT(isStack());
+ return u.offsetFromFP;
+ }
+
+ bool isStackArgument() const { return kind() == StackArgument; }
+
+ intptr_t offsetFromSP() const
+ {
+ ASSERT(isStackArgument());
+ return u.offsetFromSP;
+ }
+
+ bool isConstant() const { return kind() == Constant; }
+
+ int64_t value() const
+ {
+ ASSERT(isConstant());
+ return u.value;
+ }
+
+ double doubleValue() const
+ {
+ return bitwise_cast<double>(value());
+ }
+
+ ValueRep withOffset(intptr_t offset) const
+ {
+ switch (kind()) {
+ case Stack:
+ return stack(offsetFromFP() + offset);
+ case StackArgument:
+ return stackArgument(offsetFromSP() + offset);
+ default:
+ return *this;
+ }
+ }
+
+ void addUsedRegistersTo(RegisterSet&) const;
+
+ RegisterSet usedRegisters() const;
+
+ // Get the used registers for a vector of ValueReps.
+ template<typename VectorType>
+ static RegisterSet usedRegisters(const VectorType& vector)
+ {
+ RegisterSet result;
+ for (const ValueRep& value : vector)
+ value.addUsedRegistersTo(result);
+ return result;
+ }
+
+ JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+ // This has a simple contract: it emits code to restore the value into the given register. This
+ // will work even if it requires moving bits between a GPR and an FPR.
+ void emitRestore(AssemblyHelpers&, Reg) const;
+
+ // Computes the ValueRecovery assuming that the Value* was for a JSValue (i.e. Int64).
+ // NOTE: We should avoid putting JSValue-related methods in B3, but this was hard to avoid
+ // because some parts of JSC use ValueRecovery like a general "where my bits at" object, almost
+ // exactly like ValueRep.
+ ValueRecovery recoveryForJSValue() const;
+
+private:
+ Kind m_kind;
+ union U {
+ Reg reg;
+ intptr_t offsetFromFP;
+ intptr_t offsetFromSP;
+ int64_t value;
+
+ U()
+ {
+ memset(this, 0, sizeof(*this));
+ }
+ } u;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::B3::ValueRep::Kind);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3Variable.cpp b/Source/JavaScriptCore/b3/B3Variable.cpp
new file mode 100644
index 000000000..2314ee2dd
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Variable.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3Variable.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+Variable::~Variable()
+{
+}
+
+void Variable::dump(PrintStream& out) const
+{
+ out.print("var", m_index);
+}
+
+void Variable::deepDump(PrintStream& out) const
+{
+ out.print(m_type, " var", m_index);
+}
+
+Variable::Variable(Type type)
+ : m_type(type)
+{
+ ASSERT(type != Void);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3Variable.h b/Source/JavaScriptCore/b3/B3Variable.h
new file mode 100644
index 000000000..f4d610ff7
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3Variable.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class Variable {
+ WTF_MAKE_NONCOPYABLE(Variable);
+ WTF_MAKE_FAST_ALLOCATED;
+
+public:
+ ~Variable();
+
+ Type type() const { return m_type; }
+ unsigned index() const { return m_index; }
+
+ void dump(PrintStream&) const;
+ void deepDump(PrintStream&) const;
+
+private:
+ friend class Procedure;
+ friend class SparseCollection<Variable>;
+
+ Variable(Type);
+
+ unsigned m_index;
+ Type m_type;
+};
+
+class DeepVariableDump {
+public:
+ DeepVariableDump(const Variable* variable)
+ : m_variable(variable)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_variable)
+ m_variable->deepDump(out);
+ else
+ out.print("<null>");
+ }
+
+private:
+ const Variable* m_variable;
+};
+
+inline DeepVariableDump deepDump(const Variable* variable)
+{
+ return DeepVariableDump(variable);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3VariableValue.cpp b/Source/JavaScriptCore/b3/B3VariableValue.cpp
new file mode 100644
index 000000000..6aeef479b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3VariableValue.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3VariableValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Variable.h"
+
+namespace JSC { namespace B3 {
+
+VariableValue::~VariableValue()
+{
+}
+
+void VariableValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma, pointerDump(m_variable));
+}
+
+Value* VariableValue::cloneImpl() const
+{
+ return new VariableValue(*this);
+}
+
+VariableValue::VariableValue(Kind kind, Origin origin, Variable* variable, Value* value)
+ : Value(CheckedOpcode, kind, Void, origin, value)
+ , m_variable(variable)
+{
+ ASSERT(kind == Set);
+}
+
+VariableValue::VariableValue(Kind kind, Origin origin, Variable* variable)
+ : Value(CheckedOpcode, kind, variable->type(), origin)
+ , m_variable(variable)
+{
+ ASSERT(kind == Get);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3VariableValue.h b/Source/JavaScriptCore/b3/B3VariableValue.h
new file mode 100644
index 000000000..067ba42c6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3VariableValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class Variable;
+
+class JS_EXPORT_PRIVATE VariableValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == Get || kind == Set; }
+
+ ~VariableValue();
+
+ Variable* variable() const { return m_variable; }
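+
+ // Illustrative only: clients normally build these through Procedure and BasicBlock, e.g.
+ //
+ //     Variable* var = proc.addVariable(Int32);
+ //     block->appendNew<VariableValue>(proc, Set, origin, var, someInt32Value);
+ //     Value* current = block->appendNew<VariableValue>(proc, Get, origin, var);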
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ // Use this for Set.
+ VariableValue(Kind, Origin, Variable*, Value*);
+
+ // Use this for Get.
+ VariableValue(Kind, Origin, Variable*);
+
+ Variable* m_variable;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3WasmAddressValue.cpp b/Source/JavaScriptCore/b3/B3WasmAddressValue.cpp
new file mode 100644
index 000000000..57d762852
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3WasmAddressValue.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3WasmAddressValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+WasmAddressValue::~WasmAddressValue()
+{
+}
+
+void WasmAddressValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma, m_pinnedGPR);
+}
+
+Value* WasmAddressValue::cloneImpl() const
+{
+ return new WasmAddressValue(*this);
+}
+
+WasmAddressValue::WasmAddressValue(Origin origin, Value* value, GPRReg pinnedGPR)
+ : Value(CheckedOpcode, WasmAddress, Int64, origin, value)
+ , m_pinnedGPR(pinnedGPR)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/B3WasmAddressValue.h b/Source/JavaScriptCore/b3/B3WasmAddressValue.h
new file mode 100644
index 000000000..d93860275
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3WasmAddressValue.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE WasmAddressValue : public Value {
+public:
+ static bool accepts(Kind kind) { return kind == WasmAddress; }
+
+ ~WasmAddressValue();
+
+ GPRReg pinnedGPR() const { return m_pinnedGPR; }
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ WasmAddressValue(Origin, Value*, GPRReg);
+
+ GPRReg m_pinnedGPR;
+};
+
+} } // namespace JSC::B3
+
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp b/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp
new file mode 100644
index 000000000..b3a3290dc
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3WasmBoundsCheckValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+WasmBoundsCheckValue::~WasmBoundsCheckValue()
+{
+}
+
+WasmBoundsCheckValue::WasmBoundsCheckValue(Origin origin, Value* ptr, GPRReg pinnedGPR, unsigned offset)
+ : Value(CheckedOpcode, WasmBoundsCheck, origin, ptr)
+ , m_pinnedGPR(pinnedGPR)
+ , m_offset(offset)
+{
+}
+
+Value* WasmBoundsCheckValue::cloneImpl() const
+{
+ return new WasmBoundsCheckValue(*this);
+}
+
+void WasmBoundsCheckValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+ out.print(comma, "sizeRegister = ", m_pinnedGPR, ", offset = ", m_offset);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h b/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h
new file mode 100644
index 000000000..ccc54b86b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include "CCallHelpers.h"
+
+namespace JSC { namespace B3 {
+
+class WasmBoundsCheckValue : public Value {
+public:
+ static bool accepts(Kind kind)
+ {
+ switch (kind.opcode()) {
+ case WasmBoundsCheck:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ ~WasmBoundsCheckValue();
+
+ GPRReg pinnedGPR() const { return m_pinnedGPR; }
+ unsigned offset() const { return m_offset; }
+
+protected:
+ void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+ Value* cloneImpl() const override;
+
+private:
+ friend class Procedure;
+
+ JS_EXPORT_PRIVATE WasmBoundsCheckValue(Origin, Value* ptr, GPRReg pinnedGPR, unsigned offset);
+
+ GPRReg m_pinnedGPR;
+ unsigned m_offset;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirAllocateStack.cpp b/Source/JavaScriptCore/b3/air/AirAllocateStack.cpp
new file mode 100644
index 000000000..de9297f26
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirAllocateStack.cpp
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirAllocateStack.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+#include "StackAlignment.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+const bool verbose = false;
+
+bool attemptAssignment(
+ StackSlot* slot, intptr_t offsetFromFP, const Vector<StackSlot*>& otherSlots)
+{
+ if (verbose)
+ dataLog("Attempting to assign ", pointerDump(slot), " to ", offsetFromFP, " with interference ", pointerListDump(otherSlots), "\n");
+
+ // Need to align it to the slot's desired alignment.
+ offsetFromFP = -WTF::roundUpToMultipleOf(slot->alignment(), -offsetFromFP);
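+ // For example, a slot with 8-byte alignment asked to sit at offsetFromFP = -12 gets pushed down
+ // to -16 by this rounding.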
+
+ for (StackSlot* otherSlot : otherSlots) {
+ if (!otherSlot->offsetFromFP())
+ continue;
+ bool overlap = WTF::rangesOverlap(
+ offsetFromFP,
+ offsetFromFP + static_cast<intptr_t>(slot->byteSize()),
+ otherSlot->offsetFromFP(),
+ otherSlot->offsetFromFP() + static_cast<intptr_t>(otherSlot->byteSize()));
+ if (overlap)
+ return false;
+ }
+
+ if (verbose)
+ dataLog("Assigned ", pointerDump(slot), " to ", offsetFromFP, "\n");
+ slot->setOffsetFromFP(offsetFromFP);
+ return true;
+}
+
+void assign(StackSlot* slot, const Vector<StackSlot*>& otherSlots)
+{
+ if (verbose)
+ dataLog("Attempting to assign ", pointerDump(slot), " with interference ", pointerListDump(otherSlots), "\n");
+
+ if (attemptAssignment(slot, -static_cast<intptr_t>(slot->byteSize()), otherSlots))
+ return;
+
+ for (StackSlot* otherSlot : otherSlots) {
+ if (!otherSlot->offsetFromFP())
+ continue;
+ bool didAssign = attemptAssignment(
+ slot, otherSlot->offsetFromFP() - static_cast<intptr_t>(slot->byteSize()), otherSlots);
+ if (didAssign)
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // anonymous namespace
+
+void allocateStack(Code& code)
+{
+ PhaseScope phaseScope(code, "allocateStack");
+
+ // Allocate all of the escaped slots in order. This is kind of a crazy algorithm to allow for
+ // the possibility of stack slots being assigned frame offsets before we even get here.
+ ASSERT(!code.frameSize());
+ Vector<StackSlot*> assignedEscapedStackSlots;
+ Vector<StackSlot*> escapedStackSlotsWorklist;
+ for (StackSlot* slot : code.stackSlots()) {
+ if (slot->isLocked()) {
+ if (slot->offsetFromFP())
+ assignedEscapedStackSlots.append(slot);
+ else
+ escapedStackSlotsWorklist.append(slot);
+ } else {
+ // It would be super strange to have an unlocked stack slot that has an offset already.
+ ASSERT(!slot->offsetFromFP());
+ }
+ }
+ // This is a fairly expensive loop, but it's OK because we'll usually only have a handful of
+ // escaped stack slots.
+ while (!escapedStackSlotsWorklist.isEmpty()) {
+ StackSlot* slot = escapedStackSlotsWorklist.takeLast();
+ assign(slot, assignedEscapedStackSlots);
+ assignedEscapedStackSlots.append(slot);
+ }
+
+ // Now we handle the spill slots.
+ StackSlotLiveness liveness(code);
+ IndexMap<StackSlot, HashSet<StackSlot*>> interference(code.stackSlots().size());
+ Vector<StackSlot*> slots;
+
+ for (BasicBlock* block : code) {
+ StackSlotLiveness::LocalCalc localCalc(liveness, block);
+
+ auto interfere = [&] (unsigned instIndex) {
+ if (verbose)
+ dataLog("Interfering: ", WTF::pointerListDump(localCalc.live()), "\n");
+
+ Inst::forEachDef<Arg>(
+ block->get(instIndex), block->get(instIndex + 1),
+ [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+ if (!arg.isStack())
+ return;
+ StackSlot* slot = arg.stackSlot();
+ if (slot->kind() != StackSlotKind::Spill)
+ return;
+
+ for (StackSlot* otherSlot : localCalc.live()) {
+ interference[slot].add(otherSlot);
+ interference[otherSlot].add(slot);
+ }
+ });
+ };
+
+ for (unsigned instIndex = block->size(); instIndex--;) {
+ if (verbose)
+ dataLog("Analyzing: ", block->at(instIndex), "\n");
+
+ // Kill dead stores. For simplicity we say that a store is killable if it has only late
+ // defs and those late defs are to things that are dead right now. We only do that
+ // because that's the only kind of dead stack store we will see here.
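+ // For example, a store whose only Def is a spill slot that is dead at this point is replaced
+ // with an empty Inst here and then removed once the whole block has been scanned.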
+ Inst& inst = block->at(instIndex);
+ if (!inst.hasNonArgEffects()) {
+ bool ok = true;
+ inst.forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+ if (Arg::isEarlyDef(role)) {
+ ok = false;
+ return;
+ }
+ if (!Arg::isLateDef(role))
+ return;
+ if (!arg.isStack()) {
+ ok = false;
+ return;
+ }
+ StackSlot* slot = arg.stackSlot();
+ if (slot->kind() != StackSlotKind::Spill) {
+ ok = false;
+ return;
+ }
+
+ if (localCalc.isLive(slot)) {
+ ok = false;
+ return;
+ }
+ });
+ if (ok)
+ inst = Inst();
+ }
+
+ interfere(instIndex);
+ localCalc.execute(instIndex);
+ }
+ interfere(-1);
+
+ block->insts().removeAllMatching(
+ [&] (const Inst& inst) -> bool {
+ return !inst;
+ });
+ }
+
+ if (verbose) {
+ for (StackSlot* slot : code.stackSlots())
+ dataLog("Interference of ", pointerDump(slot), ": ", pointerListDump(interference[slot]), "\n");
+ }
+
+ // Now we assign stack locations. At its heart this algorithm is just first-fit. For each
+ // StackSlot we just want to find the offsetFromFP that is closest to zero while ensuring no
+ // overlap with the other StackSlots that it interferes with.
+ Vector<StackSlot*> otherSlots = assignedEscapedStackSlots;
+ for (StackSlot* slot : code.stackSlots()) {
+ if (slot->offsetFromFP()) {
+ // Already assigned an offset.
+ continue;
+ }
+
+ HashSet<StackSlot*>& interferingSlots = interference[slot];
+ otherSlots.resize(assignedEscapedStackSlots.size());
+ otherSlots.resize(assignedEscapedStackSlots.size() + interferingSlots.size());
+ unsigned nextIndex = assignedEscapedStackSlots.size();
+ for (StackSlot* otherSlot : interferingSlots)
+ otherSlots[nextIndex++] = otherSlot;
+
+ assign(slot, otherSlots);
+ }
+
+ // Figure out how much stack we're using for stack slots.
+ unsigned frameSizeForStackSlots = 0;
+ for (StackSlot* slot : code.stackSlots()) {
+ frameSizeForStackSlots = std::max(
+ frameSizeForStackSlots,
+ static_cast<unsigned>(-slot->offsetFromFP()));
+ }
+
+ frameSizeForStackSlots = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSizeForStackSlots);
+
+ // Now we need to deduce how much argument area we need.
+ for (BasicBlock* block : code) {
+ for (Inst& inst : *block) {
+ for (Arg& arg : inst.args) {
+ if (arg.isCallArg()) {
+ // For now, we assume that we use 8 bytes of the call arg. But that's not
+ // such an awesome assumption.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150454
+ ASSERT(arg.offset() >= 0);
+ code.requestCallArgAreaSizeInBytes(arg.offset() + 8);
+ }
+ }
+ }
+ }
+
+ code.setFrameSize(frameSizeForStackSlots + code.callArgAreaSizeInBytes());
+
+ // Finally, transform the code to use Addr's instead of StackSlot's. This is a lossless
+ // transformation since we can search the StackSlots array to figure out which StackSlot any
+ // offset-from-FP refers to.
+
+ // FIXME: This may produce addresses that aren't valid if we end up with a ginormous stack frame.
+ // We would have to scavenge for temporaries if this happened. Fortunately, this case will be
+ // extremely rare so we can do crazy things when it arises.
+ // https://bugs.webkit.org/show_bug.cgi?id=152530
+
+ InsertionSet insertionSet(code);
+ for (BasicBlock* block : code) {
+ for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+ Inst& inst = block->at(instIndex);
+ inst.forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width width) {
+ auto stackAddr = [&] (int32_t offset) -> Arg {
+ return Arg::stackAddr(offset, code.frameSize(), width);
+ };
+
+ switch (arg.kind()) {
+ case Arg::Stack: {
+ StackSlot* slot = arg.stackSlot();
+ if (Arg::isZDef(role)
+ && slot->kind() == StackSlotKind::Spill
+ && slot->byteSize() > Arg::bytes(width)) {
+ // Currently we only handle this simple case because it's the only one
+ // that arises: ZDef's are only 32-bit right now. So, when we hit these
+ // assertions it means that we need to implement those other kinds of
+ // zero fills.
+ RELEASE_ASSERT(slot->byteSize() == 8);
+ RELEASE_ASSERT(width == Arg::Width32);
+
+ RELEASE_ASSERT(isValidForm(StoreZero32, Arg::Stack));
+ insertionSet.insert(
+ instIndex + 1, StoreZero32, inst.origin,
+ stackAddr(arg.offset() + 4 + slot->offsetFromFP()));
+ }
+ arg = stackAddr(arg.offset() + slot->offsetFromFP());
+ break;
+ }
+ case Arg::CallArg:
+ arg = stackAddr(arg.offset() - code.frameSize());
+ break;
+ default:
+ break;
+ }
+ }
+ );
+ }
+ insertionSet.execute(block);
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/Source/JavaScriptCore/b3/air/AirAllocateStack.h b/Source/JavaScriptCore/b3/air/AirAllocateStack.h
new file mode 100644
index 000000000..31519d246
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirAllocateStack.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This allocates StackSlots to places on the stack. It first allocates the pinned ones in index
+// order and then it allocates the rest using first fit. Takes the opportunity to kill dead
+// assignments to stack slots, since it knows which ones are live. Also fixes ZDefs to anonymous
+// stack slots.
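+//
+// Illustrative example of the first-fit search below: two 8-byte spill slots that never interfere
+// can both land at offsetFromFP = -8, while a third slot that interferes with both gets pushed to
+// the next non-overlapping offset, -16.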
+
+void allocateStack(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirArg.cpp b/Source/JavaScriptCore/b3/air/AirArg.cpp
new file mode 100644
index 000000000..c777928b7
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirArg.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirArg.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "B3Value.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool Arg::isStackMemory() const
+{
+ switch (kind()) {
+ case Addr:
+ return base() == Air::Tmp(GPRInfo::callFrameRegister)
+ || base() == Air::Tmp(MacroAssembler::stackPointerRegister);
+ case Stack:
+ case CallArg:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool Arg::isRepresentableAs(Width width, Signedness signedness) const
+{
+ return isRepresentableAs(width, signedness, value());
+}
+
+bool Arg::usesTmp(Air::Tmp tmp) const
+{
+ bool uses = false;
+ const_cast<Arg*>(this)->forEachTmpFast(
+ [&] (Air::Tmp otherTmp) {
+ if (otherTmp == tmp)
+ uses = true;
+ });
+ return uses;
+}
+
+bool Arg::canRepresent(Value* value) const
+{
+ return isType(typeForB3Type(value->type()));
+}
+
+bool Arg::isCompatibleType(const Arg& other) const
+{
+ if (hasType())
+ return other.isType(type());
+ if (other.hasType())
+ return isType(other.type());
+ return true;
+}
+
+unsigned Arg::jsHash() const
+{
+ unsigned result = static_cast<unsigned>(m_kind);
+
+ switch (m_kind) {
+ case Invalid:
+ case Special:
+ break;
+ case Tmp:
+ result += m_base.internalValue();
+ break;
+ case Imm:
+ case BitImm:
+ case CallArg:
+ case RelCond:
+ case ResCond:
+ case DoubleCond:
+ case WidthArg:
+ result += static_cast<unsigned>(m_offset);
+ break;
+ case BigImm:
+ case BitImm64:
+ result += static_cast<unsigned>(m_offset);
+ result += static_cast<unsigned>(m_offset >> 32);
+ break;
+ case Addr:
+ result += m_offset;
+ result += m_base.internalValue();
+ break;
+ case Index:
+ result += static_cast<unsigned>(m_offset);
+ result += m_scale;
+ result += m_base.internalValue();
+ result += m_index.internalValue();
+ break;
+ case Stack:
+ result += static_cast<unsigned>(m_scale);
+ result += stackSlot()->index();
+ break;
+ }
+
+ return result;
+}
+
+void Arg::dump(PrintStream& out) const
+{
+ switch (m_kind) {
+ case Invalid:
+ out.print("<invalid>");
+ return;
+ case Tmp:
+ out.print(tmp());
+ return;
+ case Imm:
+ out.print("$", m_offset);
+ return;
+ case BigImm:
+ out.printf("$0x%llx", static_cast<long long unsigned>(m_offset));
+ return;
+ case BitImm:
+ out.print("$", m_offset);
+ return;
+ case BitImm64:
+ out.printf("$0x%llx", static_cast<long long unsigned>(m_offset));
+ return;
+ case Addr:
+ if (offset())
+ out.print(offset());
+ out.print("(", base(), ")");
+ return;
+ case Index:
+ if (offset())
+ out.print(offset());
+ out.print("(", base(), ",", index());
+ if (scale() != 1)
+ out.print(",", scale());
+ out.print(")");
+ return;
+ case Stack:
+ if (offset())
+ out.print(offset());
+ out.print("(", pointerDump(stackSlot()), ")");
+ return;
+ case CallArg:
+ if (offset())
+ out.print(offset());
+ out.print("(callArg)");
+ return;
+ case RelCond:
+ out.print(asRelationalCondition());
+ return;
+ case ResCond:
+ out.print(asResultCondition());
+ return;
+ case DoubleCond:
+ out.print(asDoubleCondition());
+ return;
+ case Special:
+ out.print(pointerDump(special()));
+ return;
+ case WidthArg:
+ out.print(width());
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+using namespace JSC::B3::Air;
+
+void printInternal(PrintStream& out, Arg::Kind kind)
+{
+ switch (kind) {
+ case Arg::Invalid:
+ out.print("Invalid");
+ return;
+ case Arg::Tmp:
+ out.print("Tmp");
+ return;
+ case Arg::Imm:
+ out.print("Imm");
+ return;
+ case Arg::BigImm:
+ out.print("BigImm");
+ return;
+ case Arg::BitImm:
+ out.print("BitImm");
+ return;
+ case Arg::BitImm64:
+ out.print("BitImm64");
+ return;
+ case Arg::Addr:
+ out.print("Addr");
+ return;
+ case Arg::Stack:
+ out.print("Stack");
+ return;
+ case Arg::CallArg:
+ out.print("CallArg");
+ return;
+ case Arg::Index:
+ out.print("Index");
+ return;
+ case Arg::RelCond:
+ out.print("RelCond");
+ return;
+ case Arg::ResCond:
+ out.print("ResCond");
+ return;
+ case Arg::DoubleCond:
+ out.print("DoubleCond");
+ return;
+ case Arg::Special:
+ out.print("Special");
+ return;
+ case Arg::WidthArg:
+ out.print("WidthArg");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Role role)
+{
+ switch (role) {
+ case Arg::Use:
+ out.print("Use");
+ return;
+ case Arg::Def:
+ out.print("Def");
+ return;
+ case Arg::UseDef:
+ out.print("UseDef");
+ return;
+ case Arg::ZDef:
+ out.print("ZDef");
+ return;
+ case Arg::UseZDef:
+ out.print("UseZDef");
+ return;
+ case Arg::UseAddr:
+ out.print("UseAddr");
+ return;
+ case Arg::ColdUse:
+ out.print("ColdUse");
+ return;
+ case Arg::LateUse:
+ out.print("LateUse");
+ return;
+ case Arg::LateColdUse:
+ out.print("LateColdUse");
+ return;
+ case Arg::EarlyDef:
+ out.print("EarlyDef");
+ return;
+ case Arg::Scratch:
+ out.print("Scratch");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Type type)
+{
+ switch (type) {
+ case Arg::GP:
+ out.print("GP");
+ return;
+ case Arg::FP:
+ out.print("FP");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Width width)
+{
+ switch (width) {
+ case Arg::Width8:
+ out.print("8");
+ return;
+ case Arg::Width16:
+ out.print("16");
+ return;
+ case Arg::Width32:
+ out.print("32");
+ return;
+ case Arg::Width64:
+ out.print("64");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Signedness signedness)
+{
+ switch (signedness) {
+ case Arg::Signed:
+ out.print("Signed");
+ return;
+ case Arg::Unsigned:
+ out.print("Unsigned");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirArg.h b/Source/JavaScriptCore/b3/air/AirArg.h
new file mode 100644
index 000000000..13db1ce7e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirArg.h
@@ -0,0 +1,1383 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirTmp.h"
+#include "B3Common.h"
+#include "B3Type.h"
+#include <wtf/Optional.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+class Value;
+
+namespace Air {
+
+class Special;
+class StackSlot;
+
+// This class name is also intentionally terse because we will say it a lot. You'll see code like
+// Inst(..., Arg::imm(5), Arg::addr(thing, blah), ...)
+class Arg {
+public:
+ // These enum members are intentionally terse because we have to mention them a lot.
+ enum Kind : int8_t {
+ Invalid,
+
+ // This is either an unassigned temporary or a register. All unassigned temporaries
+ // eventually become registers.
+ Tmp,
+
+ // This is an immediate that the instruction will materialize. Imm is the immediate that can be
+ // inlined into most instructions, while BigImm indicates a constant materialization and is
+ // usually only usable with Move. Specials may also admit it, for example for stackmaps used for
+ // OSR exit and tail calls.
+ // BitImm is an immediate for bitwise operations (And, Xor, etc.).
+ Imm,
+ BigImm,
+ BitImm,
+ BitImm64,
+
+ // These are the addresses. Instructions may load from (Use), store to (Def), or evaluate
+ // (UseAddr) addresses.
+ Addr,
+ Stack,
+ CallArg,
+ Index,
+
+ // Immediate operands that customize the behavior of an operation. You can think of them as
+ // secondary opcodes. They are always "Use"'d.
+ RelCond,
+ ResCond,
+ DoubleCond,
+ Special,
+ WidthArg
+ };
+
+ enum Role : int8_t {
+ // Use means that the Inst will read from this value before doing anything else.
+ //
+ // For Tmp: The Inst will read this Tmp.
+ // For Arg::addr and friends: The Inst will load from this address.
+ // For Arg::imm and friends: The Inst will materialize and use this immediate.
+ // For RelCond/ResCond/Special: This is the only valid role for these kinds.
+ //
+ // Note that Use of an address does not mean escape. It only means that the instruction will
+ // load from the address before doing anything else. This is a bit tricky; for example
+ // Specials could theoretically squirrel away the address and effectively escape it. However,
+ // this is not legal. On the other hand, any address other than Stack is presumed to be
+ // always escaping, and Stack is presumed to be always escaping if it's Locked.
+ Use,
+
+ // Exactly like Use, except that it also implies that the use is cold: that is, replacing the
+ // use with something on the stack is free.
+ ColdUse,
+
+ // LateUse means that the Inst will read from this value after doing its Def's. Note that LateUse
+ // on an Addr or Index still means Use on the internal temporaries. Note that specifying the
+ // same Tmp once as Def and once as LateUse has undefined behavior: the use may happen before
+ // the def, or it may happen after it.
+ LateUse,
+
+ // Combination of LateUse and ColdUse.
+ LateColdUse,
+
+ // Def means that the Inst will write to this value after doing everything else.
+ //
+ // For Tmp: The Inst will write to this Tmp.
+ // For Arg::addr and friends: The Inst will store to this address.
+ // This isn't valid for any other kinds.
+ //
+ // Like Use of address, Def of address does not mean escape.
+ Def,
+
+ // This is a special variant of Def that implies that the upper bits of the target register are
+ // zero-filled. Specifically, if the Width of a ZDef is less than the largest possible width of
+ // the argument (for example, we're on a 64-bit machine and we have a Width32 ZDef of a GPR) then
+ // this has different implications for the upper bits (i.e. the top 32 bits in our example)
+ // depending on the kind of the argument:
+ //
+ // For register: the upper bits are zero-filled.
+ // For anonymous stack slot: the upper bits are zero-filled.
+ // For address: the upper bits are not touched (i.e. we do a 32-bit store in our example).
+ // For tmp: either the upper bits are not touched or they are zero-filled, and we won't know
+ // which until we lower the tmp to either a StackSlot or a Reg.
+ //
+ // The behavior of ZDef is consistent with what happens when you perform 32-bit operations on a
+ // 64-bit GPR. It's not consistent with what happens with 8-bit or 16-bit Defs on x86 GPRs, or
+ // what happens with float Defs in ARM NEON or X86 SSE. Hence why we have both Def and ZDef.
+ ZDef,
+
+ // This is a combined Use and Def. It means that both things happen.
+ UseDef,
+
+ // This is a combined Use and ZDef. It means that both things happen.
+ UseZDef,
+
+ // This is like Def, but implies that the assignment occurs before the start of the Inst's
+ // execution rather than after. Note that specifying the same Tmp once as EarlyDef and once
+ // as Use has undefined behavior: the use may happen before the def, or it may happen after
+ // it.
+ EarlyDef,
+
+ // Some instructions need a scratch register. We model this by saying that the temporary is
+ // defined early and used late. This role implies that.
+ Scratch,
+
+ // This is a special kind of use that is only valid for addresses. It means that the
+ // instruction will evaluate the address expression and consume the effective address, but it
+ // will neither load nor store. This is an escaping use, because now the address may be
+ // passed along to who-knows-where. Note that this isn't really a Use of the Arg, but it does
+ // imply that we're Use'ing any registers that the Arg contains.
+ UseAddr
+ };
+
+ enum Type : int8_t {
+ GP,
+ FP
+ };
+
+ static const unsigned numTypes = 2;
+
+ template<typename Functor>
+ static void forEachType(const Functor& functor)
+ {
+ functor(GP);
+ functor(FP);
+ }
+
+ enum Width : int8_t {
+ Width8,
+ Width16,
+ Width32,
+ Width64
+ };
+
+ static Width pointerWidth()
+ {
+ if (sizeof(void*) == 8)
+ return Width64;
+ return Width32;
+ }
+
+ enum Signedness : int8_t {
+ Signed,
+ Unsigned
+ };
+
+ // Returns true if the Role implies that the Inst will Use the Arg. It's deliberately false for
+ // UseAddr, since isAnyUse() for an Arg::addr means that we are loading from the address.
+ static bool isAnyUse(Role role)
+ {
+ switch (role) {
+ case Use:
+ case ColdUse:
+ case UseDef:
+ case UseZDef:
+ case LateUse:
+ case LateColdUse:
+ case Scratch:
+ return true;
+ case Def:
+ case ZDef:
+ case UseAddr:
+ case EarlyDef:
+ return false;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ static bool isColdUse(Role role)
+ {
+ switch (role) {
+ case ColdUse:
+ case LateColdUse:
+ return true;
+ case Use:
+ case UseDef:
+ case UseZDef:
+ case LateUse:
+ case Def:
+ case ZDef:
+ case UseAddr:
+ case Scratch:
+ case EarlyDef:
+ return false;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ static bool isWarmUse(Role role)
+ {
+ return isAnyUse(role) && !isColdUse(role);
+ }
+
+ static Role cooled(Role role)
+ {
+ switch (role) {
+ case ColdUse:
+ case LateColdUse:
+ case UseDef:
+ case UseZDef:
+ case Def:
+ case ZDef:
+ case UseAddr:
+ case Scratch:
+ case EarlyDef:
+ return role;
+ case Use:
+ return ColdUse;
+ case LateUse:
+ return LateColdUse;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ // Returns true if the Role implies that the Inst will Use the Arg before doing anything else.
+ static bool isEarlyUse(Role role)
+ {
+ switch (role) {
+ case Use:
+ case ColdUse:
+ case UseDef:
+ case UseZDef:
+ return true;
+ case Def:
+ case ZDef:
+ case UseAddr:
+ case LateUse:
+ case LateColdUse:
+ case Scratch:
+ case EarlyDef:
+ return false;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ // Returns true if the Role implies that the Inst will Use the Arg after doing everything else.
+ static bool isLateUse(Role role)
+ {
+ switch (role) {
+ case LateUse:
+ case LateColdUse:
+ case Scratch:
+ return true;
+ case ColdUse:
+ case Use:
+ case UseDef:
+ case UseZDef:
+ case Def:
+ case ZDef:
+ case UseAddr:
+ case EarlyDef:
+ return false;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ // Returns true if the Role implies that the Inst will Def the Arg.
+ static bool isAnyDef(Role role)
+ {
+ switch (role) {
+ case Use:
+ case ColdUse:
+ case UseAddr:
+ case LateUse:
+ case LateColdUse:
+ return false;
+ case Def:
+ case UseDef:
+ case ZDef:
+ case UseZDef:
+ case EarlyDef:
+ case Scratch:
+ return true;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ // Returns true if the Role implies that the Inst will Def the Arg before start of execution.
+ static bool isEarlyDef(Role role)
+ {
+ switch (role) {
+ case Use:
+ case ColdUse:
+ case UseAddr:
+ case LateUse:
+ case Def:
+ case UseDef:
+ case ZDef:
+ case UseZDef:
+ case LateColdUse:
+ return false;
+ case EarlyDef:
+ case Scratch:
+ return true;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ // Returns true if the Role implies that the Inst will Def the Arg after the end of execution.
+ static bool isLateDef(Role role)
+ {
+ switch (role) {
+ case Use:
+ case ColdUse:
+ case UseAddr:
+ case LateUse:
+ case EarlyDef:
+ case Scratch:
+ case LateColdUse:
+ return false;
+ case Def:
+ case UseDef:
+ case ZDef:
+ case UseZDef:
+ return true;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ // Returns true if the Role implies that the Inst will ZDef the Arg.
+ static bool isZDef(Role role)
+ {
+ switch (role) {
+ case Use:
+ case ColdUse:
+ case UseAddr:
+ case LateUse:
+ case Def:
+ case UseDef:
+ case EarlyDef:
+ case Scratch:
+ case LateColdUse:
+ return false;
+ case ZDef:
+ case UseZDef:
+ return true;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ static Type typeForB3Type(B3::Type type)
+ {
+ switch (type) {
+ case Void:
+ ASSERT_NOT_REACHED();
+ return GP;
+ case Int32:
+ case Int64:
+ return GP;
+ case Float:
+ case Double:
+ return FP;
+ }
+ ASSERT_NOT_REACHED();
+ return GP;
+ }
+
+ static Width widthForB3Type(B3::Type type)
+ {
+ switch (type) {
+ case Void:
+ ASSERT_NOT_REACHED();
+ return Width8;
+ case Int32:
+ case Float:
+ return Width32;
+ case Int64:
+ case Double:
+ return Width64;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ static Width conservativeWidth(Type type)
+ {
+ return type == GP ? pointerWidth() : Width64;
+ }
+
+ static Width minimumWidth(Type type)
+ {
+ return type == GP ? Width8 : Width32;
+ }
+
+ static unsigned bytes(Width width)
+ {
+ return 1 << width;
+ }
+
+ static Width widthForBytes(unsigned bytes)
+ {
+ switch (bytes) {
+ case 0:
+ case 1:
+ return Width8;
+ case 2:
+ return Width16;
+ case 3:
+ case 4:
+ return Width32;
+ default:
+ return Width64;
+ }
+ }
+
+ Arg()
+ : m_kind(Invalid)
+ {
+ }
+
+ Arg(Air::Tmp tmp)
+ : m_kind(Tmp)
+ , m_base(tmp)
+ {
+ }
+
+ Arg(Reg reg)
+ : Arg(Air::Tmp(reg))
+ {
+ }
+
+ static Arg imm(int64_t value)
+ {
+ Arg result;
+ result.m_kind = Imm;
+ result.m_offset = value;
+ return result;
+ }
+
+ static Arg bigImm(int64_t value)
+ {
+ Arg result;
+ result.m_kind = BigImm;
+ result.m_offset = value;
+ return result;
+ }
+
+ static Arg bitImm(int64_t value)
+ {
+ Arg result;
+ result.m_kind = BitImm;
+ result.m_offset = value;
+ return result;
+ }
+
+ static Arg bitImm64(int64_t value)
+ {
+ Arg result;
+ result.m_kind = BitImm64;
+ result.m_offset = value;
+ return result;
+ }
+
+ static Arg immPtr(const void* address)
+ {
+ return bigImm(bitwise_cast<intptr_t>(address));
+ }
+
+ static Arg addr(Air::Tmp base, int32_t offset = 0)
+ {
+ ASSERT(base.isGP());
+ Arg result;
+ result.m_kind = Addr;
+ result.m_base = base;
+ result.m_offset = offset;
+ return result;
+ }
+
+ static Arg stack(StackSlot* value, int32_t offset = 0)
+ {
+ Arg result;
+ result.m_kind = Stack;
+ result.m_offset = bitwise_cast<intptr_t>(value);
+ result.m_scale = offset; // I know, yuck.
+ return result;
+ }
+
+ static Arg callArg(int32_t offset)
+ {
+ Arg result;
+ result.m_kind = CallArg;
+ result.m_offset = offset;
+ return result;
+ }
+
+ static Arg stackAddr(int32_t offsetFromFP, unsigned frameSize, Width width)
+ {
+ Arg result = Arg::addr(Air::Tmp(GPRInfo::callFrameRegister), offsetFromFP);
+ if (!result.isValidForm(width)) {
+ result = Arg::addr(
+ Air::Tmp(MacroAssembler::stackPointerRegister),
+ offsetFromFP + frameSize);
+ }
+ return result;
+ }
+
+ // If you don't pass a Width, this optimistically assumes that you're using the right width.
+ static bool isValidScale(unsigned scale, std::optional<Width> width = std::nullopt)
+ {
+ switch (scale) {
+ case 1:
+ if (isX86() || isARM64())
+ return true;
+ return false;
+ case 2:
+ case 4:
+ case 8:
+ if (isX86())
+ return true;
+ if (isARM64()) {
+ if (!width)
+ return true;
+ return scale == 1 || scale == bytes(*width);
+ }
+ return false;
+ default:
+ return false;
+ }
+ }
+
+ static unsigned logScale(unsigned scale)
+ {
+ switch (scale) {
+ case 1:
+ return 0;
+ case 2:
+ return 1;
+ case 4:
+ return 2;
+ case 8:
+ return 3;
+ default:
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+ }
+
+ static Arg index(Air::Tmp base, Air::Tmp index, unsigned scale = 1, int32_t offset = 0)
+ {
+ ASSERT(base.isGP());
+ ASSERT(index.isGP());
+ ASSERT(isValidScale(scale));
+ Arg result;
+ result.m_kind = Index;
+ result.m_base = base;
+ result.m_index = index;
+ result.m_scale = static_cast<int32_t>(scale);
+ result.m_offset = offset;
+ return result;
+ }
+
+ static Arg relCond(MacroAssembler::RelationalCondition condition)
+ {
+ Arg result;
+ result.m_kind = RelCond;
+ result.m_offset = condition;
+ return result;
+ }
+
+ static Arg resCond(MacroAssembler::ResultCondition condition)
+ {
+ Arg result;
+ result.m_kind = ResCond;
+ result.m_offset = condition;
+ return result;
+ }
+
+ static Arg doubleCond(MacroAssembler::DoubleCondition condition)
+ {
+ Arg result;
+ result.m_kind = DoubleCond;
+ result.m_offset = condition;
+ return result;
+ }
+
+ static Arg special(Air::Special* special)
+ {
+ Arg result;
+ result.m_kind = Special;
+ result.m_offset = bitwise_cast<intptr_t>(special);
+ return result;
+ }
+
+ static Arg widthArg(Width width)
+ {
+ Arg result;
+ result.m_kind = WidthArg;
+ result.m_offset = width;
+ return result;
+ }
+
+ bool operator==(const Arg& other) const
+ {
+ return m_offset == other.m_offset
+ && m_kind == other.m_kind
+ && m_base == other.m_base
+ && m_index == other.m_index
+ && m_scale == other.m_scale;
+ }
+
+ bool operator!=(const Arg& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const { return *this != Arg(); }
+
+ Kind kind() const
+ {
+ return m_kind;
+ }
+
+ bool isTmp() const
+ {
+ return kind() == Tmp;
+ }
+
+ bool isImm() const
+ {
+ return kind() == Imm;
+ }
+
+ bool isBigImm() const
+ {
+ return kind() == BigImm;
+ }
+
+ bool isBitImm() const
+ {
+ return kind() == BitImm;
+ }
+
+ bool isBitImm64() const
+ {
+ return kind() == BitImm64;
+ }
+
+ bool isSomeImm() const
+ {
+ switch (kind()) {
+ case Imm:
+ case BigImm:
+ case BitImm:
+ case BitImm64:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isAddr() const
+ {
+ return kind() == Addr;
+ }
+
+ bool isStack() const
+ {
+ return kind() == Stack;
+ }
+
+ bool isCallArg() const
+ {
+ return kind() == CallArg;
+ }
+
+ bool isIndex() const
+ {
+ return kind() == Index;
+ }
+
+ bool isMemory() const
+ {
+ switch (kind()) {
+ case Addr:
+ case Stack:
+ case CallArg:
+ case Index:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isStackMemory() const;
+
+ bool isRelCond() const
+ {
+ return kind() == RelCond;
+ }
+
+ bool isResCond() const
+ {
+ return kind() == ResCond;
+ }
+
+ bool isDoubleCond() const
+ {
+ return kind() == DoubleCond;
+ }
+
+ bool isCondition() const
+ {
+ switch (kind()) {
+ case RelCond:
+ case ResCond:
+ case DoubleCond:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isSpecial() const
+ {
+ return kind() == Special;
+ }
+
+ bool isWidthArg() const
+ {
+ return kind() == WidthArg;
+ }
+
+ bool isAlive() const
+ {
+ return isTmp() || isStack();
+ }
+
+ Air::Tmp tmp() const
+ {
+ ASSERT(kind() == Tmp);
+ return m_base;
+ }
+
+ int64_t value() const
+ {
+ ASSERT(isSomeImm());
+ return m_offset;
+ }
+
+ template<typename T>
+ bool isRepresentableAs() const
+ {
+ return B3::isRepresentableAs<T>(value());
+ }
+
+ static bool isRepresentableAs(Width width, Signedness signedness, int64_t value)
+ {
+ switch (signedness) {
+ case Signed:
+ switch (width) {
+ case Width8:
+ return B3::isRepresentableAs<int8_t>(value);
+ case Width16:
+ return B3::isRepresentableAs<int16_t>(value);
+ case Width32:
+ return B3::isRepresentableAs<int32_t>(value);
+ case Width64:
+ return B3::isRepresentableAs<int64_t>(value);
+ }
+ case Unsigned:
+ switch (width) {
+ case Width8:
+ return B3::isRepresentableAs<uint8_t>(value);
+ case Width16:
+ return B3::isRepresentableAs<uint16_t>(value);
+ case Width32:
+ return B3::isRepresentableAs<uint32_t>(value);
+ case Width64:
+ return B3::isRepresentableAs<uint64_t>(value);
+ }
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ bool isRepresentableAs(Width, Signedness) const;
+
+ static int64_t castToType(Width width, Signedness signedness, int64_t value)
+ {
+ switch (signedness) {
+ case Signed:
+ switch (width) {
+ case Width8:
+ return static_cast<int8_t>(value);
+ case Width16:
+ return static_cast<int16_t>(value);
+ case Width32:
+ return static_cast<int32_t>(value);
+ case Width64:
+ return static_cast<int64_t>(value);
+ }
+ case Unsigned:
+ switch (width) {
+ case Width8:
+ return static_cast<uint8_t>(value);
+ case Width16:
+ return static_cast<uint16_t>(value);
+ case Width32:
+ return static_cast<uint32_t>(value);
+ case Width64:
+ return static_cast<uint64_t>(value);
+ }
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ template<typename T>
+ T asNumber() const
+ {
+ return static_cast<T>(value());
+ }
+
+ void* pointerValue() const
+ {
+ ASSERT(kind() == BigImm);
+ return bitwise_cast<void*>(static_cast<intptr_t>(m_offset));
+ }
+
+ Air::Tmp base() const
+ {
+ ASSERT(kind() == Addr || kind() == Index);
+ return m_base;
+ }
+
+ bool hasOffset() const { return isMemory(); }
+
+ int32_t offset() const
+ {
+ if (kind() == Stack)
+ return static_cast<int32_t>(m_scale);
+ ASSERT(kind() == Addr || kind() == CallArg || kind() == Index);
+ return static_cast<int32_t>(m_offset);
+ }
+
+ StackSlot* stackSlot() const
+ {
+ ASSERT(kind() == Stack);
+ return bitwise_cast<StackSlot*>(m_offset);
+ }
+
+ Air::Tmp index() const
+ {
+ ASSERT(kind() == Index);
+ return m_index;
+ }
+
+ unsigned scale() const
+ {
+ ASSERT(kind() == Index);
+ return m_scale;
+ }
+
+ unsigned logScale() const
+ {
+ return logScale(scale());
+ }
+
+ Air::Special* special() const
+ {
+ ASSERT(kind() == Special);
+ return bitwise_cast<Air::Special*>(m_offset);
+ }
+
+ Width width() const
+ {
+ ASSERT(kind() == WidthArg);
+ return static_cast<Width>(m_offset);
+ }
+
+ bool isGPTmp() const
+ {
+ return isTmp() && tmp().isGP();
+ }
+
+ bool isFPTmp() const
+ {
+ return isTmp() && tmp().isFP();
+ }
+
+ // Tells us if this Arg can be used in a position that requires a GP value.
+ bool isGP() const
+ {
+ switch (kind()) {
+ case Imm:
+ case BigImm:
+ case BitImm:
+ case BitImm64:
+ case Addr:
+ case Index:
+ case Stack:
+ case CallArg:
+ case RelCond:
+ case ResCond:
+ case DoubleCond:
+ case Special:
+ case WidthArg:
+ return true;
+ case Tmp:
+ return isGPTmp();
+ case Invalid:
+ return false;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ // Tells us if this Arg can be used in a position that requires a FP value.
+ bool isFP() const
+ {
+ switch (kind()) {
+ case Imm:
+ case BitImm:
+ case BitImm64:
+ case RelCond:
+ case ResCond:
+ case DoubleCond:
+ case Special:
+ case WidthArg:
+ case Invalid:
+ return false;
+ case Addr:
+ case Index:
+ case Stack:
+ case CallArg:
+ case BigImm: // Yes, we allow BigImm as a double immediate. We use this for implementing stackmaps.
+ return true;
+ case Tmp:
+ return isFPTmp();
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ bool hasType() const
+ {
+ switch (kind()) {
+ case Imm:
+ case BitImm:
+ case BitImm64:
+ case Special:
+ case Tmp:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // The type is ambiguous for some arg kinds. Call with care.
+ Type type() const
+ {
+ return isGP() ? GP : FP;
+ }
+
+ bool isType(Type type) const
+ {
+ switch (type) {
+ case GP:
+ return isGP();
+ case FP:
+ return isFP();
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ bool canRepresent(Value* value) const;
+
+ bool isCompatibleType(const Arg& other) const;
+
+ bool isGPR() const
+ {
+ return isTmp() && tmp().isGPR();
+ }
+
+ GPRReg gpr() const
+ {
+ return tmp().gpr();
+ }
+
+ bool isFPR() const
+ {
+ return isTmp() && tmp().isFPR();
+ }
+
+ FPRReg fpr() const
+ {
+ return tmp().fpr();
+ }
+
+ bool isReg() const
+ {
+ return isTmp() && tmp().isReg();
+ }
+
+ Reg reg() const
+ {
+ return tmp().reg();
+ }
+
+ unsigned gpTmpIndex() const
+ {
+ return tmp().gpTmpIndex();
+ }
+
+ unsigned fpTmpIndex() const
+ {
+ return tmp().fpTmpIndex();
+ }
+
+ unsigned tmpIndex() const
+ {
+ return tmp().tmpIndex();
+ }
+
+ static bool isValidImmForm(int64_t value)
+ {
+ if (isX86())
+ return B3::isRepresentableAs<int32_t>(value);
+ if (isARM64())
+ return isUInt12(value);
+ return false;
+ }
+
+ static bool isValidBitImmForm(int64_t value)
+ {
+ if (isX86())
+ return B3::isRepresentableAs<int32_t>(value);
+ if (isARM64())
+ return ARM64LogicalImmediate::create32(value).isValid();
+ return false;
+ }
+
+ static bool isValidBitImm64Form(int64_t value)
+ {
+ if (isX86())
+ return B3::isRepresentableAs<int32_t>(value);
+ if (isARM64())
+ return ARM64LogicalImmediate::create64(value).isValid();
+ return false;
+ }
+
+ static bool isValidAddrForm(int32_t offset, std::optional<Width> width = std::nullopt)
+ {
+ if (isX86())
+ return true;
+ if (isARM64()) {
+ if (!width)
+ return true;
+
+ if (isValidSignedImm9(offset))
+ return true;
+
+ switch (*width) {
+ case Width8:
+ return isValidScaledUImm12<8>(offset);
+ case Width16:
+ return isValidScaledUImm12<16>(offset);
+ case Width32:
+ return isValidScaledUImm12<32>(offset);
+ case Width64:
+ return isValidScaledUImm12<64>(offset);
+ }
+ }
+ return false;
+ }
+
+ static bool isValidIndexForm(unsigned scale, int32_t offset, std::optional<Width> width = std::nullopt)
+ {
+ if (!isValidScale(scale, width))
+ return false;
+ if (isX86())
+ return true;
+ if (isARM64())
+ return !offset;
+ return false;
+ }
+
+ // If you don't pass a width then this optimistically assumes that you're using the right width. But
+ // the width is relevant to validity, so passing a null width is only useful for assertions. Don't
+ // pass null widths when cascading through Args in the instruction selector!
+ bool isValidForm(std::optional<Width> width = std::nullopt) const
+ {
+ switch (kind()) {
+ case Invalid:
+ return false;
+ case Tmp:
+ return true;
+ case Imm:
+ return isValidImmForm(value());
+ case BigImm:
+ return true;
+ case BitImm:
+ return isValidBitImmForm(value());
+ case BitImm64:
+ return isValidBitImm64Form(value());
+ case Addr:
+ case Stack:
+ case CallArg:
+ return isValidAddrForm(offset(), width);
+ case Index:
+ return isValidIndexForm(scale(), offset(), width);
+ case RelCond:
+ case ResCond:
+ case DoubleCond:
+ case Special:
+ case WidthArg:
+ return true;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ template<typename Functor>
+ void forEachTmpFast(const Functor& functor)
+ {
+ switch (m_kind) {
+ case Tmp:
+ case Addr:
+ functor(m_base);
+ break;
+ case Index:
+ functor(m_base);
+ functor(m_index);
+ break;
+ default:
+ break;
+ }
+ }
+
+ bool usesTmp(Air::Tmp tmp) const;
+
+ template<typename Thing>
+ bool is() const;
+
+ template<typename Thing>
+ Thing as() const;
+
+ template<typename Thing, typename Functor>
+ void forEachFast(const Functor&);
+
+ template<typename Thing, typename Functor>
+ void forEach(Role, Type, Width, const Functor&);
+
+ // This is smart enough to know that an address arg in a Def or UseDef rule will use its
+ // tmps and never def them. For example, this:
+ //
+ // mov %rax, (%rcx)
+ //
+ // This defs (%rcx) but uses %rcx.
+ template<typename Functor>
+ void forEachTmp(Role argRole, Type argType, Width argWidth, const Functor& functor)
+ {
+ switch (m_kind) {
+ case Tmp:
+ ASSERT(isAnyUse(argRole) || isAnyDef(argRole));
+ functor(m_base, argRole, argType, argWidth);
+ break;
+ case Addr:
+ functor(m_base, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+ break;
+ case Index:
+ functor(m_base, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+ functor(m_index, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+ break;
+ default:
+ break;
+ }
+ }
+
+ MacroAssembler::TrustedImm32 asTrustedImm32() const
+ {
+ ASSERT(isImm() || isBitImm());
+ return MacroAssembler::TrustedImm32(static_cast<int32_t>(m_offset));
+ }
+
+#if USE(JSVALUE64)
+ MacroAssembler::TrustedImm64 asTrustedImm64() const
+ {
+ ASSERT(isBigImm() || isBitImm64());
+ return MacroAssembler::TrustedImm64(value());
+ }
+#endif
+
+ MacroAssembler::TrustedImmPtr asTrustedImmPtr() const
+ {
+ if (is64Bit())
+ ASSERT(isBigImm());
+ else
+ ASSERT(isImm());
+ return MacroAssembler::TrustedImmPtr(pointerValue());
+ }
+
+ MacroAssembler::Address asAddress() const
+ {
+ ASSERT(isAddr());
+ return MacroAssembler::Address(m_base.gpr(), static_cast<int32_t>(m_offset));
+ }
+
+ MacroAssembler::BaseIndex asBaseIndex() const
+ {
+ ASSERT(isIndex());
+ return MacroAssembler::BaseIndex(
+ m_base.gpr(), m_index.gpr(), static_cast<MacroAssembler::Scale>(logScale()),
+ static_cast<int32_t>(m_offset));
+ }
+
+ MacroAssembler::RelationalCondition asRelationalCondition() const
+ {
+ ASSERT(isRelCond());
+ return static_cast<MacroAssembler::RelationalCondition>(m_offset);
+ }
+
+ MacroAssembler::ResultCondition asResultCondition() const
+ {
+ ASSERT(isResCond());
+ return static_cast<MacroAssembler::ResultCondition>(m_offset);
+ }
+
+ MacroAssembler::DoubleCondition asDoubleCondition() const
+ {
+ ASSERT(isDoubleCond());
+ return static_cast<MacroAssembler::DoubleCondition>(m_offset);
+ }
+
+ // Tells you if the Arg is invertible. Only condition arguments are invertible, and even for those, there
+ // are a few exceptions - notably Overflow and Signed.
+ bool isInvertible() const
+ {
+ switch (kind()) {
+ case RelCond:
+ case DoubleCond:
+ return true;
+ case ResCond:
+ return MacroAssembler::isInvertible(asResultCondition());
+ default:
+ return false;
+ }
+ }
+
+ // This is valid for condition arguments. It will invert them.
+ Arg inverted(bool inverted = true) const
+ {
+ if (!inverted)
+ return *this;
+ switch (kind()) {
+ case RelCond:
+ return relCond(MacroAssembler::invert(asRelationalCondition()));
+ case ResCond:
+ return resCond(MacroAssembler::invert(asResultCondition()));
+ case DoubleCond:
+ return doubleCond(MacroAssembler::invert(asDoubleCondition()));
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return Arg();
+ }
+ }
+
+ Arg flipped(bool flipped = true) const
+ {
+ if (!flipped)
+ return Arg();
+ return relCond(MacroAssembler::flip(asRelationalCondition()));
+ }
+
+ bool isSignedCond() const
+ {
+ return isRelCond() && MacroAssembler::isSigned(asRelationalCondition());
+ }
+
+ bool isUnsignedCond() const
+ {
+ return isRelCond() && MacroAssembler::isUnsigned(asRelationalCondition());
+ }
+
+ // This computes a hash for comparing this to JSAir's Arg.
+ unsigned jsHash() const;
+
+ void dump(PrintStream&) const;
+
+ Arg(WTF::HashTableDeletedValueType)
+ : m_base(WTF::HashTableDeletedValue)
+ {
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return *this == Arg(WTF::HashTableDeletedValue);
+ }
+
+ unsigned hash() const
+ {
+ // This really doesn't have to be that great.
+ return WTF::IntHash<int64_t>::hash(m_offset) + m_kind + m_scale + m_base.hash() +
+ m_index.hash();
+ }
+
+private:
+ int64_t m_offset { 0 };
+ Kind m_kind { Invalid };
+ int32_t m_scale { 1 };
+ Air::Tmp m_base;
+ Air::Tmp m_index;
+};
+
+struct ArgHash {
+ static unsigned hash(const Arg& key) { return key.hash(); }
+ static bool equal(const Arg& a, const Arg& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Kind);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Role);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Type);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Width);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Signedness);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Air::Arg> {
+ typedef JSC::B3::Air::ArgHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Air::Arg> : SimpleClassHashTraits<JSC::B3::Air::Arg> {
+ // Because m_scale is 1 in the empty value.
+ static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
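
To make the constructors and the Role machinery above concrete, here is a hedged sketch that builds a few Args, checks their encodable forms, and walks the Tmps of an address with forEachTmp(). Note how the base and index of an address are reported as Use even when the address itself sits in a Def position. base and index are assumed to be GP Tmps, and buildSomeArgs is an illustrative name:

    #include "AirArgInlines.h"
    #include <wtf/DataLog.h>

    using namespace JSC::B3::Air;

    static void buildSomeArgs(Tmp base, Tmp index)
    {
        Arg imm = Arg::imm(5);                       // small, inlineable immediate
        Arg addr = Arg::addr(base, 16);              // base + 16
        Arg indexed = Arg::index(base, index, 8, 0); // base + index * 8

        // Width matters for validity on ARM64; a null width is only for assertions.
        ASSERT(imm.isValidForm());
        ASSERT(addr.isValidForm(Arg::Width64));

        // Even if this address appears in a Def slot (a store), its Tmps are Uses.
        indexed.forEachTmp(
            Arg::Def, Arg::GP, Arg::Width64,
            [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width width) {
                WTF::dataLog(tmp, ": ", role, ", ", type, ", ", width, "\n");
            });
    }
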
diff --git a/Source/JavaScriptCore/b3/air/AirArgInlines.h b/Source/JavaScriptCore/b3/air/AirArgInlines.h
new file mode 100644
index 000000000..73f7d5bba
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirArgInlines.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<typename T> struct ArgThingHelper;
+
+template<> struct ArgThingHelper<Tmp> {
+ static bool is(const Arg& arg)
+ {
+ return arg.isTmp();
+ }
+
+ static Tmp as(const Arg& arg)
+ {
+ if (is(arg))
+ return arg.tmp();
+ return Tmp();
+ }
+
+ template<typename Functor>
+ static void forEachFast(Arg& arg, const Functor& functor)
+ {
+ arg.forEachTmpFast(functor);
+ }
+
+ template<typename Functor>
+ static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+ {
+ arg.forEachTmp(role, type, width, functor);
+ }
+};
+
+template<> struct ArgThingHelper<Arg> {
+ static bool is(const Arg&)
+ {
+ return true;
+ }
+
+ static Arg as(const Arg& arg)
+ {
+ return arg;
+ }
+
+ template<typename Functor>
+ static void forEachFast(Arg& arg, const Functor& functor)
+ {
+ functor(arg);
+ }
+
+ template<typename Functor>
+ static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+ {
+ functor(arg, role, type, width);
+ }
+};
+
+template<> struct ArgThingHelper<StackSlot*> {
+ static bool is(const Arg& arg)
+ {
+ return arg.isStack();
+ }
+
+ static StackSlot* as(const Arg& arg)
+ {
+ return arg.stackSlot();
+ }
+
+ template<typename Functor>
+ static void forEachFast(Arg& arg, const Functor& functor)
+ {
+ if (!arg.isStack())
+ return;
+
+ StackSlot* stackSlot = arg.stackSlot();
+ functor(stackSlot);
+ arg = Arg::stack(stackSlot, arg.offset());
+ }
+
+ template<typename Functor>
+ static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+ {
+ if (!arg.isStack())
+ return;
+
+ StackSlot* stackSlot = arg.stackSlot();
+
+ // FIXME: This is way too optimistic about the meaning of "Def". It gets lucky for
+ // now because our only use of "Anonymous" stack slots happens to want the optimistic
+ // semantics. We could fix this by just changing the comments that describe the
+ // semantics of "Anonymous".
+ // https://bugs.webkit.org/show_bug.cgi?id=151128
+
+ functor(stackSlot, role, type, width);
+ arg = Arg::stack(stackSlot, arg.offset());
+ }
+};
+
+template<> struct ArgThingHelper<Reg> {
+ static bool is(const Arg& arg)
+ {
+ return arg.isReg();
+ }
+
+ static Reg as(const Arg& arg)
+ {
+ return arg.reg();
+ }
+
+ template<typename Functor>
+ static void forEachFast(Arg& arg, const Functor& functor)
+ {
+ arg.forEachTmpFast(
+ [&] (Tmp& tmp) {
+ if (!tmp.isReg())
+ return;
+
+ Reg reg = tmp.reg();
+ functor(reg);
+ tmp = Tmp(reg);
+ });
+ }
+
+ template<typename Functor>
+ static void forEach(Arg& arg, Arg::Role argRole, Arg::Type argType, Arg::Width argWidth, const Functor& functor)
+ {
+ arg.forEachTmp(
+ argRole, argType, argWidth,
+ [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width width) {
+ if (!tmp.isReg())
+ return;
+
+ Reg reg = tmp.reg();
+ functor(reg, role, type, width);
+ tmp = Tmp(reg);
+ });
+ }
+};
+
+template<typename Thing>
+bool Arg::is() const
+{
+ return ArgThingHelper<Thing>::is(*this);
+}
+
+template<typename Thing>
+Thing Arg::as() const
+{
+ return ArgThingHelper<Thing>::as(*this);
+}
+
+template<typename Thing, typename Functor>
+void Arg::forEachFast(const Functor& functor)
+{
+ ArgThingHelper<Thing>::forEachFast(*this, functor);
+}
+
+template<typename Thing, typename Functor>
+void Arg::forEach(Role role, Type type, Width width, const Functor& functor)
+{
+ ArgThingHelper<Thing>::forEach(*this, role, type, width, functor);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
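
The ArgThingHelper specializations above are what give Arg::is<>(), Arg::as<>(), forEachFast<>() and forEach<>() their meaning for Tmp, Arg, StackSlot* and Reg. A hedged sketch of the typical pattern; renameRegister is an illustrative name:

    #include "AirArgInlines.h"

    using namespace JSC::B3::Air;

    static void renameRegister(Arg& arg, Reg from, Reg to)
    {
        // Visits every Reg this Arg mentions: a register Tmp, or the base/index
        // registers of an address. Writing through the reference rewrites the Arg.
        arg.forEachFast<Reg>(
            [&] (Reg& reg) {
                if (reg == from)
                    reg = to;
            });

        // The query templates work the same way:
        if (arg.is<StackSlot*>()) {
            StackSlot* slot = arg.as<StackSlot*>(); // the slot this Arg refers to
            UNUSED_PARAM(slot);
        }
    }
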
diff --git a/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp b/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp
new file mode 100644
index 000000000..fa3ad8e4d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirBasicBlock.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirBasicBlock.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockUtils.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+const char* const BasicBlock::dumpPrefix = "#";
+
+bool BasicBlock::addPredecessor(BasicBlock* block)
+{
+ return B3::addPredecessor(this, block);
+}
+
+bool BasicBlock::removePredecessor(BasicBlock* block)
+{
+ return B3::removePredecessor(this, block);
+}
+
+bool BasicBlock::replacePredecessor(BasicBlock* from, BasicBlock* to)
+{
+ return B3::replacePredecessor(this, from, to);
+}
+
+void BasicBlock::dump(PrintStream& out) const
+{
+ out.print(dumpPrefix, m_index);
+}
+
+void BasicBlock::deepDump(PrintStream& out) const
+{
+ dumpHeader(out);
+ for (const Inst& inst : *this)
+ out.print(" ", inst, "\n");
+ dumpFooter(out);
+}
+
+void BasicBlock::dumpHeader(PrintStream& out) const
+{
+ out.print("BB", *this, ": ; frequency = ", m_frequency, "\n");
+ if (predecessors().size())
+ out.print(" Predecessors: ", pointerListDump(predecessors()), "\n");
+}
+
+void BasicBlock::dumpFooter(PrintStream& out) const
+{
+ if (successors().size())
+ out.print(" Successors: ", listDump(successors()), "\n");
+}
+
+BasicBlock::BasicBlock(unsigned index, double frequency)
+ : m_index(index)
+ , m_frequency(frequency)
+{
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirBasicBlock.h b/Source/JavaScriptCore/b3/air/AirBasicBlock.h
new file mode 100644
index 000000000..431bd711c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirBasicBlock.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirFrequentedBlock.h"
+#include "AirInst.h"
+#include "B3SuccessorCollection.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BlockInsertionSet;
+class Code;
+class InsertionSet;
+
+class BasicBlock {
+ WTF_MAKE_NONCOPYABLE(BasicBlock);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ static const char* const dumpPrefix;
+
+ typedef Vector<Inst> InstList;
+ typedef Vector<BasicBlock*, 2> PredecessorList;
+ typedef Vector<FrequentedBlock, 2> SuccessorList;
+
+ unsigned index() const { return m_index; }
+
+ // This method is exposed for phases that mess with the layout of basic blocks. Currently that means just
+ // optimizeBlockOrder().
+ void setIndex(unsigned index) { m_index = index; }
+
+ unsigned size() const { return m_insts.size(); }
+ InstList::iterator begin() { return m_insts.begin(); }
+ InstList::iterator end() { return m_insts.end(); }
+ InstList::const_iterator begin() const { return m_insts.begin(); }
+ InstList::const_iterator end() const { return m_insts.end(); }
+
+ const Inst& at(unsigned index) const { return m_insts[index]; }
+ Inst& at(unsigned index) { return m_insts[index]; }
+
+ Inst* get(unsigned index)
+ {
+ return index < size() ? &at(index) : nullptr;
+ }
+
+ const Inst& last() const { return m_insts.last(); }
+ Inst& last() { return m_insts.last(); }
+
+ void resize(unsigned size) { m_insts.resize(size); }
+
+ const InstList& insts() const { return m_insts; }
+ InstList& insts() { return m_insts; }
+
+ template<typename Inst>
+ Inst& appendInst(Inst&& inst)
+ {
+ m_insts.append(std::forward<Inst>(inst));
+ return m_insts.last();
+ }
+
+ template<typename... Arguments>
+ Inst& append(Arguments&&... arguments)
+ {
+ m_insts.append(Inst(std::forward<Arguments>(arguments)...));
+ return m_insts.last();
+ }
+
+ // The "0" case is the case to which the branch jumps, so the "then" case. The "1" case is the
+ // "else" case, and is used to represent the fall-through of a conditional branch.
+ unsigned numSuccessors() const { return m_successors.size(); }
+ FrequentedBlock successor(unsigned index) const { return m_successors[index]; }
+ FrequentedBlock& successor(unsigned index) { return m_successors[index]; }
+ const SuccessorList& successors() const { return m_successors; }
+ SuccessorList& successors() { return m_successors; }
+
+ BasicBlock* successorBlock(unsigned index) const { return successor(index).block(); }
+ BasicBlock*& successorBlock(unsigned index) { return successor(index).block(); }
+ SuccessorCollection<BasicBlock, SuccessorList> successorBlocks()
+ {
+ return SuccessorCollection<BasicBlock, SuccessorList>(m_successors);
+ }
+ SuccessorCollection<const BasicBlock, const SuccessorList> successorBlocks() const
+ {
+ return SuccessorCollection<const BasicBlock, const SuccessorList>(m_successors);
+ }
+
+ unsigned numPredecessors() const { return m_predecessors.size(); }
+ BasicBlock* predecessor(unsigned index) const { return m_predecessors[index]; }
+ BasicBlock*& predecessor(unsigned index) { return m_predecessors[index]; }
+ const PredecessorList& predecessors() const { return m_predecessors; }
+ PredecessorList& predecessors() { return m_predecessors; }
+
+ bool addPredecessor(BasicBlock*);
+ bool removePredecessor(BasicBlock*);
+ bool replacePredecessor(BasicBlock* from, BasicBlock* to);
+ bool containsPredecessor(BasicBlock* predecessor) const { return m_predecessors.contains(predecessor); }
+
+ double frequency() const { return m_frequency; }
+
+ void dump(PrintStream&) const;
+ void deepDump(PrintStream&) const;
+
+ void dumpHeader(PrintStream&) const;
+ void dumpFooter(PrintStream&) const;
+
+private:
+ friend class BlockInsertionSet;
+ friend class Code;
+ friend class InsertionSet;
+
+ BasicBlock(unsigned index, double frequency);
+
+ unsigned m_index;
+ InstList m_insts;
+ SuccessorList m_successors;
+ PredecessorList m_predecessors;
+ double m_frequency;
+};
+
+class DeepBasicBlockDump {
+public:
+ DeepBasicBlockDump(const BasicBlock* block)
+ : m_block(block)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_block)
+ m_block->deepDump(out);
+ else
+ out.print("<null>");
+ }
+
+private:
+ const BasicBlock* m_block;
+};
+
+inline DeepBasicBlockDump deepDump(const BasicBlock* block)
+{
+ return DeepBasicBlockDump(block);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
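
A small hedged sketch of how instructions are appended to a block and how the dump helpers above are used. Move and Add32 are Air opcodes that admit Tmp, Tmp forms; fillBlock and its parameters are illustrative, and the block is assumed to come from Code::addBlock():

    #include "AirBasicBlock.h"
    #include <wtf/DataLog.h>

    using namespace JSC::B3::Air;

    static void fillBlock(BasicBlock* block, JSC::B3::Value* origin, Tmp src, Tmp dst)
    {
        // append() forwards its arguments to the Inst constructor: opcode, origin, args.
        block->append(Move, origin, src, dst);   // dst = src
        block->append(Add32, origin, src, dst);  // dst += src

        // deepDump() prints the "BB#n" header, each Inst, then the successor footer.
        WTF::dataLog(deepDump(block));
    }
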
diff --git a/Source/JavaScriptCore/b3/air/AirBlockWorklist.h b/Source/JavaScriptCore/b3/air/AirBlockWorklist.h
new file mode 100644
index 000000000..ba231a9b5
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirBlockWorklist.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "B3BlockWorklist.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+typedef GraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> BlockWorklist;
+
+// When you say BlockWith<int> you should read it as "block with an int".
+template<typename T> using BlockWith = GraphNodeWith<BasicBlock*, T>;
+
+// Extended block worklist is useful for enqueueing some meta-data along with the block. It also
+// permits forcibly enqueueing things even if the block has already been seen. It's useful for
+// things like building a spanning tree, in which case T (the auxiliary payload) would be the
+// successor index.
+template<typename T> using ExtendedBlockWorklist = ExtendedGraphNodeWorklist<BasicBlock*, T, IndexSet<BasicBlock>>;
+
+typedef GraphNodeWithOrder<BasicBlock*> BlockWithOrder;
+
+typedef PostOrderGraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> PostOrderBlockWorklist;
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
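
The typedefs above are the standard way Air phases walk the CFG. A hedged sketch of a forward reachability traversal; visitReachableBlocks is an illustrative name, and the root block is whatever the phase starts from:

    #include "AirBlockWorklist.h"

    using namespace JSC::B3::Air;

    static void visitReachableBlocks(BasicBlock* root)
    {
        BlockWorklist worklist;
        worklist.push(root);

        // pop() returns null once every pushed block has been visited; the
        // IndexSet ensures each block is enqueued at most once.
        while (BasicBlock* block = worklist.pop()) {
            for (BasicBlock* successor : block->successorBlocks())
                worklist.push(successor);
        }
    }
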
diff --git a/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp b/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
new file mode 100644
index 000000000..f1b6d710e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirCCallSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+CCallSpecial::CCallSpecial()
+{
+ m_clobberedRegs = RegisterSet::allRegisters();
+ m_clobberedRegs.exclude(RegisterSet::stackRegisters());
+ m_clobberedRegs.exclude(RegisterSet::reservedHardwareRegisters());
+ m_clobberedRegs.exclude(RegisterSet::calleeSaveRegisters());
+ m_clobberedRegs.clear(GPRInfo::returnValueGPR);
+ m_clobberedRegs.clear(GPRInfo::returnValueGPR2);
+ m_clobberedRegs.clear(FPRInfo::returnValueFPR);
+}
+
+CCallSpecial::~CCallSpecial()
+{
+}
+
+void CCallSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+ for (unsigned i = 0; i < numCalleeArgs; ++i)
+ callback(inst.args[calleeArgOffset + i], Arg::Use, Arg::GP, Arg::pointerWidth());
+ for (unsigned i = 0; i < numReturnGPArgs; ++i)
+ callback(inst.args[returnGPArgOffset + i], Arg::Def, Arg::GP, Arg::pointerWidth());
+ for (unsigned i = 0; i < numReturnFPArgs; ++i)
+ callback(inst.args[returnFPArgOffset + i], Arg::Def, Arg::FP, Arg::Width64);
+
+ for (unsigned i = argArgOffset; i < inst.args.size(); ++i) {
+ // For the type, we can just query the arg's type. The arg will have a type, because we
+ // require these args to be argument registers.
+ Arg::Type type = inst.args[i].type();
+ callback(inst.args[i], Arg::Use, type, Arg::conservativeWidth(type));
+ }
+}
+
+bool CCallSpecial::isValid(Inst& inst)
+{
+ if (inst.args.size() < argArgOffset)
+ return false;
+
+ for (unsigned i = 0; i < numCalleeArgs; ++i) {
+ Arg& arg = inst.args[i + calleeArgOffset];
+ if (!arg.isGP())
+ return false;
+ switch (arg.kind()) {
+ case Arg::Imm:
+ if (is32Bit())
+ break;
+ return false;
+ case Arg::BigImm:
+ if (is64Bit())
+ break;
+ return false;
+ case Arg::Tmp:
+ case Arg::Addr:
+ case Arg::Stack:
+ case Arg::CallArg:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ // Return args need to be exact.
+ if (inst.args[returnGPArgOffset + 0] != Tmp(GPRInfo::returnValueGPR))
+ return false;
+ if (inst.args[returnGPArgOffset + 1] != Tmp(GPRInfo::returnValueGPR2))
+ return false;
+ if (inst.args[returnFPArgOffset + 0] != Tmp(FPRInfo::returnValueFPR))
+ return false;
+
+ for (unsigned i = argArgOffset; i < inst.args.size(); ++i) {
+ if (!inst.args[i].isReg())
+ return false;
+
+ if (inst.args[i] == Tmp(scratchRegister))
+ return false;
+ }
+ return true;
+}
+
+bool CCallSpecial::admitsStack(Inst&, unsigned argIndex)
+{
+ // The callee can be on the stack.
+ if (argIndex == calleeArgOffset)
+ return true;
+
+ return false;
+}
+
+void CCallSpecial::reportUsedRegisters(Inst&, const RegisterSet&)
+{
+}
+
+CCallHelpers::Jump CCallSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext&)
+{
+ switch (inst.args[calleeArgOffset].kind()) {
+ case Arg::Imm:
+ case Arg::BigImm:
+ jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
+ jit.call(scratchRegister);
+ break;
+ case Arg::Tmp:
+ jit.call(inst.args[calleeArgOffset].gpr());
+ break;
+ case Arg::Addr:
+ jit.call(inst.args[calleeArgOffset].asAddress());
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ return CCallHelpers::Jump();
+}
+
+RegisterSet CCallSpecial::extraEarlyClobberedRegs(Inst&)
+{
+ return m_emptyRegs;
+}
+
+RegisterSet CCallSpecial::extraClobberedRegs(Inst&)
+{
+ return m_clobberedRegs;
+}
+
+void CCallSpecial::dumpImpl(PrintStream& out) const
+{
+ out.print("CCall");
+}
+
+void CCallSpecial::deepDumpImpl(PrintStream& out) const
+{
+ out.print("function call that uses the C calling convention.");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCCallSpecial.h b/Source/JavaScriptCore/b3/air/AirCCallSpecial.h
new file mode 100644
index 000000000..ec909b9f0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCCallSpecial.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirSpecial.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+// Use this special for constructing a C call. Arg 0 is of course a Special arg that refers to the
+// CCallSpecial object. Arg 1 is the callee, and it can be an ImmPtr, a register, or an address. The
+// next three args - arg 2, arg 3, and arg 4 - hold the return value GPRs and FPR. The remaining args
+// are just the set of argument registers used by this call. For arguments that go to the stack, you
+// have to do the grunt work of doing those stack stores. In fact, the only reason why we specify the
+// argument registers as arguments to a call is so that the liveness analysis can see that they get
+// used here. It would be wrong to automagically report all argument registers as being used because
+// if we had a call that didn't pass them, then they'd appear to be live until some clobber point or
+// the prologue, whichever happened sooner.
+
+class CCallSpecial : public Special {
+public:
+ CCallSpecial();
+ ~CCallSpecial();
+
+ // You cannot use this register to pass arguments. It just so happens that this register is not
+ // used for arguments in the C calling convention. By the way, this is the only thing that causes
+ // this special to be specific to C calls.
+ static const GPRReg scratchRegister = GPRInfo::nonArgGPR0;
+
+protected:
+ void forEachArg(Inst&, const ScopedLambda<Inst::EachArgCallback>&) override;
+ bool isValid(Inst&) override;
+ bool admitsStack(Inst&, unsigned argIndex) override;
+ void reportUsedRegisters(Inst&, const RegisterSet&) override;
+ CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&) override;
+ RegisterSet extraEarlyClobberedRegs(Inst&) override;
+ RegisterSet extraClobberedRegs(Inst&) override;
+
+ void dumpImpl(PrintStream&) const override;
+ void deepDumpImpl(PrintStream&) const override;
+
+private:
+ static const unsigned specialArgOffset = 0;
+ static const unsigned numSpecialArgs = 1;
+ static const unsigned calleeArgOffset = numSpecialArgs;
+ static const unsigned numCalleeArgs = 1;
+ static const unsigned returnGPArgOffset = numSpecialArgs + numCalleeArgs;
+ static const unsigned numReturnGPArgs = 2;
+ static const unsigned returnFPArgOffset = numSpecialArgs + numCalleeArgs + numReturnGPArgs;
+ static const unsigned numReturnFPArgs = 1;
+ static const unsigned argArgOffset =
+ numSpecialArgs + numCalleeArgs + numReturnGPArgs + numReturnFPArgs;
+
+ RegisterSet m_clobberedRegs;
+ RegisterSet m_emptyRegs;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
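
For quick reference, the offsets above imply the following argument layout for a Patch instruction that uses this Special; this is the shape that isValid() checks and that buildCCall() (in AirCCallingConvention.cpp, below) constructs:

    // args[0]   Arg::special(...)                        specialArgOffset
    // args[1]   the callee: Imm/BigImm, Tmp, or memory   calleeArgOffset
    // args[2]   Tmp(GPRInfo::returnValueGPR)             returnGPArgOffset + 0
    // args[3]   Tmp(GPRInfo::returnValueGPR2)            returnGPArgOffset + 1
    // args[4]   Tmp(FPRInfo::returnValueFPR)             returnFPArgOffset
    // args[5..] the argument registers actually used     argArgOffset
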
diff --git a/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp b/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp
new file mode 100644
index 000000000..2b6f733bf
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirCCallingConvention.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "AirCode.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+template<typename BankInfo>
+Arg marshallCCallArgumentImpl(unsigned& argumentCount, unsigned& stackOffset, Value* child)
+{
+ unsigned argumentIndex = argumentCount++;
+ if (argumentIndex < BankInfo::numberOfArgumentRegisters)
+ return Tmp(BankInfo::toArgumentRegister(argumentIndex));
+
+ unsigned slotSize;
+ if (isARM64() && isIOS()) {
+ // Arguments are packed.
+ slotSize = sizeofType(child->type());
+ } else {
+ // Arguments are aligned.
+ slotSize = 8;
+ }
+
+ stackOffset = WTF::roundUpToMultipleOf(slotSize, stackOffset);
+ Arg result = Arg::callArg(stackOffset);
+ stackOffset += slotSize;
+ return result;
+}
+
+Arg marshallCCallArgument(
+ unsigned& gpArgumentCount, unsigned& fpArgumentCount, unsigned& stackOffset, Value* child)
+{
+ switch (Arg::typeForB3Type(child->type())) {
+ case Arg::GP:
+ return marshallCCallArgumentImpl<GPRInfo>(gpArgumentCount, stackOffset, child);
+ case Arg::FP:
+ return marshallCCallArgumentImpl<FPRInfo>(fpArgumentCount, stackOffset, child);
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return Arg();
+}
+
+} // anonymous namespace
+
+Vector<Arg> computeCCallingConvention(Code& code, CCallValue* value)
+{
+ Vector<Arg> result;
+ result.append(Tmp(CCallSpecial::scratchRegister));
+ unsigned gpArgumentCount = 0;
+ unsigned fpArgumentCount = 0;
+ unsigned stackOffset = 0;
+ for (unsigned i = 1; i < value->numChildren(); ++i) {
+ result.append(
+ marshallCCallArgument(gpArgumentCount, fpArgumentCount, stackOffset, value->child(i)));
+ }
+ code.requestCallArgAreaSizeInBytes(WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset));
+ return result;
+}
+
+Tmp cCallResult(Type type)
+{
+ switch (type) {
+ case Void:
+ return Tmp();
+ case Int32:
+ case Int64:
+ return Tmp(GPRInfo::returnValueGPR);
+ case Float:
+ case Double:
+ return Tmp(FPRInfo::returnValueFPR);
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return Tmp();
+}
+
+Inst buildCCall(Code& code, Value* origin, const Vector<Arg>& arguments)
+{
+ Inst inst(Patch, origin, Arg::special(code.cCallSpecial()));
+ inst.args.append(arguments[0]);
+ inst.args.append(Tmp(GPRInfo::returnValueGPR));
+ inst.args.append(Tmp(GPRInfo::returnValueGPR2));
+ inst.args.append(Tmp(FPRInfo::returnValueFPR));
+ for (unsigned i = 1; i < arguments.size(); ++i) {
+ Arg arg = arguments[i];
+ if (arg.isTmp())
+ inst.args.append(arg);
+ }
+ return inst;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirCCallingConvention.h b/Source/JavaScriptCore/b3/air/AirCCallingConvention.h
new file mode 100644
index 000000000..76acc29ab
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCCallingConvention.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirInst.h"
+#include "B3Type.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class CCallValue;
+
+namespace Air {
+
+class Code;
+
+Vector<Arg> computeCCallingConvention(Code&, CCallValue*);
+
+Tmp cCallResult(Type);
+
+Inst buildCCall(Code&, Value* origin, const Vector<Arg>&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCode.cpp b/Source/JavaScriptCore/b3/air/AirCode.cpp
new file mode 100644
index 000000000..79e2c0cf2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCode.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirCode.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "B3BasicBlockUtils.h"
+#include "B3Procedure.h"
+#include "B3StackSlot.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+Code::Code(Procedure& proc)
+ : m_proc(proc)
+ , m_lastPhaseName("initial")
+{
+ // Come up with initial orderings of registers. The user may replace this with something else.
+ Arg::forEachType(
+ [&] (Arg::Type type) {
+ Vector<Reg> result;
+ RegisterSet all = type == Arg::GP ? RegisterSet::allGPRs() : RegisterSet::allFPRs();
+ all.exclude(RegisterSet::stackRegisters());
+ all.exclude(RegisterSet::reservedHardwareRegisters());
+ RegisterSet calleeSave = RegisterSet::calleeSaveRegisters();
+ all.forEach(
+ [&] (Reg reg) {
+ if (!calleeSave.get(reg))
+ result.append(reg);
+ });
+ all.forEach(
+ [&] (Reg reg) {
+ if (calleeSave.get(reg))
+ result.append(reg);
+ });
+ setRegsInPriorityOrder(type, result);
+ });
+}
+
+Code::~Code()
+{
+}
+
+void Code::setRegsInPriorityOrder(Arg::Type type, const Vector<Reg>& regs)
+{
+ regsInPriorityOrderImpl(type) = regs;
+ m_mutableRegs = RegisterSet();
+ Arg::forEachType(
+ [&] (Arg::Type type) {
+ for (Reg reg : regsInPriorityOrder(type))
+ m_mutableRegs.set(reg);
+ });
+}
+
+void Code::pinRegister(Reg reg)
+{
+ Vector<Reg>& regs = regsInPriorityOrderImpl(Arg(Tmp(reg)).type());
+ regs.removeFirst(reg);
+ m_mutableRegs.clear(reg);
+ ASSERT(!regs.contains(reg));
+}
+
+BasicBlock* Code::addBlock(double frequency)
+{
+ std::unique_ptr<BasicBlock> block(new BasicBlock(m_blocks.size(), frequency));
+ BasicBlock* result = block.get();
+ m_blocks.append(WTFMove(block));
+ return result;
+}
+
+StackSlot* Code::addStackSlot(unsigned byteSize, StackSlotKind kind, B3::StackSlot* b3Slot)
+{
+ return m_stackSlots.addNew(byteSize, kind, b3Slot);
+}
+
+StackSlot* Code::addStackSlot(B3::StackSlot* b3Slot)
+{
+ return addStackSlot(b3Slot->byteSize(), StackSlotKind::Locked, b3Slot);
+}
+
+Special* Code::addSpecial(std::unique_ptr<Special> special)
+{
+ special->m_code = this;
+ return m_specials.add(WTFMove(special));
+}
+
+CCallSpecial* Code::cCallSpecial()
+{
+ if (!m_cCallSpecial) {
+ m_cCallSpecial = static_cast<CCallSpecial*>(
+ addSpecial(std::make_unique<CCallSpecial>()));
+ }
+
+ return m_cCallSpecial;
+}
+
+bool Code::isEntrypoint(BasicBlock* block) const
+{
+ if (m_entrypoints.isEmpty())
+ return !block->index();
+
+ for (const FrequentedBlock& entrypoint : m_entrypoints) {
+ if (entrypoint.block() == block)
+ return true;
+ }
+ return false;
+}
+
+void Code::resetReachability()
+{
+ clearPredecessors(m_blocks);
+ if (m_entrypoints.isEmpty())
+ updatePredecessorsAfter(m_blocks[0].get());
+ else {
+ for (const FrequentedBlock& entrypoint : m_entrypoints)
+ updatePredecessorsAfter(entrypoint.block());
+ }
+
+ for (auto& block : m_blocks) {
+ if (isBlockDead(block.get()) && !isEntrypoint(block.get()))
+ block = nullptr;
+ }
+}
+
+void Code::dump(PrintStream& out) const
+{
+ if (!m_entrypoints.isEmpty())
+ out.print("Entrypoints: ", listDump(m_entrypoints), "\n");
+ for (BasicBlock* block : *this)
+ out.print(deepDump(block));
+ if (stackSlots().size()) {
+ out.print("Stack slots:\n");
+ for (StackSlot* slot : stackSlots())
+ out.print(" ", pointerDump(slot), ": ", deepDump(slot), "\n");
+ }
+ if (specials().size()) {
+ out.print("Specials:\n");
+ for (Special* special : specials())
+ out.print(" ", deepDump(special), "\n");
+ }
+ if (m_frameSize)
+ out.print("Frame size: ", m_frameSize, "\n");
+ if (m_callArgAreaSize)
+ out.print("Call arg area size: ", m_callArgAreaSize, "\n");
+ if (m_calleeSaveRegisters.size())
+ out.print("Callee saves: ", m_calleeSaveRegisters, "\n");
+}
+
+unsigned Code::findFirstBlockIndex(unsigned index) const
+{
+ while (index < size() && !at(index))
+ index++;
+ return index;
+}
+
+unsigned Code::findNextBlockIndex(unsigned index) const
+{
+ return findFirstBlockIndex(index + 1);
+}
+
+BasicBlock* Code::findNextBlock(BasicBlock* block) const
+{
+ unsigned index = findNextBlockIndex(block->index());
+ if (index < size())
+ return at(index);
+ return nullptr;
+}
+
+void Code::addFastTmp(Tmp tmp)
+{
+ m_fastTmps.add(tmp);
+}
+
+void* Code::addDataSection(size_t size)
+{
+ return m_proc.addDataSection(size);
+}
+
+unsigned Code::jsHash() const
+{
+ unsigned result = 0;
+
+ for (BasicBlock* block : *this) {
+ result *= 1000001;
+ for (Inst& inst : *block) {
+ result *= 97;
+ result += inst.jsHash();
+ }
+ for (BasicBlock* successor : block->successorBlocks()) {
+ result *= 7;
+ result += successor->index();
+ }
+ }
+ for (StackSlot* slot : stackSlots()) {
+ result *= 101;
+ result += slot->jsHash();
+ }
+
+ return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCode.h b/Source/JavaScriptCore/b3/air/AirCode.h
new file mode 100644
index 000000000..6d4a14722
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCode.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirBasicBlock.h"
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "AirTmp.h"
+#include "B3SparseCollection.h"
+#include "CCallHelpers.h"
+#include "RegisterAtOffsetList.h"
+#include "StackAlignment.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace Air {
+
+class BlockInsertionSet;
+class CCallSpecial;
+
+typedef void WasmBoundsCheckGeneratorFunction(CCallHelpers&, GPRReg, unsigned);
+typedef SharedTask<WasmBoundsCheckGeneratorFunction> WasmBoundsCheckGenerator;
+
+// This is an IR that is very close to the bare metal. It requires about 40x more bytes than the
+// generated machine code - for example if you're generating 1MB of machine code, you need about
+// 40MB of Air.
+
+class Code {
+ WTF_MAKE_NONCOPYABLE(Code);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ ~Code();
+
+ Procedure& proc() { return m_proc; }
+
+ const Vector<Reg>& regsInPriorityOrder(Arg::Type type) const
+ {
+ switch (type) {
+ case Arg::GP:
+ return m_gpRegsInPriorityOrder;
+ case Arg::FP:
+ return m_fpRegsInPriorityOrder;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ void setRegsInPriorityOrder(Arg::Type, const Vector<Reg>&);
+
+ // This is the set of registers that Air is allowed to emit code to mutate. It's derived from
+ // regsInPriorityOrder. Any registers not in this set are said to be "pinned".
+ const RegisterSet& mutableRegs() const { return m_mutableRegs; }
+
+ bool isPinned(Reg reg) const { return !mutableRegs().get(reg); }
+
+ void pinRegister(Reg);
+
+ JS_EXPORT_PRIVATE BasicBlock* addBlock(double frequency = 1);
+
+ // Note that you can rely on stack slots always getting indices that are larger than the index
+ // of any prior stack slot. In fact, all stack slots you create in the future will have an index
+ // that is >= stackSlots().size().
+ JS_EXPORT_PRIVATE StackSlot* addStackSlot(
+ unsigned byteSize, StackSlotKind, B3::StackSlot* = nullptr);
+ StackSlot* addStackSlot(B3::StackSlot*);
+
+ Special* addSpecial(std::unique_ptr<Special>);
+
+ // This is the special you need to make a C call!
+ CCallSpecial* cCallSpecial();
+
+ Tmp newTmp(Arg::Type type)
+ {
+ switch (type) {
+ case Arg::GP:
+ return Tmp::gpTmpForIndex(m_numGPTmps++);
+ case Arg::FP:
+ return Tmp::fpTmpForIndex(m_numFPTmps++);
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ unsigned numTmps(Arg::Type type)
+ {
+ switch (type) {
+ case Arg::GP:
+ return m_numGPTmps;
+ case Arg::FP:
+ return m_numFPTmps;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ unsigned callArgAreaSizeInBytes() const { return m_callArgAreaSize; }
+
+ // You can call this before code generation to force a minimum call arg area size.
+ void requestCallArgAreaSizeInBytes(unsigned size)
+ {
+ m_callArgAreaSize = std::max(
+ m_callArgAreaSize,
+ static_cast<unsigned>(WTF::roundUpToMultipleOf(stackAlignmentBytes(), size)));
+ }
+
+ unsigned frameSize() const { return m_frameSize; }
+
+ // Only phases that do stack allocation are allowed to set this. Currently, only
+ // Air::allocateStack() does this.
+ void setFrameSize(unsigned frameSize)
+ {
+ m_frameSize = frameSize;
+ }
+
+ // Note that this is not the same thing as proc().numEntrypoints(). This value here may be zero
+ // until we lower EntrySwitch.
+ unsigned numEntrypoints() const { return m_entrypoints.size(); }
+ const Vector<FrequentedBlock>& entrypoints() const { return m_entrypoints; }
+ const FrequentedBlock& entrypoint(unsigned index) const { return m_entrypoints[index]; }
+ bool isEntrypoint(BasicBlock*) const;
+
+ // This is used by lowerEntrySwitch().
+ template<typename Vector>
+ void setEntrypoints(Vector&& vector)
+ {
+ m_entrypoints = std::forward<Vector>(vector);
+ }
+
+ CCallHelpers::Label entrypointLabel(unsigned index) const
+ {
+ return m_entrypointLabels[index];
+ }
+
+ // This is used by generate().
+ template<typename Vector>
+ void setEntrypointLabels(Vector&& vector)
+ {
+ m_entrypointLabels = std::forward<Vector>(vector);
+ }
+
+ const RegisterAtOffsetList& calleeSaveRegisters() const { return m_calleeSaveRegisters; }
+ RegisterAtOffsetList& calleeSaveRegisters() { return m_calleeSaveRegisters; }
+
+ // Recomputes predecessors and deletes unreachable blocks.
+ void resetReachability();
+
+ JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+ unsigned size() const { return m_blocks.size(); }
+ BasicBlock* at(unsigned index) const { return m_blocks[index].get(); }
+ BasicBlock* operator[](unsigned index) const { return at(index); }
+
+ // This is used by phases that optimize the block list. You shouldn't use this unless you really know
+ // what you're doing.
+ Vector<std::unique_ptr<BasicBlock>>& blockList() { return m_blocks; }
+
+ // Finds the smallest index' such that at(index') != null and index' >= index.
+ JS_EXPORT_PRIVATE unsigned findFirstBlockIndex(unsigned index) const;
+
+ // Finds the smallest index' such that at(index') != null and index' > index.
+ unsigned findNextBlockIndex(unsigned index) const;
+
+ BasicBlock* findNextBlock(BasicBlock*) const;
+
+ class iterator {
+ public:
+ iterator()
+ : m_code(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(const Code& code, unsigned index)
+ : m_code(&code)
+ , m_index(m_code->findFirstBlockIndex(index))
+ {
+ }
+
+ BasicBlock* operator*()
+ {
+ return m_code->at(m_index);
+ }
+
+ iterator& operator++()
+ {
+ m_index = m_code->findFirstBlockIndex(m_index + 1);
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ const Code* m_code;
+ unsigned m_index;
+ };
+
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, size()); }
+
+ const SparseCollection<StackSlot>& stackSlots() const { return m_stackSlots; }
+ SparseCollection<StackSlot>& stackSlots() { return m_stackSlots; }
+
+ const SparseCollection<Special>& specials() const { return m_specials; }
+ SparseCollection<Special>& specials() { return m_specials; }
+
+ template<typename Callback>
+ void forAllTmps(const Callback& callback) const
+ {
+ for (unsigned i = m_numGPTmps; i--;)
+ callback(Tmp::gpTmpForIndex(i));
+ for (unsigned i = m_numFPTmps; i--;)
+ callback(Tmp::fpTmpForIndex(i));
+ }
+
+ void addFastTmp(Tmp);
+ bool isFastTmp(Tmp tmp) const { return m_fastTmps.contains(tmp); }
+
+ void* addDataSection(size_t);
+
+ // The name has to be a string literal, since we don't do any memory management for the string.
+ void setLastPhaseName(const char* name)
+ {
+ m_lastPhaseName = name;
+ }
+
+ const char* lastPhaseName() const { return m_lastPhaseName; }
+
+ void setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator> generator)
+ {
+ m_wasmBoundsCheckGenerator = generator;
+ }
+
+ RefPtr<WasmBoundsCheckGenerator> wasmBoundsCheckGenerator() const { return m_wasmBoundsCheckGenerator; }
+
+ // This is a hash of the code. You can use this if you want to put code into a hashtable, but
+ // it's mainly for validating the results from JSAir.
+ unsigned jsHash() const;
+
+private:
+ friend class ::JSC::B3::Procedure;
+ friend class BlockInsertionSet;
+
+ Code(Procedure&);
+
+ Vector<Reg>& regsInPriorityOrderImpl(Arg::Type type)
+ {
+ switch (type) {
+ case Arg::GP:
+ return m_gpRegsInPriorityOrder;
+ case Arg::FP:
+ return m_fpRegsInPriorityOrder;
+ }
+ ASSERT_NOT_REACHED();
+ }
+
+ Procedure& m_proc; // Some meta-data, like byproducts, is stored in the Procedure.
+ Vector<Reg> m_gpRegsInPriorityOrder;
+ Vector<Reg> m_fpRegsInPriorityOrder;
+ RegisterSet m_mutableRegs;
+ SparseCollection<StackSlot> m_stackSlots;
+ Vector<std::unique_ptr<BasicBlock>> m_blocks;
+ SparseCollection<Special> m_specials;
+ HashSet<Tmp> m_fastTmps;
+ CCallSpecial* m_cCallSpecial { nullptr };
+ unsigned m_numGPTmps { 0 };
+ unsigned m_numFPTmps { 0 };
+ unsigned m_frameSize { 0 };
+ unsigned m_callArgAreaSize { 0 };
+ RegisterAtOffsetList m_calleeSaveRegisters;
+ Vector<FrequentedBlock> m_entrypoints; // This is empty until after lowerEntrySwitch().
+ Vector<CCallHelpers::Label> m_entrypointLabels; // This is empty until code generation.
+ RefPtr<WasmBoundsCheckGenerator> m_wasmBoundsCheckGenerator;
+ const char* m_lastPhaseName;
+};
+
+} } } // namespace JSC::B3::Air
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirCustom.cpp b/Source/JavaScriptCore/b3/air/AirCustom.cpp
new file mode 100644
index 000000000..2a2df2fbd
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCustom.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirCustom.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirInstInlines.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool PatchCustom::isValidForm(Inst& inst)
+{
+ if (inst.args.size() < 1)
+ return false;
+ if (!inst.args[0].isSpecial())
+ return false;
+ if (!inst.args[0].special()->isValid(inst))
+ return false;
+ RegisterSet clobberedEarly = inst.extraEarlyClobberedRegs();
+ RegisterSet clobberedLate = inst.extraClobberedRegs();
+ bool ok = true;
+ inst.forEachTmp(
+ [&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+ if (!tmp.isReg())
+ return;
+ if (Arg::isLateDef(role) || Arg::isLateUse(role))
+ ok &= !clobberedLate.get(tmp.reg());
+ else
+ ok &= !clobberedEarly.get(tmp.reg());
+ });
+ return ok;
+}
+
+bool CCallCustom::isValidForm(Inst& inst)
+{
+ CCallValue* value = inst.origin->as<CCallValue>();
+ if (!value)
+ return false;
+
+ if (inst.args.size() != (value->type() == Void ? 0 : 1) + value->numChildren())
+ return false;
+
+ // The arguments can only refer to the stack, tmps, or immediates.
+ for (Arg& arg : inst.args) {
+ if (!arg.isTmp() && !arg.isStackMemory() && !arg.isSomeImm())
+ return false;
+ }
+
+ unsigned offset = 0;
+
+ if (!inst.args[0].isGP())
+ return false;
+
+ // If there is a result then it cannot be an immediate.
+ if (value->type() != Void) {
+ if (inst.args[1].isSomeImm())
+ return false;
+ if (!inst.args[1].canRepresent(value))
+ return false;
+ offset++;
+ }
+
+ for (unsigned i = value->numChildren(); i-- > 1;) {
+ Value* child = value->child(i);
+ Arg arg = inst.args[offset + i];
+ if (!arg.canRepresent(child))
+ return false;
+ }
+
+ return true;
+}
+
+CCallHelpers::Jump CCallCustom::generate(Inst& inst, CCallHelpers&, GenerationContext&)
+{
+ dataLog("FATAL: Unlowered C call: ", inst, "\n");
+ UNREACHABLE_FOR_PLATFORM();
+ return CCallHelpers::Jump();
+}
+
+bool ShuffleCustom::isValidForm(Inst& inst)
+{
+ if (inst.args.size() % 3)
+ return false;
+
+ // A destination may only appear once. This requirement allows us to avoid the undefined behavior
+ // of having a destination that is supposed to get multiple inputs simultaneously. It also
+ // imposes some interesting constraints on the "shape" of the shuffle. If we treat a shuffle pair
+ // as an edge and the Args as nodes, then the single-destination requirement means that the
+ // shuffle graph consists of two kinds of subgraphs:
+ //
+ // - Spanning trees. We call these shifts. They can be executed as a sequence of Move
+ // instructions and don't usually require scratch registers.
+ //
+ // - Closed loops. These loops consist of nodes that have one successor and one predecessor, so
+ // there is no way to "get into" the loop from outside of it. These can be executed using swaps
+ // or by saving one of the Args to a scratch register and executing it as a shift.
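+    //
+    // As a purely illustrative example, written in the (src, dst, width) triple form that this
+    // instruction uses for its args: (t1 => t2, t2 => t3) is a shift, while (t1 => t2, t2 => t1)
+    // is a closed loop; in both cases every destination appears exactly once.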
+ HashSet<Arg> dsts;
+
+ for (unsigned i = 0; i < inst.args.size(); ++i) {
+ Arg arg = inst.args[i];
+ unsigned mode = i % 3;
+
+ if (mode == 2) {
+ // It's the width.
+ if (!arg.isWidthArg())
+ return false;
+ continue;
+ }
+
+ // The source can be an immediate.
+ if (!mode) {
+ if (arg.isSomeImm())
+ continue;
+
+ if (!arg.isCompatibleType(inst.args[i + 1]))
+ return false;
+ } else {
+ ASSERT(mode == 1);
+ if (!dsts.add(arg).isNewEntry)
+ return false;
+ }
+
+ if (arg.isTmp() || arg.isMemory())
+ continue;
+
+ return false;
+ }
+
+ // No destination register may appear in any address expressions. The lowering can't handle it
+    // and it's not useful for the way we end up using Shuffles. Normally, Shuffles are only used for
+ // stack addresses and non-stack registers.
+ for (Arg& arg : inst.args) {
+ if (!arg.isMemory())
+ continue;
+ bool ok = true;
+ arg.forEachTmpFast(
+ [&] (Tmp tmp) {
+ if (dsts.contains(tmp))
+ ok = false;
+ });
+ if (!ok)
+ return false;
+ }
+
+ return true;
+}
+
+CCallHelpers::Jump ShuffleCustom::generate(Inst& inst, CCallHelpers&, GenerationContext&)
+{
+ dataLog("FATAL: Unlowered shuffle: ", inst, "\n");
+ UNREACHABLE_FOR_PLATFORM();
+ return CCallHelpers::Jump();
+}
+
+bool WasmBoundsCheckCustom::isValidForm(Inst& inst)
+{
+ if (inst.args.size() != 2)
+ return false;
+ if (!inst.args[0].isTmp() && !inst.args[0].isSomeImm())
+ return false;
+
+ return inst.args[1].isReg();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirCustom.h b/Source/JavaScriptCore/b3/air/AirCustom.h
new file mode 100644
index 000000000..cddc03857
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirCustom.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "AirInst.h"
+#include "AirSpecial.h"
+#include "B3ValueInlines.h"
+#include "B3WasmBoundsCheckValue.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+// This defines the behavior of custom instructions - i.e. those whose behavior cannot be
+// described using AirOpcode.opcodes. If you define an opcode as "custom Foo" in that file, then
+// you will need to create a "struct FooCustom" here that implements the custom behavior
+// methods.
+//
+// The customizability granted by the custom instruction mechanism is strictly less than what
+// you get using the Patch instruction and implementing a Special. However, that path requires
+// allocating a Special object and ensuring that it's the first operand. For many instructions,
+// that is not as convenient as using Custom, which makes the instruction look like any other
+// instruction. Note that both of those extra powers of the Patch instruction happen because we
+// special-case that instruction in many phases and analyses. Non-special-cased behaviors of
+// Patch are implemented using the custom instruction mechanism.
+//
+// Specials are still more flexible if you need to list extra clobbered registers and you'd like
+// that to be expressed as a bitvector rather than an arglist. They are also more flexible if
+// you need to carry extra state around with the instruction. Also, Specials mean that you
+// always have access to Code& even in methods that don't take a GenerationContext.
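+//
+// As a rough sketch (the opcode name "Foo" is made up; the method list mirrors the structs defined
+// below in this file), a "custom Foo" opcode would be backed by something like:
+//
+//     struct FooCustom {
+//         template<typename Functor> static void forEachArg(Inst&, const Functor&);
+//         template<typename... Arguments> static bool isValidFormStatic(Arguments...);
+//         static bool isValidForm(Inst&);
+//         static bool admitsStack(Inst&, unsigned argIndex);
+//         static bool isTerminal(Inst&);
+//         static bool hasNonArgNonControlEffects(Inst&);
+//         static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&);
+//     };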
+
+// Definition of Patch instruction. Patch is used to delegate the behavior of the instruction to the
+// Special object, which will be the first argument to the instruction.
+struct PatchCustom {
+ template<typename Functor>
+ static void forEachArg(Inst& inst, const Functor& functor)
+ {
+ // This is basically bogus, but it works for analyses that model Special as an
+ // immediate.
+ functor(inst.args[0], Arg::Use, Arg::GP, Arg::pointerWidth());
+
+ inst.args[0].special()->forEachArg(inst, scopedLambda<Inst::EachArgCallback>(functor));
+ }
+
+ template<typename... Arguments>
+ static bool isValidFormStatic(Arguments...)
+ {
+ return false;
+ }
+
+ static bool isValidForm(Inst& inst);
+
+ static bool admitsStack(Inst& inst, unsigned argIndex)
+ {
+ if (!argIndex)
+ return false;
+ return inst.args[0].special()->admitsStack(inst, argIndex);
+ }
+
+ static std::optional<unsigned> shouldTryAliasingDef(Inst& inst)
+ {
+ return inst.args[0].special()->shouldTryAliasingDef(inst);
+ }
+
+ static bool isTerminal(Inst& inst)
+ {
+ return inst.args[0].special()->isTerminal(inst);
+ }
+
+ static bool hasNonArgEffects(Inst& inst)
+ {
+ return inst.args[0].special()->hasNonArgEffects(inst);
+ }
+
+ static bool hasNonArgNonControlEffects(Inst& inst)
+ {
+ return inst.args[0].special()->hasNonArgNonControlEffects(inst);
+ }
+
+ static CCallHelpers::Jump generate(
+ Inst& inst, CCallHelpers& jit, GenerationContext& context)
+ {
+ return inst.args[0].special()->generate(inst, jit, context);
+ }
+};
+
+template<typename Subtype>
+struct CommonCustomBase {
+ static bool hasNonArgEffects(Inst& inst)
+ {
+ return Subtype::isTerminal(inst) || Subtype::hasNonArgNonControlEffects(inst);
+ }
+};
+
+// Definition of CCall instruction. CCall is used for hot path C function calls. It's lowered to a
+// Patch with an Air CCallSpecial along with code to marshal the arguments. The lowering happens
+// before register allocation, so that the register allocator sees the clobbers.
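+//
+// For example (illustrative only): a CCallValue of type Int64 whose children are (callee, a, b)
+// lowers to a CCall Inst whose args are (callee, result, a, b); for a Void call the result arg is
+// simply omitted. This matches the forEachArg() and isValidForm() definitions below.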
+struct CCallCustom : public CommonCustomBase<CCallCustom> {
+ template<typename Functor>
+ static void forEachArg(Inst& inst, const Functor& functor)
+ {
+ Value* value = inst.origin;
+
+ unsigned index = 0;
+
+ functor(inst.args[index++], Arg::Use, Arg::GP, Arg::pointerWidth()); // callee
+
+ if (value->type() != Void) {
+ functor(
+ inst.args[index++], Arg::Def,
+ Arg::typeForB3Type(value->type()),
+ Arg::widthForB3Type(value->type()));
+ }
+
+ for (unsigned i = 1; i < value->numChildren(); ++i) {
+ Value* child = value->child(i);
+ functor(
+ inst.args[index++], Arg::Use,
+ Arg::typeForB3Type(child->type()),
+ Arg::widthForB3Type(child->type()));
+ }
+ }
+
+ template<typename... Arguments>
+ static bool isValidFormStatic(Arguments...)
+ {
+ return false;
+ }
+
+ static bool isValidForm(Inst&);
+
+ static bool admitsStack(Inst&, unsigned)
+ {
+ return true;
+ }
+
+ static bool isTerminal(Inst&)
+ {
+ return false;
+ }
+
+ static bool hasNonArgNonControlEffects(Inst&)
+ {
+ return true;
+ }
+
+ // This just crashes, since we expect C calls to be lowered before generation.
+ static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&);
+};
+
+struct ColdCCallCustom : CCallCustom {
+ template<typename Functor>
+ static void forEachArg(Inst& inst, const Functor& functor)
+ {
+ // This is just like a call, but uses become cold.
+ CCallCustom::forEachArg(
+ inst,
+ [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+ functor(arg, Arg::cooled(role), type, width);
+ });
+ }
+};
+
+struct ShuffleCustom : public CommonCustomBase<ShuffleCustom> {
+ template<typename Functor>
+ static void forEachArg(Inst& inst, const Functor& functor)
+ {
+ unsigned limit = inst.args.size() / 3 * 3;
+ for (unsigned i = 0; i < limit; i += 3) {
+ Arg& src = inst.args[i + 0];
+ Arg& dst = inst.args[i + 1];
+ Arg& widthArg = inst.args[i + 2];
+ Arg::Width width = widthArg.width();
+ Arg::Type type = src.isGP() && dst.isGP() ? Arg::GP : Arg::FP;
+ functor(src, Arg::Use, type, width);
+ functor(dst, Arg::Def, type, width);
+ functor(widthArg, Arg::Use, Arg::GP, Arg::Width8);
+ }
+ }
+
+ template<typename... Arguments>
+ static bool isValidFormStatic(Arguments...)
+ {
+ return false;
+ }
+
+ static bool isValidForm(Inst&);
+
+ static bool admitsStack(Inst&, unsigned index)
+ {
+ switch (index % 3) {
+ case 0:
+ case 1:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isTerminal(Inst&)
+ {
+ return false;
+ }
+
+ static bool hasNonArgNonControlEffects(Inst&)
+ {
+ return false;
+ }
+
+ static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&);
+};
+
+struct EntrySwitchCustom : public CommonCustomBase<EntrySwitchCustom> {
+ template<typename Func>
+ static void forEachArg(Inst&, const Func&)
+ {
+ }
+
+ template<typename... Arguments>
+ static bool isValidFormStatic(Arguments...)
+ {
+ return !sizeof...(Arguments);
+ }
+
+ static bool isValidForm(Inst& inst)
+ {
+ return inst.args.isEmpty();
+ }
+
+ static bool admitsStack(Inst&, unsigned)
+ {
+ return false;
+ }
+
+ static bool isTerminal(Inst&)
+ {
+ return true;
+ }
+
+ static bool hasNonArgNonControlEffects(Inst&)
+ {
+ return false;
+ }
+
+ static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&)
+ {
+ // This should never be reached because we should have lowered EntrySwitch before
+ // generation.
+ UNREACHABLE_FOR_PLATFORM();
+ return CCallHelpers::Jump();
+ }
+};
+
+struct WasmBoundsCheckCustom : public CommonCustomBase<WasmBoundsCheckCustom> {
+ template<typename Func>
+ static void forEachArg(Inst& inst, const Func& functor)
+ {
+ functor(inst.args[0], Arg::Use, Arg::GP, Arg::Width64);
+ functor(inst.args[1], Arg::Use, Arg::GP, Arg::Width64);
+ }
+
+ template<typename... Arguments>
+ static bool isValidFormStatic(Arguments...)
+ {
+ return false;
+ }
+
+ static bool isValidForm(Inst&);
+
+ static bool admitsStack(Inst&, unsigned)
+ {
+ return false;
+ }
+
+ static bool isTerminal(Inst&)
+ {
+ return false;
+ }
+
+ static bool hasNonArgNonControlEffects(Inst&)
+ {
+ return true;
+ }
+
+ static CCallHelpers::Jump generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
+ {
+ WasmBoundsCheckValue* value = inst.origin->as<WasmBoundsCheckValue>();
+ CCallHelpers::Jump outOfBounds = Inst(Air::Branch64, value, Arg::relCond(CCallHelpers::AboveOrEqual), inst.args[0], inst.args[1]).generate(jit, context);
+
+ context.latePaths.append(createSharedTask<GenerationContext::LatePathFunction>(
+ [outOfBounds, value] (CCallHelpers& jit, Air::GenerationContext& context) {
+ outOfBounds.link(&jit);
+ context.code->wasmBoundsCheckGenerator()->run(jit, value->pinnedGPR(), value->offset());
+ }));
+
+ // We said we were not a terminal.
+ return CCallHelpers::Jump();
+ }
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp b/Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp
new file mode 100644
index 000000000..3d8d6fb41
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirDumpAsJS.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirDumpAsJS.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+CString varNameForBlockAtIndex(unsigned index)
+{
+ return toCString("bb", index);
+}
+
+CString varName(BasicBlock* block)
+{
+ return varNameForBlockAtIndex(block->index());
+}
+
+CString varNameForStackSlotAtIndex(unsigned index)
+{
+ return toCString("slot", index);
+}
+
+CString varName(StackSlot* slot)
+{
+ return varNameForStackSlotAtIndex(slot->index());
+}
+
+CString varName(Reg reg)
+{
+ return toCString("Reg.", reg.debugName());
+}
+
+CString varNameForTmpWithTypeAndIndex(Arg::Type type, unsigned index)
+{
+ return toCString(type == Arg::FP ? "f" : "", "tmp", index);
+}
+
+CString varName(Tmp tmp)
+{
+ if (tmp.isReg())
+ return varName(tmp.reg());
+ return varNameForTmpWithTypeAndIndex(Arg(tmp).type(), tmp.tmpIndex());
+}
+
+} // anonymous namespace
+
+void dumpAsJS(Code& code, PrintStream& out)
+{
+ out.println("let code = new Code();");
+
+ for (unsigned i = 0; i < code.size(); ++i)
+ out.println("let ", varNameForBlockAtIndex(i), " = code.addBlock();");
+
+ out.println("let hash;");
+
+ for (unsigned i = 0; i < code.stackSlots().size(); ++i) {
+ StackSlot* slot = code.stackSlots()[i];
+ if (slot) {
+ out.println("let ", varName(slot), " = code.addStackSlot(", slot->byteSize(), ", ", slot->kind(), ");");
+ if (slot->offsetFromFP())
+ out.println(varName(slot), ".setOffsetFromFP(", slot->offsetFromFP(), ");");
+ out.println("hash = ", varName(slot), ".hash();");
+ out.println("if (hash != ", slot->jsHash(), ")");
+ out.println(" throw new Error(\"Bad hash: \" + hash);");
+ } else
+ out.println("code.addStackSlot(1, Spill);");
+ }
+
+ Arg::forEachType(
+ [&] (Arg::Type type) {
+ for (unsigned i = code.numTmps(type); i--;) {
+ out.println(
+ "let ", varNameForTmpWithTypeAndIndex(type, i), " = code.newTmp(", type, ");");
+ }
+ });
+
+ out.println("let inst;");
+ out.println("let arg;");
+
+ for (BasicBlock* block : code) {
+ for (FrequentedBlock successor : block->successors()) {
+ out.println(
+ varName(block), ".successors.push(new FrequentedBlock(",
+ varName(successor.block()), ", ", successor.frequency(), "));");
+ }
+
+ for (BasicBlock* predecessor : block->predecessors())
+ out.println(varName(block), ".predecessors.push(", varName(predecessor), ");");
+
+ for (Inst& inst : *block) {
+ // FIXME: This should do something for flags.
+ // https://bugs.webkit.org/show_bug.cgi?id=162751
+ out.println("inst = new Inst(", inst.kind.opcode, ");");
+
+ inst.forEachArg(
+ [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+ switch (arg.kind()) {
+ case Arg::Invalid:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+
+ case Arg::Tmp:
+ out.println("arg = Arg.createTmp(", varName(arg.tmp()), ");");
+ break;
+
+ case Arg::Imm:
+ out.println("arg = Arg.createImm(", arg.value(), ");");
+ break;
+
+ case Arg::BigImm:
+ out.println(
+ "arg = Arg.createBigImm(",
+ static_cast<int32_t>(arg.value()), ", ",
+ static_cast<int32_t>(arg.value() >> 32), ");");
+ break;
+
+ case Arg::BitImm:
+ out.println("arg = Arg.createBitImm(", arg.value(), ");");
+ break;
+
+ case Arg::BitImm64:
+ out.println(
+ "arg = Arg.createBitImm64(",
+ static_cast<int32_t>(arg.value()), ", ",
+ static_cast<int32_t>(arg.value() >> 32), ");");
+ break;
+
+ case Arg::Addr:
+ out.println(
+ "arg = Arg.createAddr(", varName(arg.base()), ", ", arg.offset(), ");");
+ break;
+
+ case Arg::Stack:
+ out.println(
+ "arg = Arg.createStack(", varName(arg.stackSlot()), ", ", arg.offset(), ");");
+ break;
+
+ case Arg::CallArg:
+ out.println("arg = Arg.createCallArg(", arg.offset(), ");");
+ break;
+
+ case Arg::Index:
+ out.println(
+ "arg = Arg.createIndex(", varName(arg.base()), ", ",
+ varName(arg.index()), ", ", arg.scale(), ", ", arg.offset(), ");");
+ break;
+
+ case Arg::RelCond:
+ out.println("arg = Arg.createRelCond(", arg.asRelationalCondition(), ");");
+ break;
+
+ case Arg::ResCond:
+ out.println("arg = Arg.createResCond(", arg.asResultCondition(), ");");
+ break;
+
+ case Arg::DoubleCond:
+ out.println("arg = Arg.createDoubleCond(", arg.asDoubleCondition(), ");");
+ break;
+
+ case Arg::Special:
+ out.println("arg = Arg.createSpecial();");
+ break;
+
+ case Arg::WidthArg:
+ out.println("arg = Arg.createWidthArg(", arg.width(), ");");
+ break;
+ }
+
+ out.println("inst.args.push(arg);");
+ });
+
+ if (inst.kind.opcode == Patch) {
+ if (inst.hasNonArgEffects())
+ out.println("inst.patchHasNonArgEffects = true;");
+
+ out.println("inst.extraEarlyClobberedRegs = new Set();");
+ out.println("inst.extraClobberedRegs = new Set();");
+ inst.extraEarlyClobberedRegs().forEach(
+ [&] (Reg reg) {
+ out.println("inst.extraEarlyClobberedRegs.add(", varName(reg), ");");
+ });
+ inst.extraClobberedRegs().forEach(
+ [&] (Reg reg) {
+ out.println("inst.extraClobberedRegs.add(", varName(reg), ");");
+ });
+
+ out.println("inst.patchArgData = [];");
+ inst.forEachArg(
+ [&] (Arg&, Arg::Role role, Arg::Type type, Arg::Width width) {
+ out.println(
+ "inst.patchArgData.push({role: Arg.", role, ", type: ", type,
+ ", width: ", width, "});");
+ });
+ }
+
+ if (inst.kind.opcode == CCall || inst.kind.opcode == ColdCCall) {
+                out.println("inst.cCallType = ", inst.origin->type(), ";");
+ out.println("inst.cCallArgTypes = [];");
+ for (unsigned i = 1; i < inst.origin->numChildren(); ++i)
+ out.println("inst.cCallArgTypes.push(", inst.origin->child(i)->type(), ");");
+ }
+
+ out.println("hash = inst.hash();");
+ out.println("if (hash != ", inst.jsHash(), ")");
+ out.println(" throw new Error(\"Bad hash: \" + hash);");
+
+ out.println(varName(block), ".append(inst);");
+ }
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirDumpAsJS.h b/Source/JavaScriptCore/b3/air/AirDumpAsJS.h
new file mode 100644
index 000000000..8895f5801
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirDumpAsJS.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is used for benchmarking. Various operations on Air are interesting from a benchmarking
+// standpoint. We can write some Air phases in JS and then use that to benchmark JS. The benchmark
+// is called JSAir, and it's in PerformanceTests/JSAir.
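+//
+// The output is plain JS that rebuilds the Code object and cross-checks hashes, roughly along
+// these lines (illustrative excerpt):
+//
+//     let code = new Code();
+//     let bb0 = code.addBlock();
+//     let tmp0 = code.newTmp(GP);
+//     inst = new Inst(Move);
+//     arg = Arg.createTmp(tmp0);
+//     inst.args.push(arg);
+//     bb0.append(inst);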
+void dumpAsJS(Code&, PrintStream&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.cpp b/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.cpp
new file mode 100644
index 000000000..ca36af93e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirEliminateDeadCode.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool eliminateDeadCode(Code& code)
+{
+ PhaseScope phaseScope(code, "eliminateDeadCode");
+
+ HashSet<Tmp> liveTmps;
+ IndexSet<StackSlot> liveStackSlots;
+ bool changed;
+
+ auto isArgLive = [&] (const Arg& arg) -> bool {
+ switch (arg.kind()) {
+ case Arg::Tmp:
+ if (arg.isReg())
+ return true;
+ return liveTmps.contains(arg.tmp());
+ case Arg::Stack:
+ if (arg.stackSlot()->isLocked())
+ return true;
+ return liveStackSlots.contains(arg.stackSlot());
+ default:
+ return true;
+ }
+ };
+
+ auto addLiveArg = [&] (const Arg& arg) -> bool {
+ switch (arg.kind()) {
+ case Arg::Tmp:
+ if (arg.isReg())
+ return false;
+ return liveTmps.add(arg.tmp()).isNewEntry;
+ case Arg::Stack:
+ if (arg.stackSlot()->isLocked())
+ return false;
+ return liveStackSlots.add(arg.stackSlot());
+ default:
+ return false;
+ }
+ };
+
+ auto isInstLive = [&] (Inst& inst) -> bool {
+ if (inst.hasNonArgEffects())
+ return true;
+
+        // This instruction should be presumed dead if its Args are all dead.
+ bool storesToLive = false;
+ inst.forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+ if (!Arg::isAnyDef(role))
+ return;
+ if (role == Arg::Scratch)
+ return;
+ storesToLive |= isArgLive(arg);
+ });
+ return storesToLive;
+ };
+
+ auto handleInst = [&] (Inst& inst) {
+ if (!isInstLive(inst))
+ return;
+
+ // We get here if the Inst is live. For simplicity we say that a live instruction forces
+ // liveness upon everything it mentions.
+ for (Arg& arg : inst.args) {
+ changed |= addLiveArg(arg);
+ arg.forEachTmpFast(
+ [&] (Tmp& tmp) {
+ changed |= addLiveArg(tmp);
+ });
+ }
+ };
+
+ auto runForward = [&] () -> bool {
+ changed = false;
+ for (BasicBlock* block : code) {
+ for (Inst& inst : *block)
+ handleInst(inst);
+ }
+ return changed;
+ };
+
+ auto runBackward = [&] () -> bool {
+ changed = false;
+ for (unsigned blockIndex = code.size(); blockIndex--;) {
+ BasicBlock* block = code[blockIndex];
+ for (unsigned instIndex = block->size(); instIndex--;)
+ handleInst(block->at(instIndex));
+ }
+ return changed;
+ };
+
+ for (;;) {
+ // Propagating backward is most likely to be profitable.
+ if (!runBackward())
+ break;
+ if (!runBackward())
+ break;
+
+ // Occasionally propagating forward greatly reduces the likelihood of pathologies.
+ if (!runForward())
+ break;
+ }
+
+ unsigned removedInstCount = 0;
+ for (BasicBlock* block : code) {
+ removedInstCount += block->insts().removeAllMatching(
+ [&] (Inst& inst) -> bool {
+ return !isInstLive(inst);
+ });
+ }
+
+ return !!removedInstCount;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.h b/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.h
new file mode 100644
index 000000000..1b718f63d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirEliminateDeadCode.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This eliminates instructions that have no observable effect. These are instructions whose only
+// effect would be storing to some Arg, except that we proved that the location specified by the Arg
+// is never loaded from. The only Args for which we can do such analysis are non-Reg Tmps and
+// anonymous StackSlots.
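+//
+// For instance (illustrative), a "Move src, tmp" where tmp is a non-Reg Tmp that no live
+// instruction ever reads has no observable effect and is removable, whereas a store to a real
+// register or to a locked StackSlot is always treated as live.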
+
+bool eliminateDeadCode(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp b/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp
new file mode 100644
index 000000000..318471976
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirEmitShuffle.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+
+template<typename Functor>
+Tmp findPossibleScratch(Code& code, Arg::Type type, const Functor& functor)
+{
+ for (Reg reg : code.regsInPriorityOrder(type)) {
+ Tmp tmp(reg);
+ if (functor(tmp))
+ return tmp;
+ }
+ return Tmp();
+}
+
+Tmp findPossibleScratch(Code& code, Arg::Type type, const Arg& arg1, const Arg& arg2)
+{
+ return findPossibleScratch(
+ code, type,
+ [&] (Tmp tmp) -> bool {
+ return !arg1.usesTmp(tmp) && !arg2.usesTmp(tmp);
+ });
+}
+
+// Example: (a => b, b => a, a => c, b => d)
+struct Rotate {
+ Vector<ShufflePair> loop; // in the example, this is the loop: (a => b, b => a)
+ Vector<ShufflePair> fringe; // in the example, these are the associated shifts: (a => c, b => d)
+};
+
+} // anonymous namespace
+
+void ShufflePair::dump(PrintStream& out) const
+{
+ out.print(width(), ":", src(), "=>", dst());
+}
+
+Inst createShuffle(Value* origin, const Vector<ShufflePair>& pairs)
+{
+ Inst result(Shuffle, origin);
+ for (const ShufflePair& pair : pairs)
+ result.append(pair.src(), pair.dst(), Arg::widthArg(pair.width()));
+ return result;
+}
+
+Vector<Inst> emitShuffle(
+ Code& code, Vector<ShufflePair> pairs, std::array<Arg, 2> scratches, Arg::Type type,
+ Value* origin)
+{
+ if (verbose) {
+ dataLog(
+ "Dealing with pairs: ", listDump(pairs), " and scratches ", scratches[0], ", ",
+ scratches[1], "\n");
+ }
+
+ pairs.removeAllMatching(
+ [&] (const ShufflePair& pair) -> bool {
+ return pair.src() == pair.dst();
+ });
+
+ // First validate that this is the kind of shuffle that we know how to deal with.
+#if !ASSERT_DISABLED
+ for (const ShufflePair& pair : pairs) {
+ ASSERT(pair.src().isType(type));
+ ASSERT(pair.dst().isType(type));
+ ASSERT(pair.dst().isTmp() || pair.dst().isMemory());
+ }
+#endif // !ASSERT_DISABLED
+
+ // There are two possible kinds of operations that we will do:
+ //
+ // - Shift. Example: (a => b, b => c). We emit this as "Move b, c; Move a, b". This only requires
+ // scratch registers if there are memory->memory moves. We want to find as many of these as
+ // possible because they are cheaper. Note that shifts can involve the same source mentioned
+ // multiple times. Example: (a => b, a => c, b => d, b => e).
+ //
+ // - Rotate. Example: (a => b, b => a). We want to emit this as "Swap a, b", but that instruction
+ // may not be available, in which case we may need a scratch register or a scratch memory
+ // location. A gnarlier example is (a => b, b => c, c => a). We can emit this as "Swap b, c;
+ // Swap a, b". Note that swapping has to be careful about differing widths.
+ //
+    // Note that a rotate can have "fringe". For example, we might have (a => b, b => a, a => c,
+ // b => d). This has a rotate loop (a => b, b => a) and some fringe (a => c, b => d). We treat
+ // the whole thing as a single rotate.
+ //
+ // We will find multiple disjoint such operations. We can execute them in any order.
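+    //
+    // As an illustrative example (assuming a Swap instruction is available and everything lives in
+    // registers), the rotate-with-fringe above, (a => b, b => a, a => c, b => d), is emitted as the
+    // two fringe moves for (a => c) and (b => d) followed by a single "Swap a, b".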
+
+ // We interpret these as Moves that should be executed backwards. All shifts are keyed by their
+ // starting source.
+ HashMap<Arg, Vector<ShufflePair>> shifts;
+
+ // We interpret these as Swaps over src()'s that should be executed backwards, i.e. for a list
+ // of size 3 we would do "Swap list[1].src(), list[2].src(); Swap list[0].src(), list[1].src()".
+ // Note that we actually can't do that if the widths don't match or other bad things happen.
+    // But, prior to executing all of that, we need to execute the fringe: the shifts coming off the
+ // rotate.
+ Vector<Rotate> rotates;
+
+ {
+ HashMap<Arg, Vector<ShufflePair>> mapping;
+ for (const ShufflePair& pair : pairs)
+ mapping.add(pair.src(), Vector<ShufflePair>()).iterator->value.append(pair);
+
+ Vector<ShufflePair> currentPairs;
+
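+        // Each iteration walks everything reachable from one remaining source and classifies the
+        // result as either a shift or a rotate.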
+ while (!mapping.isEmpty()) {
+ ASSERT(currentPairs.isEmpty());
+ Arg originalSrc = mapping.begin()->key;
+ ASSERT(!shifts.contains(originalSrc));
+ if (verbose)
+ dataLog("Processing from ", originalSrc, "\n");
+
+ GraphNodeWorklist<Arg> worklist;
+ worklist.push(originalSrc);
+ while (Arg src = worklist.pop()) {
+ HashMap<Arg, Vector<ShufflePair>>::iterator iter = mapping.find(src);
+ if (iter == mapping.end()) {
+ // With a shift it's possible that we previously built the tail of this shift.
+ // See if that's the case now.
+ if (verbose)
+ dataLog("Trying to append shift at ", src, "\n");
+ currentPairs.appendVector(shifts.take(src));
+ continue;
+ }
+ Vector<ShufflePair> pairs = WTFMove(iter->value);
+ mapping.remove(iter);
+
+ for (const ShufflePair& pair : pairs) {
+ currentPairs.append(pair);
+ ASSERT(pair.src() == src);
+ worklist.push(pair.dst());
+ }
+ }
+
+ ASSERT(currentPairs.size());
+ ASSERT(currentPairs[0].src() == originalSrc);
+
+ if (verbose)
+ dataLog("currentPairs = ", listDump(currentPairs), "\n");
+
+ bool isRotate = false;
+ for (const ShufflePair& pair : currentPairs) {
+ if (pair.dst() == originalSrc) {
+ isRotate = true;
+ break;
+ }
+ }
+
+ if (isRotate) {
+ if (verbose)
+ dataLog("It's a rotate.\n");
+ Rotate rotate;
+
+ // The common case is that the rotate does not have fringe. The only way to
+ // check for this is to examine the whole rotate.
+ bool ok;
+ if (currentPairs.last().dst() == originalSrc) {
+ ok = true;
+ for (unsigned i = currentPairs.size() - 1; i--;)
+ ok &= currentPairs[i].dst() == currentPairs[i + 1].src();
+ } else
+ ok = false;
+
+ if (ok)
+ rotate.loop = WTFMove(currentPairs);
+ else {
+ // This is the slow path. The rotate has fringe.
+
+ HashMap<Arg, ShufflePair> dstMapping;
+ for (const ShufflePair& pair : currentPairs)
+ dstMapping.add(pair.dst(), pair);
+
+ ShufflePair pair = dstMapping.take(originalSrc);
+ for (;;) {
+ rotate.loop.append(pair);
+
+ auto iter = dstMapping.find(pair.src());
+ if (iter == dstMapping.end())
+ break;
+ pair = iter->value;
+ dstMapping.remove(iter);
+ }
+
+ rotate.loop.reverse();
+
+                    // Make sure that the fringe appears in the same order as it appeared in
+                    // currentPairs, since that's the DFS order.
+ for (const ShufflePair& pair : currentPairs) {
+ // But of course we only include it if it's not in the loop.
+ if (dstMapping.contains(pair.dst()))
+ rotate.fringe.append(pair);
+ }
+ }
+
+ // If the graph search terminates because we returned to the first source, then the
+ // pair list has to have a very particular shape.
+ for (unsigned i = rotate.loop.size() - 1; i--;)
+ ASSERT(rotate.loop[i].dst() == rotate.loop[i + 1].src());
+ rotates.append(WTFMove(rotate));
+ currentPairs.resize(0);
+ } else {
+ if (verbose)
+ dataLog("It's a shift.\n");
+ shifts.add(originalSrc, WTFMove(currentPairs));
+ }
+ }
+ }
+
+ if (verbose) {
+ dataLog("Shifts:\n");
+ for (auto& entry : shifts)
+ dataLog(" ", entry.key, ": ", listDump(entry.value), "\n");
+ dataLog("Rotates:\n");
+ for (auto& rotate : rotates)
+ dataLog(" loop = ", listDump(rotate.loop), ", fringe = ", listDump(rotate.fringe), "\n");
+ }
+
+ // In the worst case, we need two scratch registers. The way we do this is that the client passes
+    // us whatever scratch registers it happens to have lying around. We will need scratch registers in
+ // the following cases:
+ //
+ // - Shuffle pairs where both src and dst refer to memory.
+ // - Rotate when no Swap instruction is available.
+ //
+ // Lucky for us, we are guaranteed to have extra scratch registers anytime we have a Shift that
+ // ends with a register. We search for such a register right now.
+
+ auto moveForWidth = [&] (Arg::Width width) -> Opcode {
+ switch (width) {
+ case Arg::Width32:
+ return type == Arg::GP ? Move32 : MoveFloat;
+ case Arg::Width64:
+ return type == Arg::GP ? Move : MoveDouble;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
+
+ Opcode conservativeMove = moveForWidth(Arg::conservativeWidth(type));
+
+    // We will emit things in reverse. We maintain a list of packs of instructions, and then we
+    // append them together in reverse (for example the thing at the end of resultPacks is placed
+ // first). This is useful because the last thing we emit frees up its destination registers, so
+ // it affects how we emit things before it.
+ Vector<Vector<Inst>> resultPacks;
+ Vector<Inst> result;
+
+ auto commitResult = [&] () {
+ resultPacks.append(WTFMove(result));
+ };
+
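+    // If the client gave us a register for this scratch slot, just use it. Otherwise, borrow
+    // possibleScratch: save its current value into the memory scratch location and hand the
+    // register out; returnScratch() restores it when we're done.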
+ auto getScratch = [&] (unsigned index, Tmp possibleScratch) -> Tmp {
+ if (scratches[index].isTmp())
+ return scratches[index].tmp();
+
+ if (!possibleScratch)
+ return Tmp();
+ result.append(Inst(conservativeMove, origin, possibleScratch, scratches[index]));
+ return possibleScratch;
+ };
+
+ auto returnScratch = [&] (unsigned index, Tmp tmp) {
+ if (Arg(tmp) != scratches[index])
+ result.append(Inst(conservativeMove, origin, scratches[index], tmp));
+ };
+
+ auto handleShiftPair = [&] (const ShufflePair& pair, unsigned scratchIndex) {
+ Opcode move = moveForWidth(pair.width());
+
+ if (!isValidForm(move, pair.src().kind(), pair.dst().kind())) {
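+            // The move form is invalid (e.g. memory-to-memory, or an immediate the instruction
+            // cannot encode), so bounce the value through a scratch register.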
+ Tmp scratch =
+ getScratch(scratchIndex, findPossibleScratch(code, type, pair.src(), pair.dst()));
+ RELEASE_ASSERT(scratch);
+ if (isValidForm(move, pair.src().kind(), Arg::Tmp))
+ result.append(Inst(moveForWidth(pair.width()), origin, pair.src(), scratch));
+ else {
+ ASSERT(pair.src().isSomeImm());
+ ASSERT(move == Move32);
+ result.append(Inst(Move, origin, Arg::bigImm(pair.src().value()), scratch));
+ }
+ result.append(Inst(moveForWidth(pair.width()), origin, scratch, pair.dst()));
+ returnScratch(scratchIndex, scratch);
+ return;
+ }
+
+ result.append(Inst(move, origin, pair.src(), pair.dst()));
+ };
+
+ auto handleShift = [&] (Vector<ShufflePair>& shift) {
+ // FIXME: We could optimize the spill behavior of the shifter by checking if any of the
+ // shifts need spills. If they do, then we could try to get a register out here. Note that
+ // this may fail where the current strategy succeeds: out here we need a register that does
+ // not interfere with any of the shifts, while the current strategy only needs to find a
+ // scratch register that does not interfer with a particular shift. So, this optimization
+ // will be opportunistic: if it succeeds, then the individual shifts can use that scratch,
+ // otherwise they will do what they do now.
+
+ for (unsigned i = shift.size(); i--;)
+ handleShiftPair(shift[i], 0);
+
+ Arg lastDst = shift.last().dst();
+ if (lastDst.isTmp()) {
+ for (Arg& scratch : scratches) {
+ ASSERT(scratch != lastDst);
+ if (!scratch.isTmp()) {
+ scratch = lastDst;
+ break;
+ }
+ }
+ }
+ };
+
+ // First handle shifts whose last destination is a tmp because these free up scratch registers.
+ // These end up last in the final sequence, so the final destination of these shifts will be
+ // available as a scratch location for anything emitted prior (so, after, since we're emitting in
+ // reverse).
+ for (auto& entry : shifts) {
+ Vector<ShufflePair>& shift = entry.value;
+ if (shift.last().dst().isTmp())
+ handleShift(shift);
+ commitResult();
+ }
+
+ // Now handle the rest of the shifts.
+ for (auto& entry : shifts) {
+ Vector<ShufflePair>& shift = entry.value;
+ if (!shift.last().dst().isTmp())
+ handleShift(shift);
+ commitResult();
+ }
+
+ for (Rotate& rotate : rotates) {
+ if (!rotate.fringe.isEmpty()) {
+ // Make sure we do the fringe first! This won't clobber any of the registers that are
+ // part of the rotation.
+ handleShift(rotate.fringe);
+ }
+
+ bool canSwap = false;
+ Opcode swap = Oops;
+ Arg::Width swapWidth = Arg::Width8; // bogus value
+
+ // Currently, the swap instruction is not available for floating point on any architecture we
+ // support.
+ if (type == Arg::GP) {
+ // Figure out whether we will be doing 64-bit swaps or 32-bit swaps. If we have a mix of
+ // widths we handle that by fixing up the relevant register with zero-extends.
+ swap = Swap32;
+ swapWidth = Arg::Width32;
+ bool hasMemory = false;
+ bool hasIndex = false;
+ for (ShufflePair& pair : rotate.loop) {
+ switch (pair.width()) {
+ case Arg::Width32:
+ break;
+ case Arg::Width64:
+ swap = Swap64;
+ swapWidth = Arg::Width64;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ hasMemory |= pair.src().isMemory() || pair.dst().isMemory();
+ hasIndex |= pair.src().isIndex() || pair.dst().isIndex();
+ }
+
+ canSwap = isValidForm(swap, Arg::Tmp, Arg::Tmp);
+
+ // We can totally use swaps even if there are shuffles involving memory. But, we play it
+ // safe in that case. There are corner cases we don't handle, and our ability to do it is
+ // contingent upon swap form availability.
+
+ if (hasMemory) {
+ canSwap &= isValidForm(swap, Arg::Tmp, Arg::Addr);
+
+ // We don't take the swapping path if there is a mix of widths and some of the
+ // shuffles involve memory. That gets too confusing. We might be able to relax this
+ // to only bail if there are subwidth pairs involving memory, but I haven't thought
+ // about it very hard. Anyway, this case is not common: rotates involving memory
+ // don't arise for function calls, and they will only happen for rotates in user code
+ // if some of the variables get spilled. It's hard to imagine a program that rotates
+ // data around in variables while also doing a combination of uint32->uint64 and
+ // int64->int32 casts.
+ for (ShufflePair& pair : rotate.loop)
+ canSwap &= pair.width() == swapWidth;
+ }
+
+ if (hasIndex)
+ canSwap &= isValidForm(swap, Arg::Tmp, Arg::Index);
+ }
+
+ if (canSwap) {
+ for (unsigned i = rotate.loop.size() - 1; i--;) {
+ Arg left = rotate.loop[i].src();
+ Arg right = rotate.loop[i + 1].src();
+
+ if (left.isMemory() && right.isMemory()) {
+ // Note that this is a super rare outcome. Rotates are rare. Spills are rare.
+ // Moving data between two spills is rare. To get here a lot of rare stuff has to
+ // all happen at once.
+
+ Tmp scratch = getScratch(0, findPossibleScratch(code, type, left, right));
+ RELEASE_ASSERT(scratch);
+ result.append(Inst(moveForWidth(swapWidth), origin, left, scratch));
+ result.append(Inst(swap, origin, scratch, right));
+ result.append(Inst(moveForWidth(swapWidth), origin, scratch, left));
+ returnScratch(0, scratch);
+ continue;
+ }
+
+ if (left.isMemory())
+ std::swap(left, right);
+
+ result.append(Inst(swap, origin, left, right));
+ }
+
+ for (ShufflePair pair : rotate.loop) {
+ if (pair.width() == swapWidth)
+ continue;
+
+ RELEASE_ASSERT(pair.width() == Arg::Width32);
+ RELEASE_ASSERT(swapWidth == Arg::Width64);
+ RELEASE_ASSERT(pair.dst().isTmp());
+
+ // Need to do an extra zero extension.
+ result.append(Inst(Move32, origin, pair.dst(), pair.dst()));
+ }
+ } else {
+ // We can treat this as a shift so long as we take the last destination (i.e. first
+ // source) and save it first. Then we handle the first entry in the pair in the rotate
+ // specially, after we restore the last destination. This requires some special care to
+ // find a scratch register. It's possible that we have a rotate that uses the entire
+ // available register file.
+
+ Tmp scratch = findPossibleScratch(
+ code, type,
+ [&] (Tmp tmp) -> bool {
+ for (ShufflePair pair : rotate.loop) {
+ if (pair.src().usesTmp(tmp))
+ return false;
+ if (pair.dst().usesTmp(tmp))
+ return false;
+ }
+ return true;
+ });
+
+ // NOTE: This is the most likely use of scratch registers.
+ scratch = getScratch(0, scratch);
+
+ // We may not have found a scratch register. When this happens, we can just use the spill
+ // slot directly.
+ Arg rotateSave = scratch ? Arg(scratch) : scratches[0];
+
+ handleShiftPair(
+ ShufflePair(rotate.loop.last().dst(), rotateSave, rotate.loop[0].width()), 1);
+
+ for (unsigned i = rotate.loop.size(); i-- > 1;)
+ handleShiftPair(rotate.loop[i], 1);
+
+ handleShiftPair(
+ ShufflePair(rotateSave, rotate.loop[0].dst(), rotate.loop[0].width()), 1);
+
+ if (scratch)
+ returnScratch(0, scratch);
+ }
+
+ commitResult();
+ }
+
+ ASSERT(result.isEmpty());
+
+ for (unsigned i = resultPacks.size(); i--;)
+ result.appendVector(resultPacks[i]);
+
+ return result;
+}
+
+Vector<Inst> emitShuffle(
+ Code& code, const Vector<ShufflePair>& pairs,
+ const std::array<Arg, 2>& gpScratch, const std::array<Arg, 2>& fpScratch,
+ Value* origin)
+{
+ Vector<ShufflePair> gpPairs;
+ Vector<ShufflePair> fpPairs;
+ for (const ShufflePair& pair : pairs) {
+ if (pair.src().isMemory() && pair.dst().isMemory() && pair.width() > Arg::pointerWidth()) {
+ // 8-byte memory-to-memory moves on a 32-bit platform are best handled as float moves.
+ fpPairs.append(pair);
+ } else if (pair.src().isGP() && pair.dst().isGP()) {
+ // This means that gpPairs gets memory-to-memory shuffles. The assumption is that we
+ // can do that more efficiently using GPRs, except in the special case above.
+ gpPairs.append(pair);
+ } else
+ fpPairs.append(pair);
+ }
+
+ Vector<Inst> result;
+ result.appendVector(emitShuffle(code, gpPairs, gpScratch, Arg::GP, origin));
+ result.appendVector(emitShuffle(code, fpPairs, fpScratch, Arg::FP, origin));
+ return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirEmitShuffle.h b/Source/JavaScriptCore/b3/air/AirEmitShuffle.h
new file mode 100644
index 000000000..b2c3bb0c2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirEmitShuffle.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirInst.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class Value;
+
+namespace Air {
+
+class Code;
+
+class ShufflePair {
+public:
+ ShufflePair()
+ {
+ }
+
+ ShufflePair(const Arg& src, const Arg& dst, Arg::Width width)
+ : m_src(src)
+ , m_dst(dst)
+ , m_width(width)
+ {
+ }
+
+ const Arg& src() const { return m_src; }
+ const Arg& dst() const { return m_dst; }
+
+ // The width determines the kind of move we do. You can only choose Width32 or Width64 right now.
+ // For GP, it picks between Move32 and Move. For FP, it picks between MoveFloat and MoveDouble.
+ Arg::Width width() const { return m_width; }
+
+ void dump(PrintStream&) const;
+
+private:
+ Arg m_src;
+ Arg m_dst;
+ Arg::Width m_width { Arg::Width8 };
+};
+
+// Create a Shuffle instruction.
+Inst createShuffle(Value* origin, const Vector<ShufflePair>&);
+
+// Perform a shuffle of a given type. The scratch argument is mandatory. You should pass it as
+// follows: If you know that you have scratch registers or temporaries available - that is, they're
+// registers that are not mentioned in the shuffle, have the same type as the shuffle, and are not
+// live at the shuffle - then you can pass them. If you don't have scratch registers available or if
+// you don't feel like looking for them, you can pass memory locations. It's always safe to pass a
+// pair of memory locations, and replacing either memory location with a register can be viewed as an
+// optimization. It's a pretty important optimization. Some more notes:
+//
+// - We define scratch registers as things that are not live before the shuffle and are not one of
+// the destinations of the shuffle. Not being live before the shuffle also means that they cannot
+// be used for any of the sources of the shuffle.
+//
+// - A second scratch location is only needed when you have shuffle pairs where memory is used both
+// as source and destination.
+//
+// - You're guaranteed not to need any scratch locations if there is a Swap instruction available for
+// the type and you don't have any memory locations that are both the source and the destination of
+// some pairs. GP supports Swap on x86 while FP never supports Swap.
+//
+// - Passing memory locations as scratch if you are running emitShuffle() before register allocation
+//   is silly, since that will cause emitShuffle() to pick some specific registers when it does need
+//   scratch. One easy way to avoid that predicament is to ensure that you call emitShuffle() after
+//   register allocation. This is also why we have the Shuffle instruction: it lets us defer the
+//   shuffling until after regalloc.
+//
+// - Shuffles with memory=>memory pairs are not very well tuned. You should avoid them if you want
+// performance. If you need to do them, then making sure that you reserve a temporary is one way to
+// get acceptable performance.
+//
+// NOTE: Use this method (and its friend below) to emit shuffles after register allocation. Before
+// register allocation it is much better to simply use the Shuffle instruction.
+Vector<Inst> emitShuffle(
+ Code& code, Vector<ShufflePair>, std::array<Arg, 2> scratch, Arg::Type, Value* origin);
+
+// Perform a shuffle that involves any number of types. Pass scratch registers or memory locations
+// for each type according to the rules above.
+Vector<Inst> emitShuffle(
+ Code& code, const Vector<ShufflePair>&,
+ const std::array<Arg, 2>& gpScratch, const std::array<Arg, 2>& fpScratch,
+ Value* origin);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp b/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp
new file mode 100644
index 000000000..d000d6c5d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirFixObviousSpills.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/IndexMap.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+
+class FixObviousSpills {
+public:
+ FixObviousSpills(Code& code)
+ : m_code(code)
+ , m_atHead(code.size())
+ {
+ }
+
+ void run()
+ {
+ if (verbose)
+ dataLog("Code before fixObviousSpills:\n", m_code);
+
+ computeAliases();
+ fixCode();
+ }
+
+private:
+ void computeAliases()
+ {
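+        // Standard forward dataflow fixpoint: run each visited block's transfer function and
+        // merge the resulting state into each successor's head state until nothing changes.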
+ m_atHead[m_code[0]].wasVisited = true;
+
+ bool changed = true;
+ while (changed) {
+ changed = false;
+
+ for (BasicBlock* block : m_code) {
+ m_block = block;
+ m_state = m_atHead[block];
+ if (!m_state.wasVisited)
+ continue;
+
+ if (verbose)
+ dataLog("Executing block ", *m_block, ": ", m_state, "\n");
+
+ for (m_instIndex = 0; m_instIndex < block->size(); ++m_instIndex)
+ executeInst();
+
+ for (BasicBlock* successor : block->successorBlocks()) {
+ State& toState = m_atHead[successor];
+ if (toState.wasVisited)
+ changed |= toState.merge(m_state);
+ else {
+ toState = m_state;
+ changed = true;
+ }
+ }
+ }
+ }
+ }
+
+ void fixCode()
+ {
+ for (BasicBlock* block : m_code) {
+ m_block = block;
+ m_state = m_atHead[block];
+ RELEASE_ASSERT(m_state.wasVisited);
+
+ for (m_instIndex = 0; m_instIndex < block->size(); ++m_instIndex) {
+ fixInst();
+ executeInst();
+ }
+ }
+ }
+
+ void executeInst()
+ {
+ Inst& inst = m_block->at(m_instIndex);
+
+ if (verbose)
+ dataLog(" Executing ", inst, ": ", m_state, "\n");
+
+ Inst::forEachDefWithExtraClobberedRegs<Arg>(
+ &inst, &inst,
+ [&] (const Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+ if (verbose)
+ dataLog(" Clobbering ", arg, "\n");
+ m_state.clobber(arg);
+ });
+
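+        // All defs were clobbered above; only moves establish new aliases.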
+ switch (inst.kind.opcode) {
+ case Move:
+ if (inst.args[0].isSomeImm()) {
+ if (inst.args[1].isReg())
+ m_state.addAlias(RegConst(inst.args[1].reg(), inst.args[0].value()));
+ else if (isSpillSlot(inst.args[1]))
+ m_state.addAlias(SlotConst(inst.args[1].stackSlot(), inst.args[0].value()));
+ } else if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+ if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+ m_state.addAlias(RegConst(inst.args[1].reg(), *constant));
+ m_state.addAlias(
+ RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::AllBits));
+ } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+ if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+ m_state.addAlias(SlotConst(inst.args[1].stackSlot(), *constant));
+ m_state.addAlias(
+ RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::AllBits));
+ }
+ break;
+
+ case Move32:
+ if (inst.args[0].isSomeImm()) {
+ if (inst.args[1].isReg())
+ m_state.addAlias(RegConst(inst.args[1].reg(), static_cast<uint32_t>(inst.args[0].value())));
+ else if (isSpillSlot(inst.args[1]))
+ m_state.addAlias(SlotConst(inst.args[1].stackSlot(), static_cast<uint32_t>(inst.args[0].value())));
+ } else if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+ if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+ m_state.addAlias(RegConst(inst.args[1].reg(), static_cast<uint32_t>(*constant)));
+ m_state.addAlias(
+ RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::ZExt32));
+ } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+ if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+ m_state.addAlias(SlotConst(inst.args[1].stackSlot(), static_cast<int32_t>(*constant)));
+ m_state.addAlias(
+ RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::Match32));
+ }
+ break;
+
+ case MoveFloat:
+ if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+ m_state.addAlias(
+ RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::Match32));
+ } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+ m_state.addAlias(
+ RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::Match32));
+ }
+ break;
+
+ case MoveDouble:
+ if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+ m_state.addAlias(
+ RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::AllBits));
+ } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+ m_state.addAlias(
+ RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::AllBits));
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ void fixInst()
+ {
+ Inst& inst = m_block->at(m_instIndex);
+
+ if (verbose)
+ dataLog("Fixing inst ", inst, ": ", m_state, "\n");
+
+ // First handle some special instructions.
+ switch (inst.kind.opcode) {
+ case Move: {
+ if (inst.args[0].isBigImm() && inst.args[1].isReg()
+ && isValidForm(Add64, Arg::Imm, Arg::Tmp, Arg::Tmp)) {
+ // BigImm materializations are super expensive on both x86 and ARM. Let's try to
+ // materialize this bad boy using math instead. Note that we use unsigned math here
+ // since it's more deterministic.
+ uint64_t myValue = inst.args[0].value();
+ Reg myDest = inst.args[1].reg();
+ for (const RegConst& regConst : m_state.regConst) {
+ uint64_t otherValue = regConst.constant;
+
+ // Let's try add. That's the only thing that works on all platforms, since it's
+ // the only cheap arithmetic op that x86 does in three operands. Long term, we
+ // should add fancier materializations here for ARM if the BigImm is yuge.
+ uint64_t delta = myValue - otherValue;
+
+ if (Arg::isValidImmForm(delta)) {
+ inst.kind = Add64;
+ inst.args.resize(3);
+ inst.args[0] = Arg::imm(delta);
+ inst.args[1] = Tmp(regConst.reg);
+ inst.args[2] = Tmp(myDest);
+ return;
+ }
+ }
+ return;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // Create a copy in case we invalidate the instruction. That doesn't happen often.
+ Inst instCopy = inst;
+
+ // The goal is to replace references to stack slots. We only care about early uses. We can't
+ // handle UseDefs. We could teach this to handle UseDefs if we inserted a store instruction
+ // after and we proved that the register aliased to the stack slot dies here. We can get that
+ // information from the liveness analysis. We also can't handle late uses, because we don't
+ // look at late clobbers when doing this.
+ bool didThings = false;
+ auto handleArg = [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width width) {
+ if (!isSpillSlot(arg))
+ return;
+ if (!Arg::isEarlyUse(role))
+ return;
+ if (Arg::isAnyDef(role))
+ return;
+
+ // Try to get a register if at all possible.
+ if (const RegSlot* alias = m_state.getRegSlot(arg.stackSlot())) {
+ switch (width) {
+ case Arg::Width64:
+ if (alias->mode != RegSlot::AllBits)
+ return;
+ if (verbose)
+ dataLog(" Replacing ", arg, " with ", alias->reg, "\n");
+ arg = Tmp(alias->reg);
+ didThings = true;
+ return;
+ case Arg::Width32:
+ if (verbose)
+ dataLog(" Replacing ", arg, " with ", alias->reg, " (subwidth case)\n");
+ arg = Tmp(alias->reg);
+ didThings = true;
+ return;
+ default:
+ return;
+ }
+ }
+
+            // Fall back to an immediate if that didn't work.
+ if (const SlotConst* alias = m_state.getSlotConst(arg.stackSlot())) {
+ if (verbose)
+ dataLog(" Replacing ", arg, " with constant ", alias->constant, "\n");
+ if (Arg::isValidImmForm(alias->constant))
+ arg = Arg::imm(alias->constant);
+ else
+ arg = Arg::bigImm(alias->constant);
+ didThings = true;
+ return;
+ }
+ };
+
+ inst.forEachArg(handleArg);
+ if (!didThings || inst.isValidForm())
+ return;
+
+ // We introduced something invalid along the way. Back up and carefully handle each argument.
+ inst = instCopy;
+ ASSERT(inst.isValidForm());
+ inst.forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+ Arg argCopy = arg;
+ handleArg(arg, role, type, width);
+ if (!inst.isValidForm())
+ arg = argCopy;
+ });
+ }
+
+ static bool isSpillSlot(const Arg& arg)
+ {
+ return arg.isStack() && arg.stackSlot()->isSpill();
+ }
+
+ struct RegConst {
+ RegConst()
+ {
+ }
+
+ RegConst(Reg reg, int64_t constant)
+ : reg(reg)
+ , constant(constant)
+ {
+ }
+
+ explicit operator bool() const
+ {
+ return !!reg;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(reg, "->", constant);
+ }
+
+ Reg reg;
+ int64_t constant { 0 };
+ };
+
+ struct RegSlot {
+ enum Mode : int8_t {
+            AllBits, // Register contains exactly the contents of the stack slot.
+ ZExt32, // Register contains zero-extended contents of stack slot.
+ Match32 // Low 32 bits of register match low 32 bits of stack slot.
+ };
+
+ RegSlot()
+ {
+ }
+
+ RegSlot(Reg reg, StackSlot* slot, Mode mode)
+ : slot(slot)
+ , reg(reg)
+ , mode(mode)
+ {
+ }
+
+ explicit operator bool() const
+ {
+ return slot && reg;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(pointerDump(slot), "->", reg);
+ switch (mode) {
+ case AllBits:
+ out.print("(AllBits)");
+ break;
+ case ZExt32:
+ out.print("(ZExt32)");
+ break;
+ case Match32:
+ out.print("(Match32)");
+ break;
+ }
+ }
+
+ StackSlot* slot { nullptr };
+ Reg reg;
+ Mode mode { AllBits };
+ };
+
+ struct SlotConst {
+ SlotConst()
+ {
+ }
+
+ SlotConst(StackSlot* slot, int64_t constant)
+ : slot(slot)
+ , constant(constant)
+ {
+ }
+
+ explicit operator bool() const
+ {
+ return slot;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(pointerDump(slot), "->", constant);
+ }
+
+ StackSlot* slot { nullptr };
+ int64_t constant { 0 };
+ };
+
+ struct State {
+ void addAlias(const RegConst& newAlias)
+ {
+ regConst.append(newAlias);
+ }
+ void addAlias(const RegSlot& newAlias)
+ {
+ regSlot.append(newAlias);
+ }
+ void addAlias(const SlotConst& newAlias)
+ {
+ slotConst.append(newAlias);
+ }
+
+ const RegConst* getRegConst(Reg reg) const
+ {
+ for (const RegConst& alias : regConst) {
+ if (alias.reg == reg)
+ return &alias;
+ }
+ return nullptr;
+ }
+
+ const RegSlot* getRegSlot(Reg reg) const
+ {
+ for (const RegSlot& alias : regSlot) {
+ if (alias.reg == reg)
+ return &alias;
+ }
+ return nullptr;
+ }
+
+ const RegSlot* getRegSlot(StackSlot* slot) const
+ {
+ for (const RegSlot& alias : regSlot) {
+ if (alias.slot == slot)
+ return &alias;
+ }
+ return nullptr;
+ }
+
+ const RegSlot* getRegSlot(Reg reg, StackSlot* slot) const
+ {
+ for (const RegSlot& alias : regSlot) {
+ if (alias.reg == reg && alias.slot == slot)
+ return &alias;
+ }
+ return nullptr;
+ }
+
+ const SlotConst* getSlotConst(StackSlot* slot) const
+ {
+ for (const SlotConst& alias : slotConst) {
+ if (alias.slot == slot)
+ return &alias;
+ }
+ return nullptr;
+ }
+
+ std::optional<int64_t> constantFor(const Arg& arg)
+ {
+ if (arg.isReg()) {
+ if (const RegConst* alias = getRegConst(arg.reg()))
+ return alias->constant;
+ return std::nullopt;
+ }
+ if (arg.isStack()) {
+ if (const SlotConst* alias = getSlotConst(arg.stackSlot()))
+ return alias->constant;
+ return std::nullopt;
+ }
+ return std::nullopt;
+ }
+
+ void clobber(const Arg& arg)
+ {
+ if (arg.isReg()) {
+ regConst.removeAllMatching(
+ [&] (const RegConst& alias) -> bool {
+ return alias.reg == arg.reg();
+ });
+ regSlot.removeAllMatching(
+ [&] (const RegSlot& alias) -> bool {
+ return alias.reg == arg.reg();
+ });
+ return;
+ }
+ if (arg.isStack()) {
+ slotConst.removeAllMatching(
+ [&] (const SlotConst& alias) -> bool {
+ return alias.slot == arg.stackSlot();
+ });
+ regSlot.removeAllMatching(
+ [&] (const RegSlot& alias) -> bool {
+ return alias.slot == arg.stackSlot();
+ });
+ }
+ }
+
+ bool merge(const State& other)
+ {
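+            // Keep only the aliases that hold in both states. A surviving RegSlot alias whose
+            // modes disagree is weakened to Match32, which is implied by either mode.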
+ bool changed = false;
+
+ changed |= !!regConst.removeAllMatching(
+ [&] (RegConst& alias) -> bool {
+ const RegConst* otherAlias = other.getRegConst(alias.reg);
+ if (!otherAlias)
+ return true;
+ if (alias.constant != otherAlias->constant)
+ return true;
+ return false;
+ });
+
+ changed |= !!slotConst.removeAllMatching(
+ [&] (SlotConst& alias) -> bool {
+ const SlotConst* otherAlias = other.getSlotConst(alias.slot);
+ if (!otherAlias)
+ return true;
+ if (alias.constant != otherAlias->constant)
+ return true;
+ return false;
+ });
+
+ changed |= !!regSlot.removeAllMatching(
+ [&] (RegSlot& alias) -> bool {
+ const RegSlot* otherAlias = other.getRegSlot(alias.reg, alias.slot);
+ if (!otherAlias)
+ return true;
+ if (alias.mode != RegSlot::Match32 && alias.mode != otherAlias->mode) {
+ alias.mode = RegSlot::Match32;
+ changed = true;
+ }
+ return false;
+ });
+
+ return changed;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(
+ "{regConst = [", listDump(regConst), "], slotConst = [", listDump(slotConst),
+ "], regSlot = [", listDump(regSlot), "], wasVisited = ", wasVisited, "}");
+ }
+
+ Vector<RegConst> regConst;
+ Vector<SlotConst> slotConst;
+ Vector<RegSlot> regSlot;
+ bool wasVisited { false };
+ };
+
+ Code& m_code;
+ IndexMap<BasicBlock, State> m_atHead;
+ State m_state;
+ BasicBlock* m_block { nullptr };
+ unsigned m_instIndex { 0 };
+};
+
+} // anonymous namespace
+
+void fixObviousSpills(Code& code)
+{
+ PhaseScope phaseScope(code, "fixObviousSpills");
+
+ FixObviousSpills fixObviousSpills(code);
+ fixObviousSpills.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirFixObviousSpills.h b/Source/JavaScriptCore/b3/air/AirFixObviousSpills.h
new file mode 100644
index 000000000..fb8e41fe2
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFixObviousSpills.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a forward flow phase that tracks equivalence between spill slots and registers. It
+// removes loads from spill slots in cases when the contents of the spill slot can be found in (or
+// computed from) a register.
+void fixObviousSpills(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.cpp b/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.cpp
new file mode 100644
index 000000000..b3d5d0b71
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirFixPartialRegisterStalls.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInst.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "MacroAssembler.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool hasPartialXmmRegUpdate(const Inst& inst)
+{
+ switch (inst.kind.opcode) {
+ case ConvertDoubleToFloat:
+ case ConvertFloatToDouble:
+ case ConvertInt32ToDouble:
+ case ConvertInt64ToDouble:
+ case ConvertInt32ToFloat:
+ case ConvertInt64ToFloat:
+ case SqrtDouble:
+ case SqrtFloat:
+ case CeilDouble:
+ case CeilFloat:
+ case FloorDouble:
+ case FloorFloat:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool isDependencyBreaking(const Inst& inst)
+{
+ // "xorps reg, reg" is used by the frontend to remove the dependency on its argument.
+ return inst.kind.opcode == MoveZeroToDouble;
+}
+
+// FIXME: find a good distance per architecture experimentally.
+// LLVM uses a distance of 16 but that comes from Nehalem.
+unsigned char minimumSafeDistance = 16;
+
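+// For each FP register, tracks how close its most recent (possibly partial) definition is,
+// saturated at 255 ("far enough away / no recent def"). Distances below minimumSafeDistance mean
+// that a dependency-breaking instruction may be needed.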
+struct FPDefDistance {
+ FPDefDistance()
+ {
+ for (unsigned i = 0; i < MacroAssembler::numberOfFPRegisters(); ++i)
+ distance[i] = 255;
+ }
+
+ void reset(FPRReg reg)
+ {
+ unsigned index = MacroAssembler::fpRegisterIndex(reg);
+ distance[index] = 255;
+ }
+
+ void add(FPRReg reg, unsigned registerDistance)
+ {
+ unsigned index = MacroAssembler::fpRegisterIndex(reg);
+ if (registerDistance < distance[index])
+ distance[index] = static_cast<unsigned char>(registerDistance);
+ }
+
+    bool updateFromPredecessor(FPDefDistance& predecessorDistance, unsigned constantOffset = 0)
+ {
+ bool changed = false;
+ for (unsigned i = 0; i < MacroAssembler::numberOfFPRegisters(); ++i) {
+            unsigned regDistance = predecessorDistance.distance[i] + constantOffset;
+ if (regDistance < minimumSafeDistance && regDistance < distance[i]) {
+ distance[i] = regDistance;
+ changed = true;
+ }
+ }
+ return changed;
+ }
+
+ unsigned char distance[MacroAssembler::numberOfFPRegisters()];
+};
+
+void updateDistances(Inst& inst, FPDefDistance& localDistance, unsigned& distanceToBlockEnd)
+{
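+    // A dependency-breaking instruction clears the tracked definition for its register; any other
+    // FP def records its distance from the end of the block.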
+ --distanceToBlockEnd;
+
+ if (isDependencyBreaking(inst)) {
+ localDistance.reset(inst.args[0].tmp().fpr());
+ return;
+ }
+
+ inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+ ASSERT_WITH_MESSAGE(tmp.isReg(), "This phase must be run after register allocation.");
+
+ if (tmp.isFPR() && Arg::isAnyDef(role))
+ localDistance.add(tmp.fpr(), distanceToBlockEnd);
+ });
+}
+
+}
+
+void fixPartialRegisterStalls(Code& code)
+{
+ if (!isX86())
+ return;
+
+ PhaseScope phaseScope(code, "fixPartialRegisterStalls");
+
+ Vector<BasicBlock*> candidates;
+
+ for (BasicBlock* block : code) {
+ for (const Inst& inst : *block) {
+ if (hasPartialXmmRegUpdate(inst)) {
+ candidates.append(block);
+ break;
+ }
+ }
+ }
+
+    // Fortunately, instructions with partial register updates are rare. Return early if no block
+    // contains any.
+ if (candidates.isEmpty())
+ return;
+
+ // For each block, this provides the distance to the last instruction setting each register
+ // on block *entry*.
+ IndexMap<BasicBlock, FPDefDistance> lastDefDistance(code.size());
+
+ // Blocks with dirty distance at head.
+ IndexSet<BasicBlock> dirty;
+
+ // First, we compute the local distance for each block and push it to the successors.
+ for (BasicBlock* block : code) {
+ FPDefDistance localDistance;
+
+ unsigned distanceToBlockEnd = block->size();
+ for (Inst& inst : *block)
+ updateDistances(inst, localDistance, distanceToBlockEnd);
+
+ for (BasicBlock* successor : block->successorBlocks()) {
+            if (lastDefDistance[successor].updateFromPredecessor(localDistance))
+ dirty.add(successor);
+ }
+ }
+
+    // Now we propagate the minimums across blocks.
+ bool changed;
+ do {
+ changed = false;
+
+ for (BasicBlock* block : code) {
+ if (!dirty.remove(block))
+ continue;
+
+ // Little shortcut: if the block is big enough, propagating it won't add any information.
+ if (block->size() >= minimumSafeDistance)
+ continue;
+
+ unsigned blockSize = block->size();
+ FPDefDistance& blockDistance = lastDefDistance[block];
+ for (BasicBlock* successor : block->successorBlocks()) {
+                if (lastDefDistance[successor].updateFromPredecessor(blockDistance, blockSize)) {
+ dirty.add(successor);
+ changed = true;
+ }
+ }
+ }
+ } while (changed);
+
+ // Finally, update each block as needed.
+ InsertionSet insertionSet(code);
+ for (BasicBlock* block : candidates) {
+ unsigned distanceToBlockEnd = block->size();
+ FPDefDistance& localDistance = lastDefDistance[block];
+
+ for (unsigned i = 0; i < block->size(); ++i) {
+ Inst& inst = block->at(i);
+
+ if (hasPartialXmmRegUpdate(inst)) {
+ RegisterSet defs;
+ RegisterSet uses;
+ inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+ if (tmp.isFPR()) {
+ if (Arg::isAnyDef(role))
+ defs.set(tmp.fpr());
+ if (Arg::isAnyUse(role))
+ uses.set(tmp.fpr());
+ }
+ });
+ // We only care about values we define but not use. Otherwise we have to wait
+ // for the value to be resolved anyway.
+ defs.exclude(uses);
+
+ defs.forEach([&] (Reg reg) {
+ if (localDistance.distance[MacroAssembler::fpRegisterIndex(reg.fpr())] < minimumSafeDistance)
+ insertionSet.insert(i, MoveZeroToDouble, inst.origin, Tmp(reg));
+ });
+ }
+
+ updateDistances(inst, localDistance, distanceToBlockEnd);
+ }
+ insertionSet.execute(block);
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.h b/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.h
new file mode 100644
index 000000000..009327948
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFixPartialRegisterStalls.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// x86 has a pipelining hazard caused by false dependencies between instructions.
+//
+// Some instructions update only part of a register; they can only be scheduled after
+// the previous definition is computed. The compiler can avoid this problem by
+// explicitly resetting the entire register before executing the instruction with the
+// partial update.
+//
+// See "Partial XMM Register Stalls" and "Dependency Breaking Idioms" in the manual.
+void fixPartialRegisterStalls(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirFrequentedBlock.h b/Source/JavaScriptCore/b3/air/AirFrequentedBlock.h
new file mode 100644
index 000000000..37cd28736
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirFrequentedBlock.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3GenericFrequentedBlock.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+
+typedef GenericFrequentedBlock<BasicBlock> FrequentedBlock;
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirGenerate.cpp b/Source/JavaScriptCore/b3/air/AirGenerate.cpp
new file mode 100644
index 000000000..a99f0501c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirGenerate.cpp
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirGenerate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirAllocateStack.h"
+#include "AirCode.h"
+#include "AirDumpAsJS.h"
+#include "AirEliminateDeadCode.h"
+#include "AirFixObviousSpills.h"
+#include "AirFixPartialRegisterStalls.h"
+#include "AirGenerationContext.h"
+#include "AirHandleCalleeSaves.h"
+#include "AirIteratedRegisterCoalescing.h"
+#include "AirLogRegisterPressure.h"
+#include "AirLowerAfterRegAlloc.h"
+#include "AirLowerEntrySwitch.h"
+#include "AirLowerMacros.h"
+#include "AirOpcodeUtils.h"
+#include "AirOptimizeBlockOrder.h"
+#include "AirReportUsedRegisters.h"
+#include "AirSimplifyCFG.h"
+#include "AirSpillEverything.h"
+#include "AirValidate.h"
+#include "B3Common.h"
+#include "B3Procedure.h"
+#include "B3TimingScope.h"
+#include "B3ValueInlines.h"
+#include "CCallHelpers.h"
+#include "DisallowMacroScratchRegisterUsage.h"
+#include "LinkBuffer.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void prepareForGeneration(Code& code)
+{
+ TimingScope timingScope("Air::prepareForGeneration");
+
+ // We don't expect the incoming code to have predecessors computed.
+ code.resetReachability();
+
+ if (shouldValidateIR())
+ validate(code);
+
+ // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
+ if (shouldDumpIR(AirMode) && !shouldDumpIRAtEachPhase(AirMode)) {
+ dataLog("Initial air:\n");
+ dataLog(code);
+ }
+
+ lowerMacros(code);
+
+ // This is where we run our optimizations and transformations.
+ // FIXME: Add Air optimizations.
+ // https://bugs.webkit.org/show_bug.cgi?id=150456
+
+ eliminateDeadCode(code);
+
+ // Register allocation for all the Tmps that do not have a corresponding machine register.
+ // After this phase, every Tmp has a reg.
+ //
+ // For debugging, you can use spillEverything() to put everything to the stack between each Inst.
+ if (Options::airSpillsEverything())
+ spillEverything(code);
+ else
+ iteratedRegisterCoalescing(code);
+
+ if (Options::logAirRegisterPressure()) {
+ dataLog("Register pressure after register allocation:\n");
+ logRegisterPressure(code);
+ }
+
+ // This replaces uses of spill slots with registers or constants if possible. It does this by
+ // minimizing the amount that we perturb the already-chosen register allocation. It may extend
+ // the live ranges of registers though.
+ fixObviousSpills(code);
+
+ lowerAfterRegAlloc(code);
+
+    // Prior to this point the prologue and epilogue are implicit. This makes them explicit. It
+    // also does things like identify which callee-saves we're using and save them.
+ handleCalleeSaves(code);
+
+ if (Options::dumpAirAsJSBeforeAllocateStack()) {
+ dataLog("Dumping Air as JS before allocateStack:\n");
+ dumpAsJS(code, WTF::dataFile());
+ dataLog("Air hash: ", code.jsHash(), "\n");
+ }
+
+ // This turns all Stack and CallArg Args into Addr args that use the frame pointer. It does
+ // this by first-fit allocating stack slots. It should be pretty darn close to optimal, so we
+ // shouldn't have to worry about this very much.
+ allocateStack(code);
+
+ if (Options::dumpAirAfterAllocateStack()) {
+ dataLog("Dumping Air after allocateStack:\n");
+ dataLog(code);
+ dataLog("Air hash: ", code.jsHash(), "\n");
+ }
+
+ // If we coalesced moves then we can unbreak critical edges. This is the main reason for this
+ // phase.
+ simplifyCFG(code);
+
+ // This is needed to satisfy a requirement of B3::StackmapValue.
+ reportUsedRegisters(code);
+
+ // Attempt to remove false dependencies between instructions created by partial register changes.
+ // This must be executed as late as possible as it depends on the instructions order and register
+ // use. We _must_ run this after reportUsedRegisters(), since that kills variable assignments
+ // that seem dead. Luckily, this phase does not change register liveness, so that's OK.
+ fixPartialRegisterStalls(code);
+
+ // Actually create entrypoints.
+ lowerEntrySwitch(code);
+
+ // The control flow graph can be simplified further after we have lowered EntrySwitch.
+ simplifyCFG(code);
+
+ // This sorts the basic blocks in Code to achieve an ordering that maximizes the likelihood that a high
+ // frequency successor is also the fall-through target.
+ optimizeBlockOrder(code);
+
+ if (shouldValidateIR())
+ validate(code);
+
+ // Do a final dump of Air. Note that we have to do this even if we are doing per-phase dumping,
+ // since the final generation is not a phase.
+ if (shouldDumpIR(AirMode)) {
+ dataLog("Air after ", code.lastPhaseName(), ", before generation:\n");
+ dataLog(code);
+ }
+}
+
+void generate(Code& code, CCallHelpers& jit)
+{
+ TimingScope timingScope("Air::generate");
+
+ DisallowMacroScratchRegisterUsage disallowScratch(jit);
+
+ auto argFor = [&] (const RegisterAtOffset& entry) -> CCallHelpers::Address {
+ return CCallHelpers::Address(GPRInfo::callFrameRegister, entry.offset());
+ };
+
+ // And now, we generate code.
+ GenerationContext context;
+ context.code = &code;
+ context.blockLabels.resize(code.size());
+ for (BasicBlock* block : code) {
+ if (block)
+ context.blockLabels[block] = Box<CCallHelpers::Label>::create();
+ }
+ IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());
+
+ auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
+ if (context.blockLabels[target]->isSet()) {
+ jump.linkTo(*context.blockLabels[target], &jit);
+ return;
+ }
+
+ blockJumps[target].append(jump);
+ };
+
+ PCToOriginMap& pcToOriginMap = code.proc().pcToOriginMap();
+ auto addItem = [&] (Inst& inst) {
+ if (!inst.origin) {
+ pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), Origin());
+ return;
+ }
+ pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), inst.origin->origin());
+ };
+
+ for (BasicBlock* block : code) {
+ context.currentBlock = block;
+ context.indexInBlock = UINT_MAX;
+ blockJumps[block].link(&jit);
+ CCallHelpers::Label label = jit.label();
+ *context.blockLabels[block] = label;
+
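+        // Entrypoint blocks get the function prologue: set up the frame and store the used
+        // callee-save registers at their assigned frame offsets.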
+ if (code.isEntrypoint(block)) {
+ jit.emitFunctionPrologue();
+ if (code.frameSize())
+ jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);
+
+ for (const RegisterAtOffset& entry : code.calleeSaveRegisters()) {
+ if (entry.reg().isGPR())
+ jit.storePtr(entry.reg().gpr(), argFor(entry));
+ else
+ jit.storeDouble(entry.reg().fpr(), argFor(entry));
+ }
+ }
+
+ ASSERT(block->size() >= 1);
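+        // Generate everything but the terminal. A non-terminal instruction must not produce a
+        // jump that needs linking.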
+ for (unsigned i = 0; i < block->size() - 1; ++i) {
+ context.indexInBlock = i;
+ Inst& inst = block->at(i);
+ addItem(inst);
+ CCallHelpers::Jump jump = inst.generate(jit, context);
+ ASSERT_UNUSED(jump, !jump.isSet());
+ }
+
+ context.indexInBlock = block->size() - 1;
+
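+        // If the terminal is an unconditional Jump to the block that is laid out next, we can skip
+        // emitting it and just fall through.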
+ if (block->last().kind.opcode == Jump
+ && block->successorBlock(0) == code.findNextBlock(block))
+ continue;
+
+ addItem(block->last());
+
+ if (isReturn(block->last().kind.opcode)) {
+ // We currently don't represent the full prologue/epilogue in Air, so we need to
+ // have this override.
+ if (code.frameSize()) {
+ for (const RegisterAtOffset& entry : code.calleeSaveRegisters()) {
+ if (entry.reg().isGPR())
+ jit.loadPtr(argFor(entry), entry.reg().gpr());
+ else
+ jit.loadDouble(argFor(entry), entry.reg().fpr());
+ }
+ jit.emitFunctionEpilogue();
+ } else
+ jit.emitFunctionEpilogueWithEmptyFrame();
+ jit.ret();
+ addItem(block->last());
+ continue;
+ }
+
+ CCallHelpers::Jump jump = block->last().generate(jit, context);
+ // The jump won't be set for patchpoints. It won't be set for Oops because then it won't have
+ // any successors.
+ if (jump.isSet()) {
+ switch (block->numSuccessors()) {
+ case 1:
+ link(jump, block->successorBlock(0));
+ break;
+ case 2:
+ link(jump, block->successorBlock(0));
+ if (block->successorBlock(1) != code.findNextBlock(block))
+ link(jit.jump(), block->successorBlock(1));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+ addItem(block->last());
+ }
+
+ context.currentBlock = nullptr;
+ context.indexInBlock = UINT_MAX;
+
+ Vector<CCallHelpers::Label> entrypointLabels(code.numEntrypoints());
+ for (unsigned i = code.numEntrypoints(); i--;)
+ entrypointLabels[i] = *context.blockLabels[code.entrypoint(i).block()];
+ code.setEntrypointLabels(WTFMove(entrypointLabels));
+
+ pcToOriginMap.appendItem(jit.label(), Origin());
+ // FIXME: Make late paths have Origins: https://bugs.webkit.org/show_bug.cgi?id=153689
+ for (auto& latePath : context.latePaths)
+ latePath->run(jit, context);
+ pcToOriginMap.appendItem(jit.label(), Origin());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirGenerate.h b/Source/JavaScriptCore/b3/air/AirGenerate.h
new file mode 100644
index 000000000..60839bea5
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirGenerate.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC {
+
+class CCallHelpers;
+
+namespace B3 { namespace Air {
+
+class Code;
+
+// This takes an Air::Code that hasn't had any stack allocation and optionally hasn't had any
+// register allocation and does both of those things.
+JS_EXPORT_PRIVATE void prepareForGeneration(Code&);
+
+// This generates the code using the given CCallHelpers instance. Note that this may call callbacks
+// in the supplied code as it is generating.
+JS_EXPORT_PRIVATE void generate(Code&, CCallHelpers&);
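+
+// A typical client calls prepareForGeneration() on a Code and then calls generate() on that same
+// Code with the CCallHelpers to emit into.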
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirGenerated.cpp b/Source/JavaScriptCore/b3/air/AirGenerated.cpp
new file mode 100644
index 000000000..6dd2304a9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirGenerated.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(B3_JIT)
+
+// This is generated by opcode_generator.rb.
+#include "AirOpcodeGenerated.h"
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirGenerationContext.h b/Source/JavaScriptCore/b3/air/AirGenerationContext.h
new file mode 100644
index 000000000..f48b5bb8a
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirGenerationContext.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "CCallHelpers.h"
+#include <wtf/Box.h>
+#include <wtf/IndexMap.h>
+#include <wtf/SharedTask.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+struct GenerationContext {
+ WTF_MAKE_NONCOPYABLE(GenerationContext);
+public:
+
+ GenerationContext() = default;
+
+ typedef void LatePathFunction(CCallHelpers&, GenerationContext&);
+ typedef SharedTask<LatePathFunction> LatePath;
+
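+    // Late paths are bits of code (for example slow paths) whose emission is deferred until after
+    // all of the blocks have been generated; generate() runs them at the very end.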
+ Vector<RefPtr<LatePath>> latePaths;
+ IndexMap<BasicBlock, Box<CCallHelpers::Label>> blockLabels;
+ BasicBlock* currentBlock { nullptr };
+ unsigned indexInBlock { UINT_MAX };
+ Code* code { nullptr };
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp b/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp
new file mode 100644
index 000000000..97cdfa1c9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirHandleCalleeSaves.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void handleCalleeSaves(Code& code)
+{
+ PhaseScope phaseScope(code, "handleCalleeSaves");
+
+ RegisterSet usedCalleeSaves;
+
+ for (BasicBlock* block : code) {
+ for (Inst& inst : *block) {
+ inst.forEachTmpFast(
+ [&] (Tmp& tmp) {
+ // At first we just record all used regs.
+ usedCalleeSaves.set(tmp.reg());
+ });
+
+ if (inst.kind.opcode == Patch)
+ usedCalleeSaves.merge(inst.extraClobberedRegs());
+ }
+ }
+
+ // Now we filter to really get the callee saves.
+ usedCalleeSaves.filter(RegisterSet::calleeSaveRegisters());
+ usedCalleeSaves.filter(code.mutableRegs());
+ usedCalleeSaves.exclude(RegisterSet::stackRegisters()); // We don't need to save FP here.
+
+ if (!usedCalleeSaves.numberOfSetRegisters())
+ return;
+
+ code.calleeSaveRegisters() = RegisterAtOffsetList(usedCalleeSaves);
+
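+    // The callee-save slots sit at negative offsets from the frame pointer, so the size of the
+    // area is the magnitude of the most negative offset.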
+ size_t byteSize = 0;
+ for (const RegisterAtOffset& entry : code.calleeSaveRegisters())
+ byteSize = std::max(static_cast<size_t>(-entry.offset()), byteSize);
+
+ StackSlot* savesArea = code.addStackSlot(byteSize, StackSlotKind::Locked);
+ // This is a bit weird since we could have already pinned a different stack slot to this
+ // area. Also, our runtime does not require us to pin the saves area. Maybe we shouldn't pin it?
+ savesArea->setOffsetFromFP(-byteSize);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h b/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h
new file mode 100644
index 000000000..b4b78a3b7
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This phase identifies callee-save registers and adds code to save/restore them in the
+// prologue/epilogue to the code. It's a mandatory phase.
+
+// FIXME: It would be cool to make this more interactive with the Air client and also more
+// powerful.
+// We should have shrink wrapping: https://bugs.webkit.org/show_bug.cgi?id=150458
+// We should make this interact with the client: https://bugs.webkit.org/show_bug.cgi?id=150459
+
+void handleCalleeSaves(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirInsertionSet.cpp b/Source/JavaScriptCore/b3/air/AirInsertionSet.cpp
new file mode 100644
index 000000000..452d4888f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInsertionSet.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirInsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void InsertionSet::insertInsts(size_t index, Vector<Inst>&& insts)
+{
+ for (Inst& inst : insts)
+ insertInst(index, WTFMove(inst));
+}
+
+void InsertionSet::execute(BasicBlock* block)
+{
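+    // The insertions must be ordered by index before they are spliced in. bubbleSort is stable,
+    // so multiple insertions at the same index keep the order in which they were appended.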
+ bubbleSort(m_insertions.begin(), m_insertions.end());
+ executeInsertions(block->m_insts, m_insertions);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirInsertionSet.h b/Source/JavaScriptCore/b3/air/AirInsertionSet.h
new file mode 100644
index 000000000..84a791d40
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInsertionSet.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+class Code;
+
+typedef WTF::Insertion<Inst> Insertion;
+
+class InsertionSet {
+public:
+ InsertionSet(Code& code)
+ : m_code(code)
+ {
+ }
+
+ Code& code() { return m_code; }
+
+ template<typename T>
+ void appendInsertion(T&& insertion)
+ {
+ m_insertions.append(std::forward<T>(insertion));
+ }
+
+ template<typename Inst>
+ void insertInst(size_t index, Inst&& inst)
+ {
+ appendInsertion(Insertion(index, std::forward<Inst>(inst)));
+ }
+
+ template <typename InstVector>
+ void insertInsts(size_t index, const InstVector& insts)
+ {
+ for (const Inst& inst : insts)
+ insertInst(index, inst);
+ }
+ void insertInsts(size_t index, Vector<Inst>&&);
+
+ template<typename... Arguments>
+ void insert(size_t index, Arguments&&... arguments)
+ {
+ insertInst(index, Inst(std::forward<Arguments>(arguments)...));
+ }
+
+ void execute(BasicBlock*);
+
+private:
+ Code& m_code;
+ Vector<Insertion, 8> m_insertions;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirInst.cpp b/Source/JavaScriptCore/b3/air/AirInst.cpp
new file mode 100644
index 000000000..defb344b0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInst.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirInst.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirInstInlines.h"
+#include "B3Value.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool Inst::hasArgEffects()
+{
+ bool result = false;
+ forEachArg(
+ [&] (Arg&, Arg::Role role, Arg::Type, Arg::Width) {
+ if (Arg::isAnyDef(role))
+ result = true;
+ });
+ return result;
+}
+
+unsigned Inst::jsHash() const
+{
+ // FIXME: This should do something for flags.
+ // https://bugs.webkit.org/show_bug.cgi?id=162751
+ unsigned result = static_cast<unsigned>(kind.opcode);
+
+ for (const Arg& arg : args)
+ result += arg.jsHash();
+
+ return result;
+}
+
+void Inst::dump(PrintStream& out) const
+{
+ out.print(kind, " ", listDump(args));
+ if (origin) {
+ if (args.size())
+ out.print(", ");
+ out.print(*origin);
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirInst.h b/Source/JavaScriptCore/b3/air/AirInst.h
new file mode 100644
index 000000000..f38c21df8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInst.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirKind.h"
+#include "CCallHelpers.h"
+
+namespace JSC {
+
+class CCallHelpers;
+class RegisterSet;
+
+namespace B3 {
+
+class Value;
+
+namespace Air {
+
+struct GenerationContext;
+
+struct Inst {
+public:
+ typedef Vector<Arg, 3> ArgList;
+
+ Inst()
+ : origin(nullptr)
+ {
+ }
+
+ Inst(Kind kind, Value* origin)
+ : origin(origin)
+ , kind(kind)
+ {
+ }
+
+ template<typename... Arguments>
+ Inst(Kind kind, Value* origin, Arg arg, Arguments... arguments)
+ : args{ arg, arguments... }
+ , origin(origin)
+ , kind(kind)
+ {
+ }
+
+ Inst(Kind kind, Value* origin, const ArgList& arguments)
+ : args(arguments)
+ , origin(origin)
+ , kind(kind)
+ {
+ }
+
+ Inst(Kind kind, Value* origin, ArgList&& arguments)
+ : args(WTFMove(arguments))
+ , origin(origin)
+ , kind(kind)
+ {
+ }
+
+ explicit operator bool() const { return origin || kind || args.size(); }
+
+ void append() { }
+
+ template<typename... Arguments>
+ void append(Arg arg, Arguments... arguments)
+ {
+ args.append(arg);
+ append(arguments...);
+ }
+
+ // Note that these functors all avoid using "const" because we want to use them for things that
+ // edit IR. IR is meant to be edited; if you're carrying around a "const Inst&" then you're
+ // probably doing it wrong.
+
+ // This only walks those Tmps that are explicitly mentioned, and it doesn't tell you their role
+ // or type.
+ template<typename Functor>
+ void forEachTmpFast(const Functor& functor)
+ {
+ for (Arg& arg : args)
+ arg.forEachTmpFast(functor);
+ }
+
+ typedef void EachArgCallback(Arg&, Arg::Role, Arg::Type, Arg::Width);
+
+ // Calls the functor with (arg, role, type, width). This function is auto-generated by
+ // opcode_generator.rb.
+ template<typename Functor>
+ void forEachArg(const Functor&);
+
+ // Calls the functor with (tmp, role, type, width).
+ template<typename Functor>
+ void forEachTmp(const Functor& functor)
+ {
+ forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+ arg.forEachTmp(role, type, width, functor);
+ });
+ }
+
+ // Thing can be either Arg, Tmp, or StackSlot*.
+ template<typename Thing, typename Functor>
+ void forEach(const Functor&);
+
+ // Reports any additional registers clobbered by this operation. Note that for efficiency,
+ // extraClobberedRegs() only works for the Patch opcode.
+ RegisterSet extraClobberedRegs();
+ RegisterSet extraEarlyClobberedRegs();
+
+    // Iterate over all Def's that happen at the end of an instruction. You supply a pair of
+    // instructions. The instructions must appear next to each other, in that order, in some basic
+ // block. You can pass null for the first instruction when analyzing what happens at the top of
+ // a basic block. You can pass null for the second instruction when analyzing what happens at the
+ // bottom of a basic block.
+ template<typename Thing, typename Functor>
+ static void forEachDef(Inst* prevInst, Inst* nextInst, const Functor&);
+
+ // Iterate over all Def's that happen at the end of this instruction, including extra clobbered
+ // registers. Note that Thing can only be Arg or Tmp when you use this functor.
+ template<typename Thing, typename Functor>
+ static void forEachDefWithExtraClobberedRegs(Inst* prevInst, Inst* nextInst, const Functor&);
+
+ // Use this to report which registers are live. This should be done just before codegen. Note
+ // that for efficiency, reportUsedRegisters() only works for the Patch opcode.
+ void reportUsedRegisters(const RegisterSet&);
+
+ // Is this instruction in one of the valid forms right now? This function is auto-generated by
+ // opcode_generator.rb.
+ bool isValidForm();
+
+ // Assuming this instruction is in a valid form right now, will it still be in one of the valid
+ // forms if we put an Addr referencing the stack (or a StackSlot or CallArg, of course) in the
+ // given index? Spilling uses this: it walks the args by index to find Tmps that need spilling;
+ // if it finds one, it calls this to see if it can replace the Arg::Tmp with an Arg::Addr. If it
+ // finds a non-Tmp Arg, then it calls that Arg's forEachTmp to do a replacement that way.
+ //
+ // This function is auto-generated by opcode_generator.rb.
+ bool admitsStack(unsigned argIndex);
+ bool admitsStack(Arg&);
+
+ // Defined by opcode_generator.rb.
+ bool isTerminal();
+
+ // Returns true if this instruction can have any effects other than control flow or arguments.
+ bool hasNonArgNonControlEffects();
+
+ // Returns true if this instruction can have any effects other than what is implied by arguments.
+ // For example, "Move $42, (%rax)" will return false because the effect of storing to (%rax) is
+ // implied by the second argument.
+ bool hasNonArgEffects();
+
+ // Tells you if this operation has arg effects.
+ bool hasArgEffects();
+
+ // Tells you if this operation has non-control effects.
+ bool hasNonControlEffects() { return hasNonArgNonControlEffects() || hasArgEffects(); }
+
+ // Generate some code for this instruction. This is, like, literally our backend. If this is the
+ // terminal, it returns the jump that needs to be linked for the "then" case, with the "else"
+ // case being fall-through. This function is auto-generated by opcode_generator.rb.
+ CCallHelpers::Jump generate(CCallHelpers&, GenerationContext&);
+
+    // If the source arguments benefit from being aliased to a destination argument,
+    // this returns the index of the destination argument.
+    // The sources are assumed to be at (index - 1) and (index - 2).
+ // For example,
+ // Add Tmp1, Tmp2, Tmp3
+ // returns 2 if 0 and 1 benefit from aliasing to Tmp3.
+ std::optional<unsigned> shouldTryAliasingDef();
+
+ // This computes a hash for comparing this to JSAir's Inst.
+ unsigned jsHash() const;
+
+ void dump(PrintStream&) const;
+
+ ArgList args;
+ Value* origin; // The B3::Value that this originated from.
+ Kind kind;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirInstInlines.h b/Source/JavaScriptCore/b3/air/AirInstInlines.h
new file mode 100644
index 000000000..2d3da626f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirInstInlines.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include "AirOpcodeUtils.h"
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<typename Thing, typename Functor>
+void Inst::forEach(const Functor& functor)
+{
+ forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+ arg.forEach<Thing>(role, type, width, functor);
+ });
+}
+
+inline RegisterSet Inst::extraClobberedRegs()
+{
+ ASSERT(kind.opcode == Patch);
+ return args[0].special()->extraClobberedRegs(*this);
+}
+
+inline RegisterSet Inst::extraEarlyClobberedRegs()
+{
+ ASSERT(kind.opcode == Patch);
+ return args[0].special()->extraEarlyClobberedRegs(*this);
+}
+
+template<typename Thing, typename Functor>
+inline void Inst::forEachDef(Inst* prevInst, Inst* nextInst, const Functor& functor)
+{
+ if (prevInst) {
+ prevInst->forEach<Thing>(
+ [&] (Thing& thing, Arg::Role role, Arg::Type argType, Arg::Width argWidth) {
+ if (Arg::isLateDef(role))
+ functor(thing, role, argType, argWidth);
+ });
+ }
+
+ if (nextInst) {
+ nextInst->forEach<Thing>(
+ [&] (Thing& thing, Arg::Role role, Arg::Type argType, Arg::Width argWidth) {
+ if (Arg::isEarlyDef(role))
+ functor(thing, role, argType, argWidth);
+ });
+ }
+}
+
+template<typename Thing, typename Functor>
+inline void Inst::forEachDefWithExtraClobberedRegs(
+ Inst* prevInst, Inst* nextInst, const Functor& functor)
+{
+ forEachDef<Thing>(prevInst, nextInst, functor);
+
+ Arg::Role regDefRole;
+
+ auto reportReg = [&] (Reg reg) {
+ Arg::Type type = reg.isGPR() ? Arg::GP : Arg::FP;
+ functor(Thing(reg), regDefRole, type, Arg::conservativeWidth(type));
+ };
+
+ if (prevInst && prevInst->kind.opcode == Patch) {
+ regDefRole = Arg::Def;
+ prevInst->extraClobberedRegs().forEach(reportReg);
+ }
+
+ if (nextInst && nextInst->kind.opcode == Patch) {
+ regDefRole = Arg::EarlyDef;
+ nextInst->extraEarlyClobberedRegs().forEach(reportReg);
+ }
+}
+
+inline void Inst::reportUsedRegisters(const RegisterSet& usedRegisters)
+{
+ ASSERT(kind.opcode == Patch);
+ args[0].special()->reportUsedRegisters(*this, usedRegisters);
+}
+
+inline bool Inst::admitsStack(Arg& arg)
+{
+ return admitsStack(&arg - &args[0]);
+}
+
+inline std::optional<unsigned> Inst::shouldTryAliasingDef()
+{
+ if (!isX86())
+ return std::nullopt;
+
+ switch (kind.opcode) {
+ case Add32:
+ case Add64:
+ case And32:
+ case And64:
+ case Mul32:
+ case Mul64:
+ case Or32:
+ case Or64:
+ case Xor32:
+ case Xor64:
+ case AndFloat:
+ case AndDouble:
+ case OrFloat:
+ case OrDouble:
+ case XorDouble:
+ case XorFloat:
+ if (args.size() == 3)
+ return 2;
+ break;
+ case AddDouble:
+ case AddFloat:
+ case MulDouble:
+ case MulFloat:
+#if CPU(X86) || CPU(X86_64)
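+        // With AVX these have non-destructive three-operand encodings, so there is no benefit to
+        // aliasing the def to one of the uses.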
+ if (MacroAssembler::supportsAVX())
+ return std::nullopt;
+#endif
+ if (args.size() == 3)
+ return 2;
+ break;
+ case BranchAdd32:
+ case BranchAdd64:
+ if (args.size() == 4)
+ return 3;
+ break;
+ case MoveConditionally32:
+ case MoveConditionally64:
+ case MoveConditionallyTest32:
+ case MoveConditionallyTest64:
+ case MoveConditionallyDouble:
+ case MoveConditionallyFloat:
+ case MoveDoubleConditionally32:
+ case MoveDoubleConditionally64:
+ case MoveDoubleConditionallyTest32:
+ case MoveDoubleConditionallyTest64:
+ case MoveDoubleConditionallyDouble:
+ case MoveDoubleConditionallyFloat:
+ if (args.size() == 6)
+ return 5;
+ break;
+ case Patch:
+ return PatchCustom::shouldTryAliasingDef(*this);
+ default:
+ break;
+ }
+ return std::nullopt;
+}
+
+inline bool isShiftValid(const Inst& inst)
+{
+#if CPU(X86) || CPU(X86_64)
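+    // x86 shifts by a variable amount require the shift count to be in %ecx (i.e. %cl).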
+ return inst.args[0] == Tmp(X86Registers::ecx);
+#else
+ UNUSED_PARAM(inst);
+ return true;
+#endif
+}
+
+inline bool isLshift32Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isLshift64Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isRshift32Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isRshift64Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isUrshift32Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isUrshift64Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isRotateRight32Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isRotateLeft32Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isRotateRight64Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isRotateLeft64Valid(const Inst& inst)
+{
+ return isShiftValid(inst);
+}
+
+inline bool isX86DivHelperValid(const Inst& inst)
+{
+#if CPU(X86) || CPU(X86_64)
+ return inst.args[0] == Tmp(X86Registers::eax)
+ && inst.args[1] == Tmp(X86Registers::edx);
+#else
+ UNUSED_PARAM(inst);
+ return false;
+#endif
+}
+
+inline bool isX86ConvertToDoubleWord32Valid(const Inst& inst)
+{
+ return isX86DivHelperValid(inst);
+}
+
+inline bool isX86ConvertToQuadWord64Valid(const Inst& inst)
+{
+ return isX86DivHelperValid(inst);
+}
+
+inline bool isX86Div32Valid(const Inst& inst)
+{
+ return isX86DivHelperValid(inst);
+}
+
+inline bool isX86UDiv32Valid(const Inst& inst)
+{
+ return isX86DivHelperValid(inst);
+}
+
+inline bool isX86Div64Valid(const Inst& inst)
+{
+ return isX86DivHelperValid(inst);
+}
+
+inline bool isX86UDiv64Valid(const Inst& inst)
+{
+ return isX86DivHelperValid(inst);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp b/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp
new file mode 100644
index 000000000..7e81b5e01
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp
@@ -0,0 +1,1656 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirIteratedRegisterCoalescing.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPadInterference.h"
+#include "AirPhaseScope.h"
+#include "AirTmpInlines.h"
+#include "AirTmpWidth.h"
+#include "AirUseCounts.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool debug = false;
+bool traceDebug = false;
+bool reportStats = false;
+
+// The AbstractColoringAllocator defines all the code that is independent
+// of the register type and can be shared when allocating registers.
+template<typename IndexType>
+class AbstractColoringAllocator {
+public:
+ AbstractColoringAllocator(const Vector<Reg>& regsInPriorityOrder, IndexType lastPrecoloredRegisterIndex, unsigned tmpArraySize, const HashSet<unsigned>& unspillableTmp)
+ : m_regsInPriorityOrder(regsInPriorityOrder)
+ , m_lastPrecoloredRegisterIndex(lastPrecoloredRegisterIndex)
+ , m_unspillableTmps(unspillableTmp)
+ {
+ for (Reg reg : m_regsInPriorityOrder)
+ m_mutableRegs.set(reg);
+
+ initializeDegrees(tmpArraySize);
+
+ m_adjacencyList.resize(tmpArraySize);
+ m_moveList.resize(tmpArraySize);
+ m_coalescedTmps.fill(0, tmpArraySize);
+ m_isOnSelectStack.ensureSize(tmpArraySize);
+ }
+
+protected:
+ IndexType getAlias(IndexType tmpIndex) const
+ {
+ IndexType alias = tmpIndex;
+ while (IndexType nextAlias = m_coalescedTmps[alias])
+ alias = nextAlias;
+ return alias;
+ }
+
+ void addEdge(IndexType a, IndexType b)
+ {
+ if (a == b)
+ return;
+ addEdgeDistinct(a, b);
+ }
+
+ void makeWorkList()
+ {
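+        // Partition the non-precolored Tmps: high-degree Tmps go to the spill worklist,
+        // move-related Tmps go to the freeze worklist, and everything else goes to the simplify worklist.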
+ IndexType firstNonRegIndex = m_lastPrecoloredRegisterIndex + 1;
+ for (IndexType i = firstNonRegIndex; i < m_degrees.size(); ++i) {
+ unsigned degree = m_degrees[i];
+ if (degree >= m_regsInPriorityOrder.size())
+ addToSpill(i);
+ else if (!m_moveList[i].isEmpty())
+ m_freezeWorklist.add(i);
+ else
+ m_simplifyWorklist.append(i);
+ }
+ }
+
+ void addToSpill(unsigned toSpill)
+ {
+ if (m_unspillableTmps.contains(toSpill))
+ return;
+
+ m_spillWorklist.add(toSpill);
+ }
+
+    // A low-degree vertex can always be colored: just pick any color not taken by its
+    // adjacent vertices.
+    // The "Simplify" phase takes a low-degree vertex out of the interference graph to simplify it.
+ void simplify()
+ {
+ IndexType lastIndex = m_simplifyWorklist.takeLast();
+
+ ASSERT(!m_selectStack.contains(lastIndex));
+ ASSERT(!m_isOnSelectStack.get(lastIndex));
+ m_selectStack.append(lastIndex);
+ m_isOnSelectStack.quickSet(lastIndex);
+
+ forEachAdjacent(lastIndex, [this](IndexType adjacentTmpIndex) {
+ decrementDegree(adjacentTmpIndex);
+ });
+ }
+
+ void freeze()
+ {
+ IndexType victimIndex = m_freezeWorklist.takeAny();
+ ASSERT_WITH_MESSAGE(getAlias(victimIndex) == victimIndex, "coalesce() should not leave aliased Tmp in the worklist.");
+ m_simplifyWorklist.append(victimIndex);
+ freezeMoves(victimIndex);
+ }
+
+ void freezeMoves(IndexType tmpIndex)
+ {
+ forEachNodeMoves(tmpIndex, [this, tmpIndex] (IndexType moveIndex) {
+ if (!m_activeMoves.quickClear(moveIndex))
+ m_worklistMoves.takeMove(moveIndex);
+
+ const MoveOperands& moveOperands = m_coalescingCandidates[moveIndex];
+ IndexType srcTmpIndex = moveOperands.srcIndex;
+ IndexType dstTmpIndex = moveOperands.dstIndex;
+
+ IndexType originalOtherTmp = srcTmpIndex != tmpIndex ? srcTmpIndex : dstTmpIndex;
+ IndexType otherTmpIndex = getAlias(originalOtherTmp);
+ if (m_degrees[otherTmpIndex] < m_regsInPriorityOrder.size() && !isMoveRelated(otherTmpIndex)) {
+ if (m_freezeWorklist.remove(otherTmpIndex))
+ m_simplifyWorklist.append(otherTmpIndex);
+ }
+ });
+ }
+
+ void coalesce()
+ {
+ unsigned moveIndex = m_worklistMoves.takeLastMove();
+ const MoveOperands& moveOperands = m_coalescingCandidates[moveIndex];
+ IndexType u = getAlias(moveOperands.srcIndex);
+ IndexType v = getAlias(moveOperands.dstIndex);
+
+ if (isPrecolored(v))
+ std::swap(u, v);
+
+ if (traceDebug)
+ dataLog("Coalescing move at index ", moveIndex, " u = ", u, " v = ", v, "\n");
+
+ if (u == v) {
+ addWorkList(u);
+
+ if (traceDebug)
+ dataLog(" Coalesced\n");
+ } else if (isPrecolored(v)
+ || m_interferenceEdges.contains(InterferenceEdge(u, v))
+ || (u == m_framePointerIndex && m_interferesWithFramePointer.quickGet(v))) {
+ addWorkList(u);
+ addWorkList(v);
+
+ if (traceDebug)
+ dataLog(" Constrained\n");
+ } else if (canBeSafelyCoalesced(u, v)) {
+ combine(u, v);
+ addWorkList(u);
+ m_hasCoalescedNonTrivialMove = true;
+
+ if (traceDebug)
+ dataLog(" Safe Coalescing\n");
+ } else {
+ m_activeMoves.quickSet(moveIndex);
+
+ if (traceDebug)
+ dataLog(" Failed coalescing, added to active moves.\n");
+ }
+ }
+
+ void assignColors()
+ {
+ ASSERT(m_simplifyWorklist.isEmpty());
+ ASSERT(m_worklistMoves.isEmpty());
+ ASSERT(m_freezeWorklist.isEmpty());
+ ASSERT(m_spillWorklist.isEmpty());
+
+ // Reclaim as much memory as possible.
+ m_interferenceEdges.clear();
+ m_degrees.clear();
+ m_moveList.clear();
+ m_worklistMoves.clear();
+ m_simplifyWorklist.clear();
+ m_spillWorklist.clear();
+ m_freezeWorklist.clear();
+
+ // Try to color the Tmp on the stack.
+ m_coloredTmp.resize(m_adjacencyList.size());
+
+ while (!m_selectStack.isEmpty()) {
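+        // Pop Tmps off the select stack and give each one the first register, in priority order,
+        // that is not already used by a colored neighbor. If no register is free, the Tmp is spilled.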
+ unsigned tmpIndex = m_selectStack.takeLast();
+ ASSERT(!isPrecolored(tmpIndex));
+ ASSERT(!m_coloredTmp[tmpIndex]);
+
+ RegisterSet coloredRegisters;
+ for (IndexType adjacentTmpIndex : m_adjacencyList[tmpIndex]) {
+ IndexType aliasTmpIndex = getAlias(adjacentTmpIndex);
+ Reg reg = m_coloredTmp[aliasTmpIndex];
+
+ ASSERT(!isPrecolored(aliasTmpIndex) || (isPrecolored(aliasTmpIndex) && reg));
+
+ if (reg)
+ coloredRegisters.set(reg);
+ }
+
+ bool colorAssigned = false;
+ for (Reg reg : m_regsInPriorityOrder) {
+ if (!coloredRegisters.get(reg)) {
+ m_coloredTmp[tmpIndex] = reg;
+ colorAssigned = true;
+ break;
+ }
+ }
+
+ if (!colorAssigned)
+ m_spilledTmps.append(tmpIndex);
+ }
+ m_selectStack.clear();
+
+ if (m_spilledTmps.isEmpty())
+ m_coalescedTmpsAtSpill.clear();
+ else
+ m_coloredTmp.clear();
+ }
+
+private:
+ void initializeDegrees(unsigned tmpArraySize)
+ {
+ m_degrees.resize(tmpArraySize);
+
+ // All precolored registers have an "infinite" degree.
+ unsigned firstNonRegIndex = m_lastPrecoloredRegisterIndex + 1;
+ for (unsigned i = 0; i < firstNonRegIndex; ++i)
+ m_degrees[i] = std::numeric_limits<unsigned>::max();
+
+ memset(m_degrees.data() + firstNonRegIndex, 0, (tmpArraySize - firstNonRegIndex) * sizeof(unsigned));
+ }
+
+ void addEdgeDistinct(IndexType a, IndexType b)
+ {
+ ASSERT(a != b);
+ if (m_interferenceEdges.add(InterferenceEdge(a, b)).isNewEntry) {
+ if (!isPrecolored(a)) {
+ ASSERT(!m_adjacencyList[a].contains(b));
+ m_adjacencyList[a].append(b);
+ m_degrees[a]++;
+ }
+
+ if (!isPrecolored(b)) {
+ ASSERT(!m_adjacencyList[b].contains(a));
+ m_adjacencyList[b].append(a);
+ m_degrees[b]++;
+ }
+ }
+ }
+
+ void decrementDegree(IndexType tmpIndex)
+ {
+ ASSERT(m_degrees[tmpIndex]);
+
+ unsigned oldDegree = m_degrees[tmpIndex]--;
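+        // Dropping from degree K to K - 1 makes this Tmp colorable again, so re-enable its moves
+        // and transfer it from the spill worklist to the freeze or simplify worklist.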
+ if (oldDegree == m_regsInPriorityOrder.size()) {
+ enableMovesOnValueAndAdjacents(tmpIndex);
+ m_spillWorklist.remove(tmpIndex);
+ if (isMoveRelated(tmpIndex))
+ m_freezeWorklist.add(tmpIndex);
+ else
+ m_simplifyWorklist.append(tmpIndex);
+ }
+ }
+
+
+ bool addEdgeDistinctWithoutDegreeChange(IndexType a, IndexType b)
+ {
+ ASSERT(a != b);
+ if (m_interferenceEdges.add(InterferenceEdge(a, b)).isNewEntry) {
+ if (!isPrecolored(a)) {
+ ASSERT(!m_adjacencyList[a].contains(b));
+ m_adjacencyList[a].append(b);
+ }
+
+ if (!isPrecolored(b)) {
+ ASSERT(!m_adjacencyList[b].contains(a));
+ m_adjacencyList[b].append(a);
+ }
+ return true;
+ }
+ return false;
+ }
+
+ bool isMoveRelated(IndexType tmpIndex)
+ {
+ for (unsigned moveIndex : m_moveList[tmpIndex]) {
+ if (m_activeMoves.quickGet(moveIndex) || m_worklistMoves.contains(moveIndex))
+ return true;
+ }
+ return false;
+ }
+
+ template<typename Function>
+ void forEachAdjacent(IndexType tmpIndex, Function function)
+ {
+ for (IndexType adjacentTmpIndex : m_adjacencyList[tmpIndex]) {
+ if (!hasBeenSimplified(adjacentTmpIndex))
+ function(adjacentTmpIndex);
+ }
+ }
+
+ bool hasBeenSimplified(IndexType tmpIndex)
+ {
+ return m_isOnSelectStack.quickGet(tmpIndex) || !!m_coalescedTmps[tmpIndex];
+ }
+
+ template<typename Function>
+ void forEachNodeMoves(IndexType tmpIndex, Function function)
+ {
+ for (unsigned moveIndex : m_moveList[tmpIndex]) {
+ if (m_activeMoves.quickGet(moveIndex) || m_worklistMoves.contains(moveIndex))
+ function(moveIndex);
+ }
+ }
+
+ void enableMovesOnValue(IndexType tmpIndex)
+ {
+ for (unsigned moveIndex : m_moveList[tmpIndex]) {
+ if (m_activeMoves.quickClear(moveIndex))
+ m_worklistMoves.returnMove(moveIndex);
+ }
+ }
+
+ void enableMovesOnValueAndAdjacents(IndexType tmpIndex)
+ {
+ enableMovesOnValue(tmpIndex);
+
+ forEachAdjacent(tmpIndex, [this] (IndexType adjacentTmpIndex) {
+ enableMovesOnValue(adjacentTmpIndex);
+ });
+ }
+
+ bool isPrecolored(IndexType tmpIndex)
+ {
+ return tmpIndex <= m_lastPrecoloredRegisterIndex;
+ }
+
+ void addWorkList(IndexType tmpIndex)
+ {
+ if (!isPrecolored(tmpIndex) && m_degrees[tmpIndex] < m_regsInPriorityOrder.size() && !isMoveRelated(tmpIndex)) {
+ m_freezeWorklist.remove(tmpIndex);
+ m_simplifyWorklist.append(tmpIndex);
+ }
+ }
+
+ void combine(IndexType u, IndexType v)
+ {
+ if (!m_freezeWorklist.remove(v))
+ m_spillWorklist.remove(v);
+
+ ASSERT(!m_coalescedTmps[v]);
+ m_coalescedTmps[v] = u;
+
+ auto& vMoves = m_moveList[v];
+ m_moveList[u].add(vMoves.begin(), vMoves.end());
+
+ forEachAdjacent(v, [this, u] (IndexType adjacentTmpIndex) {
+ if (addEdgeDistinctWithoutDegreeChange(adjacentTmpIndex, u)) {
+ // If we added a new edge between the adjacentTmp and u, it replaces the edge
+ // that existed with v.
+                // The degree of adjacentTmp remains the same since its edge just changed from v to u.
+ // All we need to do is update the degree of u.
+ if (!isPrecolored(u))
+ m_degrees[u]++;
+ } else {
+ // If we already had an edge between the adjacentTmp and u, the degree of u
+ // is already correct. The degree of the adjacentTmp decreases since the edge
+ // with v is no longer relevant (we can think of it as merged with the edge with u).
+ decrementDegree(adjacentTmpIndex);
+ }
+ });
+
+ if (m_framePointerIndex && m_interferesWithFramePointer.quickGet(v))
+ m_interferesWithFramePointer.quickSet(u);
+
+ if (m_degrees[u] >= m_regsInPriorityOrder.size() && m_freezeWorklist.remove(u))
+ addToSpill(u);
+ }
+
+ bool canBeSafelyCoalesced(IndexType u, IndexType v)
+ {
+ ASSERT(!isPrecolored(v));
+ if (isPrecolored(u))
+ return precoloredCoalescingHeuristic(u, v);
+ return conservativeHeuristic(u, v);
+ }
+
+ bool conservativeHeuristic(IndexType u, IndexType v)
+ {
+        // This is using Briggs' conservative coalescing rule:
+        // if the combined node has fewer than K adjacent nodes of degree >= K, it is safe to
+        // combine the two nodes. The low-degree neighbors can always be simplified away, which
+        // leaves the combined node with fewer than K neighbors, so a color remains available for it.
+ ASSERT(u != v);
+ ASSERT(!isPrecolored(u));
+ ASSERT(!isPrecolored(v));
+
+ const auto& adjacentsOfU = m_adjacencyList[u];
+ const auto& adjacentsOfV = m_adjacencyList[v];
+
+ if (adjacentsOfU.size() + adjacentsOfV.size() < m_regsInPriorityOrder.size()) {
+            // Shortcut: if the total number of adjacents is less than the number of registers, the condition is always met.
+ return true;
+ }
+
+ HashSet<IndexType> highOrderAdjacents;
+
+ for (IndexType adjacentTmpIndex : adjacentsOfU) {
+ ASSERT(adjacentTmpIndex != v);
+ ASSERT(adjacentTmpIndex != u);
+ if (!hasBeenSimplified(adjacentTmpIndex) && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()) {
+ auto addResult = highOrderAdjacents.add(adjacentTmpIndex);
+ if (addResult.isNewEntry && highOrderAdjacents.size() >= m_regsInPriorityOrder.size())
+ return false;
+ }
+ }
+ for (IndexType adjacentTmpIndex : adjacentsOfV) {
+ ASSERT(adjacentTmpIndex != u);
+ ASSERT(adjacentTmpIndex != v);
+ if (!hasBeenSimplified(adjacentTmpIndex) && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()) {
+ auto addResult = highOrderAdjacents.add(adjacentTmpIndex);
+ if (addResult.isNewEntry && highOrderAdjacents.size() >= m_regsInPriorityOrder.size())
+ return false;
+ }
+ }
+
+ ASSERT(highOrderAdjacents.size() < m_regsInPriorityOrder.size());
+ return true;
+ }
+
+ bool precoloredCoalescingHeuristic(IndexType u, IndexType v)
+ {
+ if (traceDebug)
+ dataLog(" Checking precoloredCoalescingHeuristic\n");
+ ASSERT(isPrecolored(u));
+ ASSERT(!isPrecolored(v));
+
+ // If u is a pinned register then it's always safe to coalesce. Note that when we call this,
+ // we have already proved that there is no interference between u and v.
+ if (!m_mutableRegs.get(m_coloredTmp[u]))
+ return true;
+
+        // If any adjacent of the non-colored node is not an adjacent of the colored node AND has a degree >= K,
+        // there is a risk that this node needs to have the same color as our precolored node. If we coalesce such
+        // a move, we may create an uncolorable graph.
+ const auto& adjacentsOfV = m_adjacencyList[v];
+ for (unsigned adjacentTmpIndex : adjacentsOfV) {
+ if (!isPrecolored(adjacentTmpIndex)
+ && !hasBeenSimplified(adjacentTmpIndex)
+ && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()
+ && !m_interferenceEdges.contains(InterferenceEdge(u, adjacentTmpIndex)))
+ return false;
+ }
+ return true;
+ }
+
+protected:
+#if PLATFORM(COCOA)
+#pragma mark -
+#endif
+
+ // Interference edges are not directed. An edge between any two Tmps is represented
+    // by the concatenated values of the smaller Tmp followed by the bigger Tmp.
+ class InterferenceEdge {
+ public:
+ InterferenceEdge()
+ {
+ }
+
+ InterferenceEdge(IndexType a, IndexType b)
+ {
+ ASSERT(a);
+ ASSERT(b);
+ ASSERT_WITH_MESSAGE(a != b, "A Tmp can never interfere with itself. Doing so would force it to be the superposition of two registers.");
+
+ if (b < a)
+ std::swap(a, b);
+ m_value = static_cast<uint64_t>(a) << 32 | b;
+ }
+
+ InterferenceEdge(WTF::HashTableDeletedValueType)
+ : m_value(std::numeric_limits<uint64_t>::max())
+ {
+ }
+
+ IndexType first() const
+ {
+ return m_value >> 32 & 0xffffffff;
+ }
+
+ IndexType second() const
+ {
+ return m_value & 0xffffffff;
+ }
+
+ bool operator==(const InterferenceEdge other) const
+ {
+ return m_value == other.m_value;
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return *this == InterferenceEdge(WTF::HashTableDeletedValue);
+ }
+
+ unsigned hash() const
+ {
+ return WTF::IntHash<uint64_t>::hash(m_value);
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(first(), "<=>", second());
+ }
+
+ private:
+ uint64_t m_value { 0 };
+ };
+
+ struct InterferenceEdgeHash {
+ static unsigned hash(const InterferenceEdge& key) { return key.hash(); }
+ static bool equal(const InterferenceEdge& a, const InterferenceEdge& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+ };
+ typedef SimpleClassHashTraits<InterferenceEdge> InterferenceEdgeHashTraits;
+
+ const Vector<Reg>& m_regsInPriorityOrder;
+ RegisterSet m_mutableRegs;
+ IndexType m_lastPrecoloredRegisterIndex { 0 };
+
+ // The interference graph.
+ HashSet<InterferenceEdge, InterferenceEdgeHash, InterferenceEdgeHashTraits> m_interferenceEdges;
+ Vector<Vector<IndexType, 0, UnsafeVectorOverflow, 4>, 0, UnsafeVectorOverflow> m_adjacencyList;
+ Vector<IndexType, 0, UnsafeVectorOverflow> m_degrees;
+
+ // Instead of keeping track of the move instructions, we just keep their operands around and use the index
+ // in the vector as the "identifier" for the move.
+ struct MoveOperands {
+ IndexType srcIndex;
+ IndexType dstIndex;
+ };
+ Vector<MoveOperands, 0, UnsafeVectorOverflow> m_coalescingCandidates;
+
+ // List of every move instruction associated with a Tmp.
+ Vector<HashSet<IndexType, typename DefaultHash<IndexType>::Hash, WTF::UnsignedWithZeroKeyHashTraits<IndexType>>> m_moveList;
+
+ // Colors.
+ Vector<Reg, 0, UnsafeVectorOverflow> m_coloredTmp;
+ Vector<IndexType> m_spilledTmps;
+
+    // Values that have been coalesced with another value.
+ Vector<IndexType, 0, UnsafeVectorOverflow> m_coalescedTmps;
+
+ // The stack of Tmp removed from the graph and ready for coloring.
+ BitVector m_isOnSelectStack;
+ Vector<IndexType> m_selectStack;
+
+ IndexType m_framePointerIndex { 0 };
+ BitVector m_interferesWithFramePointer;
+
+ struct OrderedMoveSet {
+ unsigned addMove()
+ {
+ ASSERT(m_lowPriorityMoveList.isEmpty());
+ ASSERT(!m_firstLowPriorityMoveIndex);
+
+ unsigned nextIndex = m_positionInMoveList.size();
+ unsigned position = m_moveList.size();
+ m_moveList.append(nextIndex);
+ m_positionInMoveList.append(position);
+ return nextIndex;
+ }
+
+ void startAddingLowPriorityMoves()
+ {
+ ASSERT(m_lowPriorityMoveList.isEmpty());
+ m_firstLowPriorityMoveIndex = m_moveList.size();
+ }
+
+ unsigned addLowPriorityMove()
+ {
+ ASSERT(m_firstLowPriorityMoveIndex == m_moveList.size());
+
+ unsigned nextIndex = m_positionInMoveList.size();
+ unsigned position = m_lowPriorityMoveList.size();
+ m_lowPriorityMoveList.append(nextIndex);
+ m_positionInMoveList.append(position);
+
+ ASSERT(nextIndex >= m_firstLowPriorityMoveIndex);
+
+ return nextIndex;
+ }
+
+ bool isEmpty() const
+ {
+ return m_moveList.isEmpty() && m_lowPriorityMoveList.isEmpty();
+ }
+
+ bool contains(unsigned index)
+ {
+ return m_positionInMoveList[index] != std::numeric_limits<unsigned>::max();
+ }
+
+ void takeMove(unsigned moveIndex)
+ {
+ unsigned positionInMoveList = m_positionInMoveList[moveIndex];
+ if (positionInMoveList == std::numeric_limits<unsigned>::max())
+ return;
+
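+            // Remove by swapping with the last entry of the relevant list so removal stays O(1);
+            // m_positionInMoveList records where each move currently lives.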
+ if (moveIndex < m_firstLowPriorityMoveIndex) {
+ ASSERT(m_moveList[positionInMoveList] == moveIndex);
+ unsigned lastIndex = m_moveList.last();
+ m_positionInMoveList[lastIndex] = positionInMoveList;
+ m_moveList[positionInMoveList] = lastIndex;
+ m_moveList.removeLast();
+ } else {
+ ASSERT(m_lowPriorityMoveList[positionInMoveList] == moveIndex);
+ unsigned lastIndex = m_lowPriorityMoveList.last();
+ m_positionInMoveList[lastIndex] = positionInMoveList;
+ m_lowPriorityMoveList[positionInMoveList] = lastIndex;
+ m_lowPriorityMoveList.removeLast();
+ }
+
+ m_positionInMoveList[moveIndex] = std::numeric_limits<unsigned>::max();
+
+ ASSERT(!contains(moveIndex));
+ }
+
+ unsigned takeLastMove()
+ {
+ ASSERT(!isEmpty());
+
+ unsigned lastIndex;
+ if (!m_moveList.isEmpty()) {
+ lastIndex = m_moveList.takeLast();
+ ASSERT(m_positionInMoveList[lastIndex] == m_moveList.size());
+ } else {
+ lastIndex = m_lowPriorityMoveList.takeLast();
+ ASSERT(m_positionInMoveList[lastIndex] == m_lowPriorityMoveList.size());
+ }
+ m_positionInMoveList[lastIndex] = std::numeric_limits<unsigned>::max();
+
+ ASSERT(!contains(lastIndex));
+ return lastIndex;
+ }
+
+ void returnMove(unsigned index)
+ {
+ // This assertion is a bit strict but that is how the move list should be used. The only kind of moves that can
+ // return to the list are the ones that we previously failed to coalesce with the conservative heuristics.
+ // Values should not be added back if they were never taken out when attempting coalescing.
+ ASSERT(!contains(index));
+
+ if (index < m_firstLowPriorityMoveIndex) {
+ unsigned position = m_moveList.size();
+ m_moveList.append(index);
+ m_positionInMoveList[index] = position;
+ } else {
+ unsigned position = m_lowPriorityMoveList.size();
+ m_lowPriorityMoveList.append(index);
+ m_positionInMoveList[index] = position;
+ }
+
+ ASSERT(contains(index));
+ }
+
+ void clear()
+ {
+ m_positionInMoveList.clear();
+ m_moveList.clear();
+ m_lowPriorityMoveList.clear();
+ }
+
+ private:
+ Vector<unsigned, 0, UnsafeVectorOverflow> m_positionInMoveList;
+ Vector<unsigned, 0, UnsafeVectorOverflow> m_moveList;
+ Vector<unsigned, 0, UnsafeVectorOverflow> m_lowPriorityMoveList;
+ unsigned m_firstLowPriorityMoveIndex { 0 };
+ };
+
+ // Work lists.
+ // Set of "move" enabled for possible coalescing.
+ OrderedMoveSet m_worklistMoves;
+ // Set of "move" not yet ready for coalescing.
+ BitVector m_activeMoves;
+ // Low-degree, non-Move related.
+ Vector<IndexType> m_simplifyWorklist;
+ // High-degree Tmp.
+ HashSet<IndexType> m_spillWorklist;
+ // Low-degree, Move related.
+ HashSet<IndexType> m_freezeWorklist;
+
+ bool m_hasSelectedSpill { false };
+ bool m_hasCoalescedNonTrivialMove { false };
+
+    // The mapping of Tmps to their aliases, for Moves that are always coalesced regardless of spilling.
+ Vector<IndexType, 0, UnsafeVectorOverflow> m_coalescedTmpsAtSpill;
+
+ const HashSet<unsigned>& m_unspillableTmps;
+};
+
+// This performs all the tasks that are specific to a given register type.
+template<Arg::Type type>
+class ColoringAllocator : public AbstractColoringAllocator<unsigned> {
+public:
+ ColoringAllocator(Code& code, TmpWidth& tmpWidth, const UseCounts<Tmp>& useCounts, const HashSet<unsigned>& unspillableTmp)
+ : AbstractColoringAllocator<unsigned>(code.regsInPriorityOrder(type), AbsoluteTmpMapper<type>::lastMachineRegisterIndex(), tmpArraySize(code), unspillableTmp)
+ , m_code(code)
+ , m_tmpWidth(tmpWidth)
+ , m_useCounts(useCounts)
+ {
+ if (type == Arg::GP) {
+ m_framePointerIndex = AbsoluteTmpMapper<type>::absoluteIndex(Tmp(MacroAssembler::framePointerRegister));
+ m_interferesWithFramePointer.ensureSize(tmpArraySize(code));
+ }
+
+ initializePrecoloredTmp();
+ build();
+ allocate();
+ }
+
+ Tmp getAlias(Tmp tmp) const
+ {
+ return AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(getAlias(AbsoluteTmpMapper<type>::absoluteIndex(tmp)));
+ }
+
+ // This tells you if a Move will be coalescable if the src and dst end up matching. This method
+    // relies on an analysis that is invalidated by register allocation, so it's only meaningful to
+    // call this *before* replacing the Tmps in this Inst with registers or spill slots.
+ bool mayBeCoalescable(const Inst& inst) const
+ {
+ return mayBeCoalescableImpl(inst, &m_tmpWidth);
+ }
+
+ bool isUselessMove(const Inst& inst) const
+ {
+ return mayBeCoalescableImpl(inst, nullptr) && inst.args[0].tmp() == inst.args[1].tmp();
+ }
+
+ Tmp getAliasWhenSpilling(Tmp tmp) const
+ {
+ ASSERT_WITH_MESSAGE(!m_spilledTmps.isEmpty(), "This function is only valid for coalescing during spilling.");
+
+ if (m_coalescedTmpsAtSpill.isEmpty())
+ return tmp;
+
+ unsigned aliasIndex = AbsoluteTmpMapper<type>::absoluteIndex(tmp);
+ while (unsigned nextAliasIndex = m_coalescedTmpsAtSpill[aliasIndex])
+ aliasIndex = nextAliasIndex;
+
+ Tmp alias = AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(aliasIndex);
+
+ ASSERT_WITH_MESSAGE(!m_spilledTmps.contains(aliasIndex) || alias == tmp, "The aliases at spill should always be colorable. Something went horribly wrong.");
+
+ return alias;
+ }
+
+ template<typename IndexIterator>
+ class IndexToTmpIteratorAdaptor {
+ public:
+ IndexToTmpIteratorAdaptor(IndexIterator&& indexIterator)
+ : m_indexIterator(WTFMove(indexIterator))
+ {
+ }
+
+ Tmp operator*() const { return AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(*m_indexIterator); }
+ IndexToTmpIteratorAdaptor& operator++() { ++m_indexIterator; return *this; }
+
+ bool operator==(const IndexToTmpIteratorAdaptor& other) const
+ {
+ return m_indexIterator == other.m_indexIterator;
+ }
+
+ bool operator!=(const IndexToTmpIteratorAdaptor& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ IndexIterator m_indexIterator;
+ };
+
+ template<typename Collection>
+ class IndexToTmpIterableAdaptor {
+ public:
+ IndexToTmpIterableAdaptor(const Collection& collection)
+ : m_collection(collection)
+ {
+ }
+
+ IndexToTmpIteratorAdaptor<typename Collection::const_iterator> begin() const
+ {
+ return m_collection.begin();
+ }
+
+ IndexToTmpIteratorAdaptor<typename Collection::const_iterator> end() const
+ {
+ return m_collection.end();
+ }
+
+ private:
+ const Collection& m_collection;
+ };
+
+ IndexToTmpIterableAdaptor<Vector<unsigned>> spilledTmps() const { return m_spilledTmps; }
+
+ bool requiresSpilling() const { return !m_spilledTmps.isEmpty(); }
+
+ Reg allocatedReg(Tmp tmp) const
+ {
+ ASSERT(!tmp.isReg());
+ ASSERT(m_coloredTmp.size());
+ ASSERT(tmp.isGP() == (type == Arg::GP));
+
+ Reg reg = m_coloredTmp[AbsoluteTmpMapper<type>::absoluteIndex(tmp)];
+ if (!reg) {
+ dataLog("FATAL: No color for ", tmp, "\n");
+ dataLog("Code:\n");
+ dataLog(m_code);
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ return reg;
+ }
+
+private:
+ static unsigned tmpArraySize(Code& code)
+ {
+ unsigned numTmps = code.numTmps(type);
+ return AbsoluteTmpMapper<type>::absoluteIndex(numTmps);
+ }
+
+ void initializePrecoloredTmp()
+ {
+ m_coloredTmp.resize(m_lastPrecoloredRegisterIndex + 1);
+ for (unsigned i = 1; i <= m_lastPrecoloredRegisterIndex; ++i) {
+ Tmp tmp = AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(i);
+ ASSERT(tmp.isReg());
+ m_coloredTmp[i] = tmp.reg();
+ }
+ }
+
+ bool mayBeCoalesced(Arg left, Arg right)
+ {
+ if (!left.isTmp() || !right.isTmp())
+ return false;
+
+ Tmp leftTmp = left.tmp();
+ Tmp rightTmp = right.tmp();
+
+ if (leftTmp == rightTmp)
+ return false;
+
+ if (leftTmp.isGP() != (type == Arg::GP) || rightTmp.isGP() != (type == Arg::GP))
+ return false;
+
+ unsigned leftIndex = AbsoluteTmpMapper<type>::absoluteIndex(leftTmp);
+ unsigned rightIndex = AbsoluteTmpMapper<type>::absoluteIndex(rightTmp);
+
+ return !m_interferenceEdges.contains(InterferenceEdge(leftIndex, rightIndex));
+ }
+
+ void addToLowPriorityCoalescingCandidates(Arg left, Arg right)
+ {
+ ASSERT(mayBeCoalesced(left, right));
+ Tmp leftTmp = left.tmp();
+ Tmp rightTmp = right.tmp();
+
+ unsigned leftIndex = AbsoluteTmpMapper<type>::absoluteIndex(leftTmp);
+ unsigned rightIndex = AbsoluteTmpMapper<type>::absoluteIndex(rightTmp);
+
+ unsigned nextMoveIndex = m_coalescingCandidates.size();
+ m_coalescingCandidates.append({ leftIndex, rightIndex });
+
+ unsigned newIndexInWorklist = m_worklistMoves.addLowPriorityMove();
+ ASSERT_UNUSED(newIndexInWorklist, newIndexInWorklist == nextMoveIndex);
+
+ ASSERT(nextMoveIndex <= m_activeMoves.size());
+ m_activeMoves.ensureSize(nextMoveIndex + 1);
+
+ m_moveList[leftIndex].add(nextMoveIndex);
+ m_moveList[rightIndex].add(nextMoveIndex);
+ }
+
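+    // Builds the interference graph and the coalescing candidates. Each block is walked
+    // backwards with a local liveness calculation; every Def interferes with everything live
+    // at that point, and coalescable Moves are recorded as candidates instead of getting a
+    // use-def interference edge.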
+ void build()
+ {
+ TmpLiveness<type> liveness(m_code);
+ for (BasicBlock* block : m_code) {
+ typename TmpLiveness<type>::LocalCalc localCalc(liveness, block);
+ for (unsigned instIndex = block->size(); instIndex--;) {
+ Inst& inst = block->at(instIndex);
+ Inst* nextInst = block->get(instIndex + 1);
+ build(&inst, nextInst, localCalc);
+ localCalc.execute(instIndex);
+ }
+ build(nullptr, &block->at(0), localCalc);
+ }
+ buildLowPriorityMoveList();
+ }
+
+ void build(Inst* prevInst, Inst* nextInst, const typename TmpLiveness<type>::LocalCalc& localCalc)
+ {
+ if (traceDebug)
+ dataLog("Building between ", pointerDump(prevInst), " and ", pointerDump(nextInst), ":\n");
+ Inst::forEachDefWithExtraClobberedRegs<Tmp>(
+ prevInst, nextInst,
+ [&] (const Tmp& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+ if (argType != type)
+ return;
+
+ // All the Def()s interfere with each other and with all the extra clobbered Tmps.
+ // We should not use forEachDefWithExtraClobberedRegs() here since colored Tmps
+ // do not need interference edges in our implementation.
+ Inst::forEachDef<Tmp>(
+ prevInst, nextInst,
+ [&] (Tmp& otherArg, Arg::Role, Arg::Type argType, Arg::Width) {
+ if (argType != type)
+ return;
+
+ if (traceDebug)
+ dataLog(" Adding def-def edge: ", arg, ", ", otherArg, "\n");
+ this->addEdge(arg, otherArg);
+ });
+ });
+
+ if (prevInst && mayBeCoalescable(*prevInst)) {
+ // We do not want the Use() of this move to interfere with the Def(), even if it is live
+ // after the Move. If we were to add the interference edge, it would be impossible to
+            // coalesce the Move even if the two Tmps never interfere anywhere.
+ Tmp defTmp;
+ Tmp useTmp;
+ prevInst->forEachTmp([&defTmp, &useTmp] (Tmp& argTmp, Arg::Role role, Arg::Type, Arg::Width) {
+ if (Arg::isLateDef(role))
+ defTmp = argTmp;
+ else {
+ ASSERT(Arg::isEarlyUse(role));
+ useTmp = argTmp;
+ }
+ });
+ ASSERT(defTmp);
+ ASSERT(useTmp);
+
+ unsigned nextMoveIndex = m_coalescingCandidates.size();
+ m_coalescingCandidates.append({ AbsoluteTmpMapper<type>::absoluteIndex(useTmp), AbsoluteTmpMapper<type>::absoluteIndex(defTmp) });
+
+ unsigned newIndexInWorklist = m_worklistMoves.addMove();
+ ASSERT_UNUSED(newIndexInWorklist, newIndexInWorklist == nextMoveIndex);
+
+ ASSERT(nextMoveIndex <= m_activeMoves.size());
+ m_activeMoves.ensureSize(nextMoveIndex + 1);
+
+ for (const Arg& arg : prevInst->args) {
+ auto& list = m_moveList[AbsoluteTmpMapper<type>::absoluteIndex(arg.tmp())];
+ list.add(nextMoveIndex);
+ }
+
+ for (const Tmp& liveTmp : localCalc.live()) {
+ if (liveTmp != useTmp) {
+ if (traceDebug)
+ dataLog(" Adding def-live for coalescable: ", defTmp, ", ", liveTmp, "\n");
+ addEdge(defTmp, liveTmp);
+ }
+ }
+
+ // The next instruction could have early clobbers or early def's. We need to consider
+ // those now.
+ addEdges(nullptr, nextInst, localCalc.live());
+ } else
+ addEdges(prevInst, nextInst, localCalc.live());
+ }
+
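+    // On x86, some instructions can alias one of their sources with their destination
+    // (Inst::shouldTryAliasingDef()). Record those as low-priority coalescing candidates so
+    // they are only attempted after the regular Moves.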
+ void buildLowPriorityMoveList()
+ {
+ if (!isX86())
+ return;
+
+ m_worklistMoves.startAddingLowPriorityMoves();
+ for (BasicBlock* block : m_code) {
+ for (Inst& inst : *block) {
+ if (std::optional<unsigned> defArgIndex = inst.shouldTryAliasingDef()) {
+ Arg op1 = inst.args[*defArgIndex - 2];
+ Arg op2 = inst.args[*defArgIndex - 1];
+ Arg dest = inst.args[*defArgIndex];
+
+ if (op1 == dest || op2 == dest)
+ continue;
+
+ if (mayBeCoalesced(op1, dest))
+ addToLowPriorityCoalescingCandidates(op1, dest);
+ if (op1 != op2 && mayBeCoalesced(op2, dest))
+ addToLowPriorityCoalescingCandidates(op2, dest);
+ }
+ }
+ }
+ }
+
+ void addEdges(Inst* prevInst, Inst* nextInst, typename TmpLiveness<type>::LocalCalc::Iterable liveTmps)
+ {
+        // All the Def()s interfere with everything live.
+ Inst::forEachDefWithExtraClobberedRegs<Tmp>(
+ prevInst, nextInst,
+ [&] (const Tmp& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+ if (argType != type)
+ return;
+
+ for (const Tmp& liveTmp : liveTmps) {
+ ASSERT(liveTmp.isGP() == (type == Arg::GP));
+
+ if (traceDebug)
+ dataLog(" Adding def-live edge: ", arg, ", ", liveTmp, "\n");
+
+ addEdge(arg, liveTmp);
+ }
+
+ if (type == Arg::GP && !arg.isGPR())
+ m_interferesWithFramePointer.quickSet(AbsoluteTmpMapper<type>::absoluteIndex(arg));
+ });
+ }
+
+ void addEdge(Tmp a, Tmp b)
+ {
+ ASSERT_WITH_MESSAGE(a.isGP() == b.isGP(), "An interference between registers of different types does not make sense, it can lead to non-colorable graphs.");
+
+ addEdge(AbsoluteTmpMapper<type>::absoluteIndex(a), AbsoluteTmpMapper<type>::absoluteIndex(b));
+ }
+
+ // Calling this without a tmpWidth will perform a more conservative coalescing analysis that assumes
+ // that Move32's are not coalescable.
+ static bool mayBeCoalescableImpl(const Inst& inst, TmpWidth* tmpWidth)
+ {
+ switch (type) {
+ case Arg::GP:
+ switch (inst.kind.opcode) {
+ case Move:
+ case Move32:
+ break;
+ default:
+ return false;
+ }
+ break;
+ case Arg::FP:
+ switch (inst.kind.opcode) {
+ case MoveFloat:
+ case MoveDouble:
+ break;
+ default:
+ return false;
+ }
+ break;
+ }
+
+        ASSERT_WITH_MESSAGE(inst.args.size() == 2, "We assume coalescable moves only have two arguments in a few places.");
+
+ if (!inst.args[0].isTmp() || !inst.args[1].isTmp())
+ return false;
+
+ ASSERT(inst.args[0].type() == type);
+ ASSERT(inst.args[1].type() == type);
+
+ // We can coalesce a Move32 so long as either of the following holds:
+ // - The input is already zero-filled.
+ // - The output only cares about the low 32 bits.
+ //
+ // Note that the input property requires an analysis over ZDef's, so it's only valid so long
+ // as the input gets a register. We don't know if the input gets a register, but we do know
+ // that if it doesn't get a register then we will still emit this Move32.
+ if (inst.kind.opcode == Move32) {
+ if (!tmpWidth)
+ return false;
+
+ if (tmpWidth->defWidth(inst.args[0].tmp()) > Arg::Width32
+ && tmpWidth->useWidth(inst.args[1].tmp()) > Arg::Width32)
+ return false;
+ }
+
+ return true;
+ }
+
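+    // Picks the potential spill candidate with the highest degree-to-use-count ratio and moves
+    // it back onto the simplify worklist. The choice is optimistic: the Tmp is only actually
+    // spilled if no color can be found for it later.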
+ void selectSpill()
+ {
+ if (!m_hasSelectedSpill) {
+ m_hasSelectedSpill = true;
+
+ if (m_hasCoalescedNonTrivialMove)
+ m_coalescedTmpsAtSpill = m_coalescedTmps;
+ }
+
+ auto iterator = m_spillWorklist.begin();
+
+ RELEASE_ASSERT_WITH_MESSAGE(iterator != m_spillWorklist.end(), "selectSpill() called when there was no spill.");
+ RELEASE_ASSERT_WITH_MESSAGE(!m_unspillableTmps.contains(*iterator), "trying to spill unspillable tmp");
+
+ // Higher score means more desirable to spill. Lower scores maximize the likelihood that a tmp
+ // gets a register.
+ auto score = [&] (Tmp tmp) -> double {
+ // Air exposes the concept of "fast tmps", and we interpret that to mean that the tmp
+ // should always be in a register.
+ if (m_code.isFastTmp(tmp))
+ return 0;
+
+ // All else being equal, the score should be directly related to the degree.
+ double degree = static_cast<double>(m_degrees[AbsoluteTmpMapper<type>::absoluteIndex(tmp)]);
+
+ // All else being equal, the score should be inversely related to the number of warm uses and
+ // defs.
+ const UseCounts<Tmp>::Counts* counts = m_useCounts[tmp];
+ if (!counts)
+ return std::numeric_limits<double>::infinity();
+
+ double uses = counts->numWarmUses + counts->numDefs;
+
+ // If it's a constant, then it's not as bad to spill. We can rematerialize it in many
+ // cases.
+ if (counts->numConstDefs == 1 && counts->numDefs == 1)
+ uses /= 2;
+
+ return degree / uses;
+ };
+
+ auto victimIterator = iterator;
+ double maxScore = score(AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(*iterator));
+
+ ++iterator;
+ for (;iterator != m_spillWorklist.end(); ++iterator) {
+ double tmpScore = score(AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(*iterator));
+ if (tmpScore > maxScore) {
+ ASSERT(!m_unspillableTmps.contains(*iterator));
+ victimIterator = iterator;
+ maxScore = tmpScore;
+ }
+ }
+
+ unsigned victimIndex = *victimIterator;
+ m_spillWorklist.remove(victimIterator);
+ m_simplifyWorklist.append(victimIndex);
+
+ freezeMoves(victimIndex);
+ }
+
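+    // Main graph-coloring loop: repeatedly simplify low-degree nodes, coalesce candidate
+    // Moves, freeze Move-related nodes, and select potential spills until every worklist is
+    // empty, then assign colors.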
+ void allocate()
+ {
+ ASSERT_WITH_MESSAGE(m_activeMoves.size() >= m_coalescingCandidates.size(), "The activeMove set should be big enough for the quick operations of BitVector.");
+
+ makeWorkList();
+
+ if (debug) {
+ dataLog("Interference: ", listDump(m_interferenceEdges), "\n");
+ dumpInterferenceGraphInDot(WTF::dataFile());
+ dataLog("Coalescing candidates:\n");
+ for (MoveOperands& moveOp : m_coalescingCandidates) {
+ dataLog(" ", AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(moveOp.srcIndex),
+ " -> ", AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(moveOp.dstIndex), "\n");
+ }
+ dataLog("Initial work list\n");
+ dumpWorkLists(WTF::dataFile());
+ }
+
+ do {
+ if (traceDebug) {
+ dataLog("Before Graph simplification iteration\n");
+ dumpWorkLists(WTF::dataFile());
+ }
+
+ if (!m_simplifyWorklist.isEmpty())
+ simplify();
+ else if (!m_worklistMoves.isEmpty())
+ coalesce();
+ else if (!m_freezeWorklist.isEmpty())
+ freeze();
+ else if (!m_spillWorklist.isEmpty())
+ selectSpill();
+
+ if (traceDebug) {
+ dataLog("After Graph simplification iteration\n");
+ dumpWorkLists(WTF::dataFile());
+ }
+ } while (!m_simplifyWorklist.isEmpty() || !m_worklistMoves.isEmpty() || !m_freezeWorklist.isEmpty() || !m_spillWorklist.isEmpty());
+
+ assignColors();
+ }
+
+#if PLATFORM(COCOA)
+#pragma mark - Debugging helpers.
+#endif
+
+ void dumpInterferenceGraphInDot(PrintStream& out)
+ {
+ out.print("graph InterferenceGraph { \n");
+
+ HashSet<Tmp> tmpsWithInterferences;
+ for (const auto& edge : m_interferenceEdges) {
+ tmpsWithInterferences.add(AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(edge.first()));
+ tmpsWithInterferences.add(AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(edge.second()));
+ }
+
+ for (const auto& tmp : tmpsWithInterferences) {
+ unsigned tmpIndex = AbsoluteTmpMapper<type>::absoluteIndex(tmp);
+ if (tmpIndex < m_degrees.size())
+ out.print(" ", tmp.internalValue(), " [label=\"", tmp, " (", m_degrees[tmpIndex], ")\"];\n");
+ else
+ out.print(" ", tmp.internalValue(), " [label=\"", tmp, "\"];\n");
+ }
+
+ for (const auto& edge : m_interferenceEdges)
+ out.print(" ", edge.first(), " -- ", edge.second(), ";\n");
+ out.print("}\n");
+ }
+
+ void dumpWorkLists(PrintStream& out)
+ {
+ out.print("Simplify work list:\n");
+ for (unsigned tmpIndex : m_simplifyWorklist)
+ out.print(" ", AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(tmpIndex), "\n");
+ out.printf("Moves work list is empty? %d\n", m_worklistMoves.isEmpty());
+ out.print("Freeze work list:\n");
+ for (unsigned tmpIndex : m_freezeWorklist)
+ out.print(" ", AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(tmpIndex), "\n");
+ out.print("Spill work list:\n");
+ for (unsigned tmpIndex : m_spillWorklist)
+ out.print(" ", AbsoluteTmpMapper<type>::tmpFromAbsoluteIndex(tmpIndex), "\n");
+ }
+
+ using AbstractColoringAllocator<unsigned>::addEdge;
+ using AbstractColoringAllocator<unsigned>::getAlias;
+
+ Code& m_code;
+ TmpWidth& m_tmpWidth;
+    // FIXME: spilling should not be type specific. It is only a side effect of using UseCounts.
+ const UseCounts<Tmp>& m_useCounts;
+};
+
+class IteratedRegisterCoalescing {
+public:
+ IteratedRegisterCoalescing(Code& code)
+ : m_code(code)
+ , m_useCounts(code)
+ {
+ }
+
+ void run()
+ {
+ padInterference(m_code);
+
+ iteratedRegisterCoalescingOnType<Arg::GP>();
+ iteratedRegisterCoalescingOnType<Arg::FP>();
+
+ fixSpillsAfterTerminals();
+
+ if (reportStats)
+ dataLog("Num iterations = ", m_numIterations, "\n");
+ }
+
+private:
+ template<Arg::Type type>
+ void iteratedRegisterCoalescingOnType()
+ {
+ HashSet<unsigned> unspillableTmps = computeUnspillableTmps<type>();
+
+ // FIXME: If a Tmp is used only from a Scratch role and that argument is !admitsStack, then
+ // we should add the Tmp to unspillableTmps. That will help avoid relooping only to turn the
+ // Tmp into an unspillable Tmp.
+ // https://bugs.webkit.org/show_bug.cgi?id=152699
+
+ while (true) {
+ ++m_numIterations;
+
+ if (traceDebug)
+ dataLog("Code at iteration ", m_numIterations, ":\n", m_code);
+
+ // FIXME: One way to optimize this code is to remove the recomputation inside the fixpoint.
+ // We need to recompute because spilling adds tmps, but we could just update tmpWidth when we
+ // add those tmps. Note that one easy way to remove the recomputation is to make any newly
+ // added Tmps get the same use/def widths that the original Tmp got. But, this may hurt the
+ // spill code we emit. Since we currently recompute TmpWidth after spilling, the newly
+ // created Tmps may get narrower use/def widths. On the other hand, the spiller already
+ // selects which move instruction to use based on the original Tmp's widths, so it may not
+            // matter that a subsequent iteration sees a conservative width for the new Tmps. Also, the
+ // recomputation may not actually be a performance problem; it's likely that a better way to
+ // improve performance of TmpWidth is to replace its HashMap with something else. It's
+ // possible that most of the TmpWidth overhead is from queries of TmpWidth rather than the
+ // recomputation, in which case speeding up the lookup would be a bigger win.
+ // https://bugs.webkit.org/show_bug.cgi?id=152478
+ m_tmpWidth.recompute(m_code);
+
+ ColoringAllocator<type> allocator(m_code, m_tmpWidth, m_useCounts, unspillableTmps);
+ if (!allocator.requiresSpilling()) {
+ assignRegistersToTmp(allocator);
+ if (traceDebug)
+ dataLog("Successfull allocation at iteration ", m_numIterations, ":\n", m_code);
+
+ return;
+ }
+ addSpillAndFill<type>(allocator, unspillableTmps);
+ }
+ }
+
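+    // A Tmp is considered unspillable when its live range spans at most two instruction
+    // indices and it has at least one use that does not admit a stack argument; spilling such
+    // a Tmp would be pointless since it would immediately be reloaded into another
+    // short-lived Tmp.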
+ template<Arg::Type type>
+ HashSet<unsigned> computeUnspillableTmps()
+ {
+ HashSet<unsigned> unspillableTmps;
+
+ struct Range {
+ unsigned first { std::numeric_limits<unsigned>::max() };
+ unsigned last { 0 };
+ unsigned count { 0 };
+ unsigned admitStackCount { 0 };
+ };
+
+ unsigned numTmps = m_code.numTmps(type);
+ unsigned arraySize = AbsoluteTmpMapper<type>::absoluteIndex(numTmps);
+
+ Vector<Range, 0, UnsafeVectorOverflow> ranges;
+ ranges.fill(Range(), arraySize);
+
+ unsigned globalIndex = 0;
+ for (BasicBlock* block : m_code) {
+ for (Inst& inst : *block) {
+ inst.forEachArg([&] (Arg& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+ if (arg.isTmp() && inst.admitsStack(arg)) {
+ if (argType != type)
+ return;
+
+ Tmp tmp = arg.tmp();
+ Range& range = ranges[AbsoluteTmpMapper<type>::absoluteIndex(tmp)];
+ range.count++;
+ range.admitStackCount++;
+ if (globalIndex < range.first) {
+ range.first = globalIndex;
+ range.last = globalIndex;
+ } else
+ range.last = globalIndex;
+
+ return;
+ }
+
+ arg.forEachTmpFast([&] (Tmp& tmp) {
+ if (tmp.isGP() != (type == Arg::GP))
+ return;
+
+ Range& range = ranges[AbsoluteTmpMapper<type>::absoluteIndex(tmp)];
+ range.count++;
+ if (globalIndex < range.first) {
+ range.first = globalIndex;
+ range.last = globalIndex;
+ } else
+ range.last = globalIndex;
+ });
+ });
+
+ ++globalIndex;
+ }
+ ++globalIndex;
+ }
+ for (unsigned i = AbsoluteTmpMapper<type>::lastMachineRegisterIndex() + 1; i < ranges.size(); ++i) {
+ Range& range = ranges[i];
+ if (range.last - range.first <= 1 && range.count > range.admitStackCount)
+ unspillableTmps.add(i);
+ }
+
+ return unspillableTmps;
+ }
+
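+    // Rewrites every Tmp of this type to the register chosen by the allocator, downgrades
+    // Move to Move32 when the width analysis shows the high bits do not matter, and drops
+    // coalescable moves that became self-moves.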
+ template<Arg::Type type>
+ void assignRegistersToTmp(const ColoringAllocator<type>& allocator)
+ {
+ for (BasicBlock* block : m_code) {
+ // Give Tmp a valid register.
+ for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+ Inst& inst = block->at(instIndex);
+
+ // The mayBeCoalescable() method will change its mind for some operations after we
+ // complete register allocation. So, we record this before starting.
+ bool mayBeCoalescable = allocator.mayBeCoalescable(inst);
+
+ // Move32 is cheaper if we know that it's equivalent to a Move. It's
+ // equivalent if the destination's high bits are not observable or if the source's high
+ // bits are all zero. Note that we don't have the opposite optimization for other
+ // architectures, which may prefer Move over Move32, because Move is canonical already.
+ if (type == Arg::GP && inst.kind.opcode == Move
+ && inst.args[0].isTmp() && inst.args[1].isTmp()) {
+ if (m_tmpWidth.useWidth(inst.args[1].tmp()) <= Arg::Width32
+ || m_tmpWidth.defWidth(inst.args[0].tmp()) <= Arg::Width32)
+ inst.kind.opcode = Move32;
+ }
+
+ inst.forEachTmpFast([&] (Tmp& tmp) {
+ if (tmp.isReg() || tmp.isGP() == (type != Arg::GP))
+ return;
+
+ Tmp aliasTmp = allocator.getAlias(tmp);
+ Tmp assignedTmp;
+ if (aliasTmp.isReg())
+ assignedTmp = Tmp(aliasTmp.reg());
+ else {
+ auto reg = allocator.allocatedReg(aliasTmp);
+ ASSERT(reg);
+ assignedTmp = Tmp(reg);
+ }
+ ASSERT(assignedTmp.isReg());
+ tmp = assignedTmp;
+ });
+
+ if (mayBeCoalescable && inst.args[0].isTmp() && inst.args[1].isTmp()
+ && inst.args[0].tmp() == inst.args[1].tmp())
+ inst = Inst();
+ }
+
+ // Remove all the useless moves we created in this block.
+ block->insts().removeAllMatching([&] (const Inst& inst) {
+ return !inst;
+ });
+ }
+ }
+
+ static unsigned stackSlotMinimumWidth(Arg::Width width)
+ {
+ return width <= Arg::Width32 ? 4 : 8;
+ }
+
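+    // Allocates a spill slot for every spilled Tmp, then rewrites the code: arguments that
+    // admit a stack slot are replaced with the slot directly, and everything else gets a load
+    // before and/or a store after the instruction through a fresh (unspillable) Tmp.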
+ template<Arg::Type type>
+ void addSpillAndFill(const ColoringAllocator<type>& allocator, HashSet<unsigned>& unspillableTmps)
+ {
+ HashMap<Tmp, StackSlot*> stackSlots;
+ for (Tmp tmp : allocator.spilledTmps()) {
+ // All the spilled values become unspillable.
+ unspillableTmps.add(AbsoluteTmpMapper<type>::absoluteIndex(tmp));
+
+ // Allocate stack slot for each spilled value.
+ StackSlot* stackSlot = m_code.addStackSlot(
+ stackSlotMinimumWidth(m_tmpWidth.requiredWidth(tmp)), StackSlotKind::Spill);
+ bool isNewTmp = stackSlots.add(tmp, stackSlot).isNewEntry;
+ ASSERT_UNUSED(isNewTmp, isNewTmp);
+ }
+
+ // Rewrite the program to get rid of the spilled Tmp.
+ InsertionSet insertionSet(m_code);
+ for (BasicBlock* block : m_code) {
+ bool hasAliasedTmps = false;
+
+ for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+ Inst& inst = block->at(instIndex);
+
+                // The TmpWidth analysis will say that a Move only stores 32 bits into the destination
+                // if the source only had 32 bits worth of non-zero bits. Same for the source: it will
+ // only claim to read 32 bits from the source if only 32 bits of the destination are
+ // read. Note that we only apply this logic if this turns into a load or store, since
+ // Move is the canonical way to move data between GPRs.
+ bool canUseMove32IfDidSpill = false;
+ bool didSpill = false;
+ if (type == Arg::GP && inst.kind.opcode == Move) {
+ if ((inst.args[0].isTmp() && m_tmpWidth.width(inst.args[0].tmp()) <= Arg::Width32)
+ || (inst.args[1].isTmp() && m_tmpWidth.width(inst.args[1].tmp()) <= Arg::Width32))
+ canUseMove32IfDidSpill = true;
+ }
+
+ // Try to replace the register use by memory use when possible.
+ inst.forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type argType, Arg::Width width) {
+ if (!arg.isTmp())
+ return;
+ if (argType != type)
+ return;
+ if (arg.isReg())
+ return;
+
+ auto stackSlotEntry = stackSlots.find(arg.tmp());
+ if (stackSlotEntry == stackSlots.end())
+ return;
+ if (!inst.admitsStack(arg))
+ return;
+
+ // If the Tmp holds a constant then we want to rematerialize its
+ // value rather than loading it from the stack. In order for that
+ // optimization to kick in, we need to avoid placing the Tmp's stack
+ // address into the instruction.
+ if (!Arg::isColdUse(role)) {
+ const UseCounts<Tmp>::Counts* counts = m_useCounts[arg.tmp()];
+ if (counts && counts->numConstDefs == 1 && counts->numDefs == 1)
+ return;
+ }
+
+ Arg::Width spillWidth = m_tmpWidth.requiredWidth(arg.tmp());
+ if (Arg::isAnyDef(role) && width < spillWidth)
+ return;
+ ASSERT(inst.kind.opcode == Move || !(Arg::isAnyUse(role) && width > spillWidth));
+
+ if (spillWidth != Arg::Width32)
+ canUseMove32IfDidSpill = false;
+
+ stackSlotEntry->value->ensureSize(
+ canUseMove32IfDidSpill ? 4 : Arg::bytes(width));
+ arg = Arg::stack(stackSlotEntry->value);
+ didSpill = true;
+ });
+
+ if (didSpill && canUseMove32IfDidSpill)
+ inst.kind.opcode = Move32;
+
+ // For every other case, add Load/Store as needed.
+ inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type argType, Arg::Width) {
+ if (tmp.isReg() || argType != type)
+ return;
+
+ auto stackSlotEntry = stackSlots.find(tmp);
+ if (stackSlotEntry == stackSlots.end()) {
+ Tmp alias = allocator.getAliasWhenSpilling(tmp);
+ if (alias != tmp) {
+ tmp = alias;
+ hasAliasedTmps = true;
+ }
+ return;
+ }
+
+ Arg::Width spillWidth = m_tmpWidth.requiredWidth(tmp);
+ Opcode move = Oops;
+ switch (stackSlotMinimumWidth(spillWidth)) {
+ case 4:
+ move = type == Arg::GP ? Move32 : MoveFloat;
+ break;
+ case 8:
+ move = type == Arg::GP ? Move : MoveDouble;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ tmp = m_code.newTmp(type);
+ unspillableTmps.add(AbsoluteTmpMapper<type>::absoluteIndex(tmp));
+
+ Arg arg = Arg::stack(stackSlotEntry->value);
+ if (Arg::isAnyUse(role) && role != Arg::Scratch)
+ insertionSet.insert(instIndex, move, inst.origin, arg, tmp);
+ if (Arg::isAnyDef(role))
+ insertionSet.insert(instIndex + 1, move, inst.origin, tmp, arg);
+ });
+ }
+ insertionSet.execute(block);
+
+ if (hasAliasedTmps) {
+ block->insts().removeAllMatching([&] (const Inst& inst) {
+ return allocator.isUselessMove(inst);
+ });
+ }
+ }
+ }
+
+ void fixSpillsAfterTerminals()
+ {
+        // Because there may be terminals that produce values, IRC may want to spill those
+        // terminals. It will then end up inserting the spill code after the terminal. If we
+        // left the graph in this state, it'd be invalid because a terminal must be the last
+        // instruction in a block. We fix that here.
+
+ InsertionSet insertionSet(m_code);
+
+ bool addedBlocks = false;
+
+ for (BasicBlock* block : m_code) {
+ unsigned terminalIndex = block->size();
+ bool foundTerminal = false;
+ while (terminalIndex--) {
+ if (block->at(terminalIndex).isTerminal()) {
+ foundTerminal = true;
+ break;
+ }
+ }
+ ASSERT_UNUSED(foundTerminal, foundTerminal);
+
+ if (terminalIndex == block->size() - 1)
+ continue;
+
+ // There must be instructions after the terminal because it's not the last instruction.
+ ASSERT(terminalIndex < block->size() - 1);
+ Vector<Inst, 1> instsToMove;
+ for (unsigned i = terminalIndex + 1; i < block->size(); i++)
+ instsToMove.append(block->at(i));
+ RELEASE_ASSERT(instsToMove.size());
+
+ for (FrequentedBlock& frequentedSuccessor : block->successors()) {
+ BasicBlock* successor = frequentedSuccessor.block();
+ // If successor's only predecessor is block, we can plant the spill inside
+ // the successor. Otherwise, we must split the critical edge and create
+ // a new block for the spill.
+ if (successor->numPredecessors() == 1) {
+ insertionSet.insertInsts(0, instsToMove);
+ insertionSet.execute(successor);
+ } else {
+ addedBlocks = true;
+ // FIXME: We probably want better block ordering here.
+ BasicBlock* newBlock = m_code.addBlock();
+ for (const Inst& inst : instsToMove)
+ newBlock->appendInst(inst);
+ newBlock->appendInst(Inst(Jump, instsToMove.last().origin));
+ newBlock->successors().append(successor);
+ frequentedSuccessor.block() = newBlock;
+ }
+ }
+
+ block->resize(terminalIndex + 1);
+ }
+
+ if (addedBlocks)
+ m_code.resetReachability();
+ }
+
+ Code& m_code;
+ TmpWidth m_tmpWidth;
+ UseCounts<Tmp> m_useCounts;
+ unsigned m_numIterations { 0 };
+};
+
+} // anonymous namespace
+
+void iteratedRegisterCoalescing(Code& code)
+{
+ PhaseScope phaseScope(code, "iteratedRegisterCoalescing");
+
+ IteratedRegisterCoalescing iteratedRegisterCoalescing(code);
+ iteratedRegisterCoalescing.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h b/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h
new file mode 100644
index 000000000..ab689b35c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a register allocation phase based on Andrew Appel's Iterated Register Coalescing
+// http://www.cs.cmu.edu/afs/cs/academic/class/15745-s07/www/papers/george.pdf
+void iteratedRegisterCoalescing(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirKind.cpp b/Source/JavaScriptCore/b3/air/AirKind.cpp
new file mode 100644
index 000000000..9fe252538
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirKind.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirKind.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void Kind::dump(PrintStream& out) const
+{
+ out.print(opcode);
+
+ CommaPrinter comma(", ", "<");
+ if (traps)
+ out.print(comma, "Traps");
+ if (comma.didPrint())
+ out.print(">");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirKind.h b/Source/JavaScriptCore/b3/air/AirKind.h
new file mode 100644
index 000000000..e723d4683
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirKind.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AirKind_h
+#define AirKind_h
+
+#if ENABLE(B3_JIT)
+
+#include "AirOpcode.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+// Air opcodes are always carried around with some flags. These flags are understood as having no
+// meaning if they are set for an opcode to which they do not apply. This makes sense, since Air
+// is a complex instruction set and most of these flags can apply to basically any opcode. In
+// fact, it's recommended to only represent something as a flag if you believe that it is largely
+// opcode-agnostic.
+
+struct Kind {
+ Kind(Opcode opcode)
+ : opcode(opcode)
+ , traps(false)
+ {
+ }
+
+ Kind()
+ : Kind(Nop)
+ {
+ }
+
+ bool operator==(const Kind& other) const
+ {
+ return opcode == other.opcode
+ && traps == other.traps;
+ }
+
+ bool operator!=(const Kind& other) const
+ {
+ return !(*this == other);
+ }
+
+ unsigned hash() const
+ {
+ return static_cast<unsigned>(opcode) + (static_cast<unsigned>(traps) << 16);
+ }
+
+ explicit operator bool() const
+ {
+ return *this != Kind();
+ }
+
+ void dump(PrintStream&) const;
+
+ Opcode opcode;
+
+ // This is an opcode-agnostic flag that indicates that we expect that this instruction will
+ // trap. This causes the compiler to assume that this side-exits and therefore has non-control
+ // non-arg effects. This also causes the compiler to tell you about all of these instructions.
+ // Note that this is just one of several ways of supporting trapping in Air, and it's the less
+ // precise variant because it's origin-based. This means that if an instruction was fused out
+ // of B3 values that had different origins, then the origin at which you'll appear to trap
+ // will be somewhat random. The upside of this approach is that it imposes by far the least
+ // overhead on the compiler.
+ // FIXME: Make this completely work.
+ // https://bugs.webkit.org/show_bug.cgi?id=162689
+ bool traps : 1;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+#endif // AirKind_h
+
diff --git a/Source/JavaScriptCore/b3/air/AirLiveness.h b/Source/JavaScriptCore/b3/air/AirLiveness.h
new file mode 100644
index 000000000..e727c36c9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLiveness.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirStackSlot.h"
+#include "AirTmpInlines.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+#include <wtf/IndexSparseSet.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<Arg::Type adapterType>
+struct TmpLivenessAdapter {
+ typedef Tmp Thing;
+ typedef HashSet<unsigned> IndexSet;
+
+ TmpLivenessAdapter(Code&) { }
+
+ static unsigned numIndices(Code& code)
+ {
+ unsigned numTmps = code.numTmps(adapterType);
+ return AbsoluteTmpMapper<adapterType>::absoluteIndex(numTmps);
+ }
+ static bool acceptsType(Arg::Type type) { return type == adapterType; }
+ static unsigned valueToIndex(Tmp tmp) { return AbsoluteTmpMapper<adapterType>::absoluteIndex(tmp); }
+ static Tmp indexToValue(unsigned index) { return AbsoluteTmpMapper<adapterType>::tmpFromAbsoluteIndex(index); }
+};
+
+struct StackSlotLivenessAdapter {
+ typedef StackSlot* Thing;
+ typedef HashSet<unsigned, DefaultHash<unsigned>::Hash, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> IndexSet;
+
+ StackSlotLivenessAdapter(Code& code)
+ : m_code(code)
+ {
+ }
+
+ static unsigned numIndices(Code& code)
+ {
+ return code.stackSlots().size();
+ }
+ static bool acceptsType(Arg::Type) { return true; }
+ static unsigned valueToIndex(StackSlot* stackSlot) { return stackSlot->index(); }
+ StackSlot* indexToValue(unsigned index) { return m_code.stackSlots()[index]; }
+
+private:
+ Code& m_code;
+};
+
+struct RegLivenessAdapter {
+ typedef Reg Thing;
+ typedef BitVector IndexSet;
+
+ RegLivenessAdapter(Code&) { }
+
+ static unsigned numIndices(Code&)
+ {
+ return Reg::maxIndex() + 1;
+ }
+
+ static bool acceptsType(Arg::Type) { return true; }
+ static unsigned valueToIndex(Reg reg) { return reg.index(); }
+ Reg indexToValue(unsigned index) { return Reg::fromIndex(index); }
+};
+
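+// AbstractLiveness computes backwards liveness for whatever the Adapter indexes (Tmps of a
+// given type, stack slots, or registers). It runs a backwards dataflow fixpoint over the
+// blocks to populate liveAtHead/liveAtTail, and LocalCalc then replays a block instruction
+// by instruction (in reverse) to answer per-instruction liveness queries.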
+template<typename Adapter>
+class AbstractLiveness : public Adapter {
+ struct Workset;
+public:
+ typedef typename Adapter::Thing Thing;
+
+ AbstractLiveness(Code& code)
+ : Adapter(code)
+ , m_workset(Adapter::numIndices(code))
+ , m_liveAtHead(code.size())
+ , m_liveAtTail(code.size())
+ {
+ // The liveAtTail of each block automatically contains the LateUse's of the terminal.
+ for (BasicBlock* block : code) {
+ typename Adapter::IndexSet& liveAtTail = m_liveAtTail[block];
+
+ block->last().forEach<typename Adapter::Thing>(
+ [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+ if (Arg::isLateUse(role) && Adapter::acceptsType(type))
+ liveAtTail.add(Adapter::valueToIndex(thing));
+ });
+ }
+
+ // Blocks with new live values at tail.
+ BitVector dirtyBlocks;
+ for (size_t blockIndex = 0; blockIndex < code.size(); ++blockIndex)
+ dirtyBlocks.set(blockIndex);
+
+ bool changed;
+ do {
+ changed = false;
+
+ for (size_t blockIndex = code.size(); blockIndex--;) {
+ BasicBlock* block = code.at(blockIndex);
+ if (!block)
+ continue;
+
+ if (!dirtyBlocks.quickClear(blockIndex))
+ continue;
+
+ LocalCalc localCalc(*this, block);
+ for (size_t instIndex = block->size(); instIndex--;)
+ localCalc.execute(instIndex);
+
+ // Handle the early def's of the first instruction.
+ block->at(0).forEach<typename Adapter::Thing>(
+ [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+ if (Arg::isEarlyDef(role) && Adapter::acceptsType(type))
+ m_workset.remove(Adapter::valueToIndex(thing));
+ });
+
+ Vector<unsigned>& liveAtHead = m_liveAtHead[block];
+
+ // We only care about Tmps that were discovered in this iteration. It is impossible
+ // to remove a live value from the head.
+ // We remove all the values we already knew about so that we only have to deal with
+ // what is new in LiveAtHead.
+ if (m_workset.size() == liveAtHead.size())
+ m_workset.clear();
+ else {
+ for (unsigned liveIndexAtHead : liveAtHead)
+ m_workset.remove(liveIndexAtHead);
+ }
+
+ if (m_workset.isEmpty())
+ continue;
+
+ liveAtHead.reserveCapacity(liveAtHead.size() + m_workset.size());
+ for (unsigned newValue : m_workset)
+ liveAtHead.uncheckedAppend(newValue);
+
+ for (BasicBlock* predecessor : block->predecessors()) {
+ typename Adapter::IndexSet& liveAtTail = m_liveAtTail[predecessor];
+ for (unsigned newValue : m_workset) {
+ if (liveAtTail.add(newValue)) {
+ if (!dirtyBlocks.quickSet(predecessor->index()))
+ changed = true;
+ }
+ }
+ }
+ }
+ } while (changed);
+ }
+
+ // This calculator has to be run in reverse.
+ class LocalCalc {
+ public:
+ LocalCalc(AbstractLiveness& liveness, BasicBlock* block)
+ : m_liveness(liveness)
+ , m_block(block)
+ {
+ auto& workset = liveness.m_workset;
+ workset.clear();
+ typename Adapter::IndexSet& liveAtTail = liveness.m_liveAtTail[block];
+ for (unsigned index : liveAtTail)
+ workset.add(index);
+ }
+
+ struct Iterator {
+ Iterator(Adapter& adapter, IndexSparseSet<UnsafeVectorOverflow>::const_iterator sparceSetIterator)
+ : m_adapter(adapter)
+ , m_sparceSetIterator(sparceSetIterator)
+ {
+ }
+
+ Iterator& operator++()
+ {
+ ++m_sparceSetIterator;
+ return *this;
+ }
+
+ typename Adapter::Thing operator*() const
+ {
+ return m_adapter.indexToValue(*m_sparceSetIterator);
+ }
+
+ bool operator==(const Iterator& other) { return m_sparceSetIterator == other.m_sparceSetIterator; }
+ bool operator!=(const Iterator& other) { return m_sparceSetIterator != other.m_sparceSetIterator; }
+
+ private:
+ Adapter& m_adapter;
+ IndexSparseSet<UnsafeVectorOverflow>::const_iterator m_sparceSetIterator;
+ };
+
+ struct Iterable {
+ Iterable(AbstractLiveness& liveness)
+ : m_liveness(liveness)
+ {
+ }
+
+ Iterator begin() const { return Iterator(m_liveness, m_liveness.m_workset.begin()); }
+ Iterator end() const { return Iterator(m_liveness, m_liveness.m_workset.end()); }
+
+ bool contains(const typename Adapter::Thing& thing) const
+ {
+ return m_liveness.m_workset.contains(Adapter::valueToIndex(thing));
+ }
+
+ private:
+ AbstractLiveness& m_liveness;
+ };
+
+ Iterable live() const
+ {
+ return Iterable(m_liveness);
+ }
+
+ bool isLive(const typename Adapter::Thing& thing) const
+ {
+ return live().contains(thing);
+ }
+
+ void execute(unsigned instIndex)
+ {
+ Inst& inst = m_block->at(instIndex);
+ auto& workset = m_liveness.m_workset;
+
+ // First handle the early def's of the next instruction.
+ if (instIndex + 1 < m_block->size()) {
+ Inst& nextInst = m_block->at(instIndex + 1);
+ nextInst.forEach<typename Adapter::Thing>(
+ [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+ if (Arg::isEarlyDef(role) && Adapter::acceptsType(type))
+ workset.remove(Adapter::valueToIndex(thing));
+ });
+ }
+
+ // Then handle def's.
+ inst.forEach<typename Adapter::Thing>(
+ [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+ if (Arg::isLateDef(role) && Adapter::acceptsType(type))
+ workset.remove(Adapter::valueToIndex(thing));
+ });
+
+ // Then handle use's.
+ inst.forEach<typename Adapter::Thing>(
+ [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+ if (Arg::isEarlyUse(role) && Adapter::acceptsType(type))
+ workset.add(Adapter::valueToIndex(thing));
+ });
+
+ // And finally, handle the late use's of the previous instruction.
+ if (instIndex) {
+ Inst& prevInst = m_block->at(instIndex - 1);
+ prevInst.forEach<typename Adapter::Thing>(
+ [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+ if (Arg::isLateUse(role) && Adapter::acceptsType(type))
+ workset.add(Adapter::valueToIndex(thing));
+ });
+ }
+ }
+
+ private:
+ AbstractLiveness& m_liveness;
+ BasicBlock* m_block;
+ };
+
+ const Vector<unsigned>& rawLiveAtHead(BasicBlock* block)
+ {
+ return m_liveAtHead[block];
+ }
+
+ template<typename UnderlyingIterable>
+ class Iterable {
+ public:
+ Iterable(AbstractLiveness& liveness, const UnderlyingIterable& iterable)
+ : m_liveness(liveness)
+ , m_iterable(iterable)
+ {
+ }
+
+ class iterator {
+ public:
+ iterator()
+ : m_liveness(nullptr)
+ , m_iter()
+ {
+ }
+
+ iterator(AbstractLiveness& liveness, typename UnderlyingIterable::const_iterator iter)
+ : m_liveness(&liveness)
+ , m_iter(iter)
+ {
+ }
+
+ typename Adapter::Thing operator*()
+ {
+ return m_liveness->indexToValue(*m_iter);
+ }
+
+ iterator& operator++()
+ {
+ ++m_iter;
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ ASSERT(m_liveness == other.m_liveness);
+ return m_iter == other.m_iter;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ AbstractLiveness* m_liveness;
+ typename UnderlyingIterable::const_iterator m_iter;
+ };
+
+ iterator begin() const { return iterator(m_liveness, m_iterable.begin()); }
+ iterator end() const { return iterator(m_liveness, m_iterable.end()); }
+
+ bool contains(const typename Adapter::Thing& thing) const
+ {
+ return m_liveness.m_workset.contains(Adapter::valueToIndex(thing));
+ }
+
+ private:
+ AbstractLiveness& m_liveness;
+ const UnderlyingIterable& m_iterable;
+ };
+
+ Iterable<Vector<unsigned>> liveAtHead(BasicBlock* block)
+ {
+ return Iterable<Vector<unsigned>>(*this, m_liveAtHead[block]);
+ }
+
+ Iterable<typename Adapter::IndexSet> liveAtTail(BasicBlock* block)
+ {
+ return Iterable<typename Adapter::IndexSet>(*this, m_liveAtTail[block]);
+ }
+
+ IndexSparseSet<UnsafeVectorOverflow>& workset() { return m_workset; }
+
+private:
+ friend class LocalCalc;
+ friend struct LocalCalc::Iterable;
+
+ IndexSparseSet<UnsafeVectorOverflow> m_workset;
+ IndexMap<BasicBlock, Vector<unsigned>> m_liveAtHead;
+ IndexMap<BasicBlock, typename Adapter::IndexSet> m_liveAtTail;
+};
+
+template<Arg::Type type>
+using TmpLiveness = AbstractLiveness<TmpLivenessAdapter<type>>;
+
+typedef AbstractLiveness<TmpLivenessAdapter<Arg::GP>> GPLiveness;
+typedef AbstractLiveness<TmpLivenessAdapter<Arg::FP>> FPLiveness;
+typedef AbstractLiveness<StackSlotLivenessAdapter> StackSlotLiveness;
+typedef AbstractLiveness<RegLivenessAdapter> RegLiveness;
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.cpp b/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.cpp
new file mode 100644
index 000000000..dbbb257c1
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirLogRegisterPressure.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void logRegisterPressure(Code& code)
+{
+ const unsigned totalColumns = 200;
+ const unsigned registerColumns = 100;
+
+ RegLiveness liveness(code);
+
+ for (BasicBlock* block : code) {
+ RegLiveness::LocalCalc localCalc(liveness, block);
+
+ block->dumpHeader(WTF::dataFile());
+
+ Vector<CString> instDumps;
+ for (unsigned instIndex = block->size(); instIndex--;) {
+ Inst& inst = block->at(instIndex);
+ Inst* prevInst = block->get(instIndex - 1);
+
+ localCalc.execute(instIndex);
+
+ RegisterSet set;
+ set.setAll(localCalc.live());
+ Inst::forEachDefWithExtraClobberedRegs<Reg>(
+ prevInst, &inst,
+ [&] (Reg reg, Arg::Role, Arg::Type, Arg::Width) {
+ set.set(reg);
+ });
+
+ StringPrintStream instOut;
+ StringPrintStream lineOut;
+ lineOut.print(" ");
+ if (set.numberOfSetRegisters()) {
+ set.forEach(
+ [&] (Reg reg) {
+ CString text = toCString(" ", reg);
+ if (text.length() + lineOut.length() > totalColumns) {
+ instOut.print(lineOut.toCString(), "\n");
+ lineOut.reset();
+ lineOut.print(" ");
+ }
+ lineOut.print(text);
+ });
+ lineOut.print(":");
+ }
+ if (lineOut.length() > registerColumns) {
+ instOut.print(lineOut.toCString(), "\n");
+ lineOut.reset();
+ }
+ while (lineOut.length() < registerColumns)
+ lineOut.print(" ");
+ lineOut.print(" ");
+ lineOut.print(inst);
+ instOut.print(lineOut.toCString(), "\n");
+ instDumps.append(instOut.toCString());
+ }
+
+ for (unsigned i = instDumps.size(); i--;)
+ dataLog(instDumps[i]);
+
+ block->dumpFooter(WTF::dataFile());
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.h b/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.h
new file mode 100644
index 000000000..3f7c3e24c
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLogRegisterPressure.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Dumps the registers that are used at each instruction.
+void logRegisterPressure(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp
new file mode 100644
index 000000000..e0018734b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirLowerAfterRegAlloc.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCCallingConvention.h"
+#include "AirCode.h"
+#include "AirEmitShuffle.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+#include "RegisterSet.h"
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+
+} // anonymous namespace
+
+void lowerAfterRegAlloc(Code& code)
+{
+ PhaseScope phaseScope(code, "lowerAfterRegAlloc");
+
+ if (verbose)
+ dataLog("Code before lowerAfterRegAlloc:\n", code);
+
+ HashMap<Inst*, RegisterSet> usedRegisters;
+
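+ // Walk each block backwards with a local liveness calculation. Just before this walk executes an
+ // instruction, localCalc.live() is exactly the set of registers live after that instruction,
+ // which is the set we snapshot for Shuffles and ColdCCalls.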
+ RegLiveness liveness(code);
+ for (BasicBlock* block : code) {
+ RegLiveness::LocalCalc localCalc(liveness, block);
+
+ for (unsigned instIndex = block->size(); instIndex--;) {
+ Inst& inst = block->at(instIndex);
+
+ RegisterSet set;
+
+ bool isRelevant = inst.kind.opcode == Shuffle || inst.kind.opcode == ColdCCall;
+
+ if (isRelevant) {
+ for (Reg reg : localCalc.live())
+ set.set(reg);
+ }
+
+ localCalc.execute(instIndex);
+
+ if (isRelevant)
+ usedRegisters.add(&inst, set);
+ }
+ }
+
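+ // Picks two scratch Args of the given type that are outside the given register set: free
+ // registers when available, otherwise freshly added spill slots. Each pick is added to the set,
+ // so the second scratch never aliases the first.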
+ auto getScratches = [&] (RegisterSet set, Arg::Type type) -> std::array<Arg, 2> {
+ std::array<Arg, 2> result;
+ for (unsigned i = 0; i < 2; ++i) {
+ bool found = false;
+ for (Reg reg : code.regsInPriorityOrder(type)) {
+ if (!set.get(reg)) {
+ result[i] = Tmp(reg);
+ set.set(reg);
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ result[i] = Arg::stack(
+ code.addStackSlot(
+ Arg::bytes(Arg::conservativeWidth(type)),
+ StackSlotKind::Spill));
+ }
+ }
+ return result;
+ };
+
+ // Now transform the code.
+ InsertionSet insertionSet(code);
+ for (BasicBlock* block : code) {
+ for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+ Inst& inst = block->at(instIndex);
+
+ switch (inst.kind.opcode) {
+ case Shuffle: {
+ RegisterSet set = usedRegisters.get(&inst);
+ Vector<ShufflePair> pairs;
+ for (unsigned i = 0; i < inst.args.size(); i += 3) {
+ Arg src = inst.args[i + 0];
+ Arg dst = inst.args[i + 1];
+ Arg::Width width = inst.args[i + 2].width();
+
+ // The used register set contains things live after the shuffle. But
+ // emitShuffle() wants a scratch register that is not just dead but also does not
+ // interfere with either sources or destinations.
+ auto excludeRegisters = [&] (Tmp tmp) {
+ if (tmp.isReg())
+ set.set(tmp.reg());
+ };
+ src.forEachTmpFast(excludeRegisters);
+ dst.forEachTmpFast(excludeRegisters);
+
+ pairs.append(ShufflePair(src, dst, width));
+ }
+ std::array<Arg, 2> gpScratch = getScratches(set, Arg::GP);
+ std::array<Arg, 2> fpScratch = getScratches(set, Arg::FP);
+ insertionSet.insertInsts(
+ instIndex, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+ inst = Inst();
+ break;
+ }
+
+ case ColdCCall: {
+ CCallValue* value = inst.origin->as<CCallValue>();
+ Kind oldKind = inst.kind;
+
+ RegisterSet liveRegs = usedRegisters.get(&inst);
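+ // Save every live register across the call, except those the callee must preserve anyway
+ // (callee-saves) and registers that are never register-allocated (the stack and reserved
+ // hardware registers).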
+ RegisterSet regsToSave = liveRegs;
+ regsToSave.exclude(RegisterSet::calleeSaveRegisters());
+ regsToSave.exclude(RegisterSet::stackRegisters());
+ regsToSave.exclude(RegisterSet::reservedHardwareRegisters());
+
+ RegisterSet preUsed = regsToSave;
+ Vector<Arg> destinations = computeCCallingConvention(code, value);
+ Tmp result = cCallResult(value->type());
+ Arg originalResult = result ? inst.args[1] : Arg();
+
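+ // The macro ColdCCall's args are laid out as [callee, result (if any), arg1, arg2, ...], while
+ // destinations[i] corresponds to child(i) of the CCallValue (child 0 being the callee). The
+ // index math below skips over the result slot when picking each source.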
+ Vector<ShufflePair> pairs;
+ for (unsigned i = 0; i < destinations.size(); ++i) {
+ Value* child = value->child(i);
+ Arg src = inst.args[result ? (i >= 1 ? i + 1 : i) : i];
+ Arg dst = destinations[i];
+ Arg::Width width = Arg::widthForB3Type(child->type());
+ pairs.append(ShufflePair(src, dst, width));
+
+ auto excludeRegisters = [&] (Tmp tmp) {
+ if (tmp.isReg())
+ preUsed.set(tmp.reg());
+ };
+ src.forEachTmpFast(excludeRegisters);
+ dst.forEachTmpFast(excludeRegisters);
+ }
+
+ std::array<Arg, 2> gpScratch = getScratches(preUsed, Arg::GP);
+ std::array<Arg, 2> fpScratch = getScratches(preUsed, Arg::FP);
+
+ // Also need to save all live registers. Don't need to worry about the result
+ // register.
+ if (originalResult.isReg())
+ regsToSave.clear(originalResult.reg());
+ Vector<StackSlot*> stackSlots;
+ regsToSave.forEach(
+ [&] (Reg reg) {
+ Tmp tmp(reg);
+ Arg arg(tmp);
+ Arg::Width width = Arg::conservativeWidth(arg.type());
+ StackSlot* stackSlot =
+ code.addStackSlot(Arg::bytes(width), StackSlotKind::Spill);
+ pairs.append(ShufflePair(arg, Arg::stack(stackSlot), width));
+ stackSlots.append(stackSlot);
+ });
+
+ if (verbose)
+ dataLog("Pre-call pairs for ", inst, ": ", listDump(pairs), "\n");
+
+ insertionSet.insertInsts(
+ instIndex, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+
+ inst = buildCCall(code, inst.origin, destinations);
+ if (oldKind.traps)
+ inst.kind.traps = true;
+
+ // Now we need to emit code to restore registers.
+ pairs.resize(0);
+ unsigned stackSlotIndex = 0;
+ regsToSave.forEach(
+ [&] (Reg reg) {
+ Tmp tmp(reg);
+ Arg arg(tmp);
+ Arg::Width width = Arg::conservativeWidth(arg.type());
+ StackSlot* stackSlot = stackSlots[stackSlotIndex++];
+ pairs.append(ShufflePair(Arg::stack(stackSlot), arg, width));
+ });
+ if (result) {
+ ShufflePair pair(result, originalResult, Arg::widthForB3Type(value->type()));
+ pairs.append(pair);
+ }
+
+ // For finding scratch registers, we need to account for the possibility that
+ // the result is dead.
+ if (originalResult.isReg())
+ liveRegs.set(originalResult.reg());
+
+ gpScratch = getScratches(liveRegs, Arg::GP);
+ fpScratch = getScratches(liveRegs, Arg::FP);
+
+ insertionSet.insertInsts(
+ instIndex + 1, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ insertionSet.execute(block);
+
+ block->insts().removeAllMatching(
+ [&] (Inst& inst) -> bool {
+ return !inst;
+ });
+ }
+
+ if (verbose)
+ dataLog("Code after lowerAfterRegAlloc:\n", code);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
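
The Shuffle being lowered above has parallel-move semantics: every source is read before any
destination is written, which is why emitShuffle() may need the scratch registers chosen by
getScratches() to break cycles such as a swap. A minimal, self-contained sketch of that contract,
with plain ints standing in for registers and spill slots (illustrative only; the real lowering
goes through the ShufflePair/emitShuffle machinery shown above):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Toy model of a parallel move: all sources are read before any destination is written.
    // A swap is just two moves under this contract.
    void performParallelMoves(std::vector<int>& slots, const std::vector<std::pair<int, int>>& moves)
    {
        std::vector<int> snapshot = slots; // read every source "at once"
        for (const auto& move : moves)
            slots[move.second] = snapshot[move.first]; // move.first = src slot, move.second = dst slot
    }

    int main()
    {
        std::vector<int> slots = { 10, 20 };
        performParallelMoves(slots, { { 0, 1 }, { 1, 0 } }); // a swap, expressed as two simultaneous moves
        std::printf("%d %d\n", slots[0], slots[1]);          // prints "20 10"
        return 0;
    }
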
diff --git a/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h
new file mode 100644
index 000000000..d8234a7e6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This lowers Shuffle and ColdCCall instructions. This phase is designed to be run after register
+// allocation.
+
+void lowerAfterRegAlloc(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp b/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp
new file mode 100644
index 000000000..e14641da6
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirLowerEntrySwitch.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void lowerEntrySwitch(Code& code)
+{
+ PhaseScope phaseScope(code, "lowerEntrySwitch");
+
+ // Figure out the set of blocks that should be duplicated.
+ BlockWorklist worklist;
+ for (BasicBlock* block : code) {
+ if (block->last().kind.opcode == EntrySwitch)
+ worklist.push(block);
+ }
+
+ // It's possible that we don't have any EntrySwitches. That's fine.
+ if (worklist.seen().isEmpty()) {
+ Vector<FrequentedBlock> entrypoints(code.proc().numEntrypoints(), FrequentedBlock(code[0]));
+ code.setEntrypoints(WTFMove(entrypoints));
+ return;
+ }
+
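+ // Saturate backwards: every block from which an EntrySwitch is reachable gets duplicated once
+ // per extra entrypoint, so that each copy's EntrySwitch can later be resolved to a plain Jump.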
+ while (BasicBlock* block = worklist.pop())
+ worklist.pushAll(block->predecessors());
+
+ RELEASE_ASSERT(worklist.saw(code[0]));
+
+ Vector<FrequencyClass> entrypointFrequencies(code.proc().numEntrypoints(), FrequencyClass::Rare);
+ for (BasicBlock* block : code) {
+ if (block->last().kind.opcode != EntrySwitch)
+ continue;
+ for (unsigned entrypointIndex = code.proc().numEntrypoints(); entrypointIndex--;) {
+ entrypointFrequencies[entrypointIndex] = maxFrequency(
+ entrypointFrequencies[entrypointIndex],
+ block->successor(entrypointIndex).frequency());
+ }
+ }
+
+ auto fixEntrySwitch = [&] (BasicBlock* block, unsigned entrypointIndex) {
+ if (block->last().kind.opcode != EntrySwitch)
+ return;
+ FrequentedBlock target = block->successor(entrypointIndex);
+ block->last().kind.opcode = Jump;
+ block->successors().resize(1);
+ block->successor(0) = target;
+ };
+
+ // Now duplicate them.
+ Vector<FrequentedBlock> entrypoints;
+ entrypoints.append(FrequentedBlock(code[0], entrypointFrequencies[0]));
+ IndexMap<BasicBlock, BasicBlock*> map(code.size());
+ for (unsigned entrypointIndex = 1; entrypointIndex < code.proc().numEntrypoints(); ++entrypointIndex) {
+ map.clear();
+ for (BasicBlock* block : worklist.seen().values(code))
+ map[block] = code.addBlock(block->frequency());
+ entrypoints.append(FrequentedBlock(map[code[0]], entrypointFrequencies[entrypointIndex]));
+ for (BasicBlock* block : worklist.seen().values(code)) {
+ BasicBlock* newBlock = map[block];
+ for (const Inst& inst : *block)
+ newBlock->appendInst(inst);
+ newBlock->successors() = block->successors();
+ for (BasicBlock*& successor : newBlock->successorBlocks()) {
+ if (BasicBlock* replacement = map[successor])
+ successor = replacement;
+ }
+ fixEntrySwitch(newBlock, entrypointIndex);
+ }
+ }
+ for (BasicBlock* block : worklist.seen().values(code))
+ fixEntrySwitch(block, 0);
+
+ code.setEntrypoints(WTFMove(entrypoints));
+ code.resetReachability();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
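
The worklist loop above is the standard backward-transitive-closure idiom. A self-contained
restatement over a plain adjacency list, with hypothetical integer block ids (Air's BlockWorklist
plays the role of both the queue and the seen set):

    #include <queue>
    #include <unordered_set>
    #include <vector>

    // Generic shape of the closure computed above: seed with the EntrySwitch blocks, then keep
    // pulling in predecessors until nothing new is seen. Everything in the returned set needs one
    // copy per extra entrypoint.
    std::unordered_set<int> backwardClosure(
        const std::vector<std::vector<int>>& predecessors, // predecessors[b] = blocks that jump to b
        const std::vector<int>& entrySwitchBlocks)
    {
        std::unordered_set<int> seen(entrySwitchBlocks.begin(), entrySwitchBlocks.end());
        std::queue<int> worklist;
        for (int block : entrySwitchBlocks)
            worklist.push(block);
        while (!worklist.empty()) {
            int block = worklist.front();
            worklist.pop();
            for (int pred : predecessors[block]) {
                if (seen.insert(pred).second)
                    worklist.push(pred);
            }
        }
        return seen;
    }
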
diff --git a/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h b/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h
new file mode 100644
index 000000000..ff3500727
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerEntrySwitch.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Converts code that seems to have one entrypoint and emulates multiple entrypoints with
+// EntrySwitch into code that really has multiple entrypoints. This is accomplished by duplicating
+// the backwards transitive closure from all EntrySwitches.
+void lowerEntrySwitch(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp b/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp
new file mode 100644
index 000000000..b086b7b08
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirLowerMacros.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallingConvention.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void lowerMacros(Code& code)
+{
+ PhaseScope phaseScope(code, "lowerMacros");
+
+ InsertionSet insertionSet(code);
+ for (BasicBlock* block : code) {
+ for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+ Inst& inst = block->at(instIndex);
+
+ switch (inst.kind.opcode) {
+ case CCall: {
+ CCallValue* value = inst.origin->as<CCallValue>();
+ Kind oldKind = inst.kind;
+
+ Vector<Arg> destinations = computeCCallingConvention(code, value);
+
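+ // The macro CCall's args are laid out as [callee, result (if non-Void), arg1, arg2, ...], and
+ // destinations[i] corresponds to child(i) of the CCallValue (child 0 being the callee). Shuffle
+ // every argument into the location that the C calling convention expects.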
+ Inst shuffleArguments(Shuffle, value);
+ unsigned offset = value->type() == Void ? 0 : 1;
+ for (unsigned i = 1; i < destinations.size(); ++i) {
+ Value* child = value->child(i);
+ shuffleArguments.args.append(inst.args[offset + i]);
+ shuffleArguments.args.append(destinations[i]);
+ shuffleArguments.args.append(Arg::widthArg(Arg::widthForB3Type(child->type())));
+ }
+ insertionSet.insertInst(instIndex, WTFMove(shuffleArguments));
+
+ // Indicate that we're using our original callee argument.
+ destinations[0] = inst.args[0];
+
+ // Save where the original instruction put its result.
+ Arg resultDst = value->type() == Void ? Arg() : inst.args[1];
+
+ inst = buildCCall(code, inst.origin, destinations);
+ if (oldKind.traps)
+ inst.kind.traps = true;
+
+ Tmp result = cCallResult(value->type());
+ switch (value->type()) {
+ case Void:
+ break;
+ case Float:
+ insertionSet.insert(instIndex + 1, MoveFloat, value, result, resultDst);
+ break;
+ case Double:
+ insertionSet.insert(instIndex + 1, MoveDouble, value, result, resultDst);
+ break;
+ case Int32:
+ insertionSet.insert(instIndex + 1, Move32, value, result, resultDst);
+ break;
+ case Int64:
+ insertionSet.insert(instIndex + 1, Move, value, result, resultDst);
+ break;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ insertionSet.execute(block);
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirLowerMacros.h b/Source/JavaScriptCore/b3/air/AirLowerMacros.h
new file mode 100644
index 000000000..2dcd76dfe
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirLowerMacros.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Air has some opcodes that are very high-level and are meant to reduce the amount of low-level
+// knowledge in the B3->Air lowering. The current example is CCall.
+
+void lowerMacros(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirOpcode.opcodes b/Source/JavaScriptCore/b3/air/AirOpcode.opcodes
new file mode 100644
index 000000000..e82c9f5bf
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirOpcode.opcodes
@@ -0,0 +1,943 @@
+# Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# Syllabus:
+#
+# Examples of some roles, types, and widths:
+# U:G:32 => use of the low 32 bits of a general-purpose register or value
+# D:G:32 => def of the low 32 bits of a general-purpose register or value
+# UD:G:32 => use and def of the low 32 bits of a general-purpose register or value
+# U:G:64 => use of the low 64 bits of a general-purpose register or value
+# ZD:G:32 => def of all bits of a general-purpose register, where all but the low 32 bits are guaranteed to be zeroed.
+# UA:G:Ptr => UseAddr (see comment in Arg.h)
+# U:F:32 => use of a float register or value
+# U:F:64 => use of a double register or value
+# D:F:32 => def of a float register or value
+# UD:F:32 => use and def of a float register or value
+# S:F:32 => scratch float register.
+#
+# Argument kinds:
+# Tmp => temporary or register
+# Imm => 32-bit immediate int
+# BigImm => TrustedImm64
+# Addr => address as temporary/register+offset
+# Index => BaseIndex address
+# Abs => AbsoluteAddress
+#
+# The parser views these things as keywords, and understands that they fall into two distinct classes
+# of things. So, although this file uses a particular indentation style, none of the whitespace or
+# even newlines are meaningful to the parser. For example, you could write:
+#
+# Foo42 U:G:32, UD:F:32 Imm, Tmp Addr, Tmp
+#
+# And the parser would know that this is the same as:
+#
+# Foo42 U:G:32, UD:F:32
+# Imm, Tmp
+# Addr, Tmp
+#
+# I.e. a two-form instruction that uses a GPR or an int immediate and uses+defs a float register.
+#
+# Any opcode or opcode form can be preceded with an architecture list, which restricts the opcode to the
+# union of those architectures. For example, if this is the only overload of the opcode, then it makes the
+# opcode only available on x86_64:
+#
+# x86_64: Fuzz UD:G:64, D:G:64
+# Tmp, Tmp
+# Tmp, Addr
+#
+# But this only restricts the two-operand form; the other form is allowed on all architectures:
+#
+# x86_64: Fuzz UD:G:64, D:G:64
+# Tmp, Tmp
+# Tmp, Addr
+# Fuzz UD:G:Ptr, D:G:Ptr, U:F:Ptr
+# Tmp, Tmp, Tmp
+# Tmp, Addr, Tmp
+#
+# And you can also restrict individual forms:
+#
+# Thingy UD:G:32, D:G:32
+# Tmp, Tmp
+# arm64: Tmp, Addr
+#
+# Additionally, you can have an intersection between the architectures of the opcode overload and the
+# form. In this example, the version that takes an address is only available on armv7 while the other
+# versions are available on armv7 or x86_64:
+#
+# x86_64 armv7: Buzz U:G:32, UD:F:32
+# Tmp, Tmp
+# Imm, Tmp
+# armv7: Addr, Tmp
+#
+# Finally, you can specify architectures using helpful architecture groups. Here are all of the
+# architecture keywords that we support:
+#
+# x86: means x86-32 or x86-64.
+# x86_32: means just x86-32.
+# x86_64: means just x86-64.
+# arm: means armv7 or arm64.
+# armv7: means just armv7.
+# arm64: means just arm64.
+# 32: means x86-32 or armv7.
+# 64: means x86-64 or arm64.
+
+# Note that the opcodes here have a leading capital (Add32) but must correspond to MacroAssembler
+# API that has a leading lower-case (add32).
+
+Nop
+
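+# As a worked example of the notation above: the next entry defines a three-operand 32-bit add.
+# Its first two operands are 32-bit uses of general-purpose values, and its third is a def of a
+# general-purpose register whose bits above the low 32 are guaranteed to be zeroed (ZD). The two
+# forms allow the first operand to be either an immediate or a Tmp; the remaining operands must be
+# Tmps.
+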
+Add32 U:G:32, U:G:32, ZD:G:32
+ Imm, Tmp, Tmp
+ Tmp, Tmp, Tmp
+
+Add32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Addr
+ x86: Imm, Index
+ Imm, Tmp
+ x86: Addr, Tmp
+ x86: Tmp, Addr
+ x86: Tmp, Index
+
+x86: Add8 U:G:8, UD:G:8
+ Imm, Addr
+ Imm, Index
+ Tmp, Addr
+ Tmp, Index
+
+x86: Add16 U:G:16, UD:G:16
+ Imm, Addr
+ Imm, Index
+ Tmp, Addr
+ Tmp, Index
+
+64: Add64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Imm, Addr
+ Imm, Tmp
+ x86: Addr, Tmp
+ x86: Tmp, Addr
+
+64: Add64 U:G:64, U:G:64, D:G:64
+ Imm, Tmp, Tmp
+ Tmp, Tmp, Tmp
+
+AddDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Index, Tmp, Tmp
+
+x86: AddDouble U:F:64, UD:F:64
+ Tmp, Tmp
+ Addr, Tmp
+
+AddFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Index, Tmp, Tmp
+
+x86: AddFloat U:F:32, UD:F:32
+ Tmp, Tmp
+ Addr, Tmp
+
+Sub32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Addr
+ Imm, Tmp
+ x86: Addr, Tmp
+ x86: Tmp, Addr
+
+arm64: Sub32 U:G:32, U:G:32, D:G:32
+ Tmp, Tmp, Tmp
+
+64: Sub64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Imm, Addr
+ Imm, Tmp
+ x86: Addr, Tmp
+ x86: Tmp, Addr
+
+arm64: Sub64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+
+SubDouble U:F:64, U:F:64, D:F:64
+ arm64: Tmp, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Tmp, Index, Tmp
+
+x86: SubDouble U:F:64, UD:F:64
+ Tmp, Tmp
+ Addr, Tmp
+
+SubFloat U:F:32, U:F:32, D:F:32
+ arm64: Tmp, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Tmp, Index, Tmp
+
+x86: SubFloat U:F:32, UD:F:32
+ Tmp, Tmp
+ Addr, Tmp
+
+Neg32 UZD:G:32
+ Tmp
+ x86: Addr
+
+64: Neg64 UD:G:64
+ Tmp
+
+arm64: NegateDouble U:F:64, D:F:64
+ Tmp, Tmp
+
+arm64: NegateFloat U:F:32, D:F:32
+ Tmp, Tmp
+
+Mul32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+Mul32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Imm, Tmp, Tmp
+
+64: Mul64 U:G:64, UD:G:64
+ Tmp, Tmp
+
+Mul64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+
+arm64: MultiplyAdd32 U:G:32, U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplyAdd64 U:G:64, U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplySub32 U:G:32, U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplySub64 U:G:64, U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplyNeg32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+
+arm64: MultiplyNeg64 U:G:64, U:G:64, ZD:G:64
+ Tmp, Tmp, Tmp
+
+arm64: Div32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+
+arm64: UDiv32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+
+arm64: Div64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+
+arm64: UDiv64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+
+MulDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Index, Tmp, Tmp
+
+x86: MulDouble U:F:64, UD:F:64
+ Tmp, Tmp
+ Addr, Tmp
+
+MulFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Index, Tmp, Tmp
+
+x86: MulFloat U:F:32, UD:F:32
+ Tmp, Tmp
+ Addr, Tmp
+
+arm64: DivDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+
+x86: DivDouble U:F:64, UD:F:64
+ Tmp, Tmp
+ Addr, Tmp
+
+arm64: DivFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+
+x86: DivFloat U:F:32, UD:F:32
+ Tmp, Tmp
+ Addr, Tmp
+
+x86: X86ConvertToDoubleWord32 U:G:32, ZD:G:32
+ Tmp*, Tmp*
+
+x86_64: X86ConvertToQuadWord64 U:G:64, D:G:64
+ Tmp*, Tmp*
+
+x86: X86Div32 UZD:G:32, UZD:G:32, U:G:32
+ Tmp*, Tmp*, Tmp
+
+x86: X86UDiv32 UZD:G:32, UZD:G:32, U:G:32
+ Tmp*, Tmp*, Tmp
+
+x86_64: X86Div64 UZD:G:64, UZD:G:64, U:G:64
+ Tmp*, Tmp*, Tmp
+
+x86_64: X86UDiv64 UZD:G:64, UZD:G:64, U:G:64
+ Tmp*, Tmp*, Tmp
+
+Lea32 UA:G:32, D:G:32
+ Addr, Tmp
+ x86: Index, Tmp as x86Lea32
+
+Lea64 UA:G:64, D:G:64
+ Addr, Tmp
+ x86: Index, Tmp as x86Lea64
+
+And32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Addr, Tmp, Tmp
+
+And32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Tmp
+ x86: Tmp, Addr
+ x86: Addr, Tmp
+ x86: Imm, Addr
+
+64: And64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
+
+x86_64: And64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Imm, Tmp
+
+AndDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+
+x86: AndDouble U:F:64, UD:F:64
+ Tmp, Tmp
+
+AndFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+
+x86: AndFloat U:F:32, UD:F:32
+ Tmp, Tmp
+
+OrDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+
+x86: OrDouble U:F:64, UD:F:64
+ Tmp, Tmp
+
+OrFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+
+x86: OrFloat U:F:32, UD:F:32
+ Tmp, Tmp
+
+x86: XorDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+
+x86: XorDouble U:F:64, UD:F:64
+ Tmp, Tmp
+
+x86: XorFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+
+x86: XorFloat U:F:32, UD:F:32
+ Tmp, Tmp
+
+arm64: Lshift32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86: Lshift32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Lshift64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: Lshift64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Rshift32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86: Rshift32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Rshift64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: Rshift64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Urshift32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86: Urshift32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Urshift64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: Urshift64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+x86_64: RotateRight32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: RotateRight32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: RotateRight64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: RotateRight64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: RotateLeft32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+x86_64: RotateLeft64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+Or32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Addr, Tmp, Tmp
+
+Or32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Tmp
+ x86: Tmp, Addr
+ x86: Addr, Tmp
+ x86: Imm, Addr
+
+64: Or64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
+
+64: Or64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Imm, Tmp
+
+Xor32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Addr, Tmp, Tmp
+
+Xor32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Tmp
+ x86: Tmp, Addr
+ x86: Addr, Tmp
+ x86: Imm, Addr
+
+64: Xor64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
+
+64: Xor64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Tmp, Addr
+ x86: Imm, Tmp
+
+arm64: Not32 U:G:32, ZD:G:32
+ Tmp, Tmp
+
+x86: Not32 UZD:G:32
+ Tmp
+ Addr
+
+arm64: Not64 U:G:64, D:G:64
+ Tmp, Tmp
+
+x86: Not64 UD:G:64
+ Tmp
+ Addr
+
+arm64: AbsDouble U:F:64, D:F:64
+ Tmp, Tmp
+
+arm64: AbsFloat U:F:32, D:F:32
+ Tmp, Tmp
+
+CeilDouble U:F:64, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+CeilFloat U:F:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+FloorDouble U:F:64, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+FloorFloat U:F:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+SqrtDouble U:F:64, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+SqrtFloat U:F:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+ConvertInt32ToDouble U:G:32, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+64: ConvertInt64ToDouble U:G:64, D:F:64
+ Tmp, Tmp
+ x86_64: Addr, Tmp
+
+ConvertInt32ToFloat U:G:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+64: ConvertInt64ToFloat U:G:64, D:F:32
+ Tmp, Tmp
+ x86_64: Addr, Tmp
+
+CountLeadingZeros32 U:G:32, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+64: CountLeadingZeros64 U:G:64, D:G:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+ConvertDoubleToFloat U:F:64, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+ConvertFloatToDouble U:F:32, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+# Note that Move operates over the full register size, which is either 32-bit or 64-bit depending on
+# the platform. I'm not entirely sure that this is a good thing; it might be better to just have a
+# Move64 instruction. OTOH, our MacroAssemblers already have this notion of "move()" that basically
+# means movePtr.
+Move U:G:Ptr, D:G:Ptr
+ Tmp, Tmp
+ Imm, Tmp as signExtend32ToPtr
+ BigImm, Tmp
+ Addr, Tmp as loadPtr # This means that "Move Addr, Tmp" is code-generated as "load" not "move".
+ Index, Tmp as loadPtr
+ Tmp, Addr as storePtr
+ Tmp, Index as storePtr
+ x86: Imm, Addr as storePtr
+
+x86: Swap32 UD:G:32, UD:G:32
+ Tmp, Tmp
+ Tmp, Addr
+
+x86_64: Swap64 UD:G:64, UD:G:64
+ Tmp, Tmp
+ Tmp, Addr
+
+Move32 U:G:32, ZD:G:32
+ Tmp, Tmp as zeroExtend32ToPtr
+ Addr, Tmp as load32
+ Index, Tmp as load32
+ Tmp, Addr as store32
+ Tmp, Index as store32
+ x86: Imm, Tmp as zeroExtend32ToPtr
+ x86: Imm, Addr as store32
+ x86: Imm, Index as store32
+
+StoreZero32 U:G:32
+ Addr
+ Index
+
+SignExtend32ToPtr U:G:32, D:G:Ptr
+ Tmp, Tmp
+
+ZeroExtend8To32 U:G:8, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp as load8
+ x86: Index, Tmp as load8
+
+SignExtend8To32 U:G:8, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp as load8SignedExtendTo32
+ x86: Index, Tmp as load8SignedExtendTo32
+
+ZeroExtend16To32 U:G:16, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp as load16
+ x86: Index, Tmp as load16
+
+SignExtend16To32 U:G:16, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp as load16SignedExtendTo32
+ x86: Index, Tmp as load16SignedExtendTo32
+
+MoveFloat U:F:32, D:F:32
+ Tmp, Tmp as moveDouble
+ Addr, Tmp as loadFloat
+ Index, Tmp as loadFloat
+ Tmp, Addr as storeFloat
+ Tmp, Index as storeFloat
+
+MoveDouble U:F:64, D:F:64
+ Tmp, Tmp
+ Addr, Tmp as loadDouble
+ Index, Tmp as loadDouble
+ Tmp, Addr as storeDouble
+ Tmp, Index as storeDouble
+
+MoveZeroToDouble D:F:64
+ Tmp
+
+64: Move64ToDouble U:G:64, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp as loadDouble
+ Index, Tmp as loadDouble
+
+Move32ToFloat U:G:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp as loadFloat
+ Index, Tmp as loadFloat
+
+64: MoveDoubleTo64 U:F:64, D:G:64
+ Tmp, Tmp
+ Addr, Tmp as load64
+ Index, Tmp as load64
+
+MoveFloatTo32 U:F:32, D:G:32
+ Tmp, Tmp
+ Addr, Tmp as load32
+ Index, Tmp as load32
+
+Load8 U:G:8, ZD:G:32
+ Addr, Tmp
+ Index, Tmp
+
+Store8 U:G:8, D:G:8
+ Tmp, Index
+ Tmp, Addr
+ x86: Imm, Index
+ x86: Imm, Addr
+
+Load8SignedExtendTo32 U:G:8, ZD:G:32
+ Addr, Tmp
+ Index, Tmp
+
+Load16 U:G:16, ZD:G:32
+ Addr, Tmp
+ Index, Tmp
+
+Load16SignedExtendTo32 U:G:16, ZD:G:32
+ Addr, Tmp
+ Index, Tmp
+
+Store16 U:G:16, D:G:16
+ Tmp, Index
+ Tmp, Addr
+
+Compare32 U:G:32, U:G:32, U:G:32, ZD:G:32
+ RelCond, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp
+
+64: Compare64 U:G:32, U:G:64, U:G:64, ZD:G:32
+ RelCond, Tmp, Tmp, Tmp
+ x86: RelCond, Tmp, Imm, Tmp
+
+Test32 U:G:32, U:G:32, U:G:32, ZD:G:32
+ x86: ResCond, Addr, Imm, Tmp
+ ResCond, Tmp, Tmp, Tmp
+ ResCond, Tmp, BitImm, Tmp
+
+64: Test64 U:G:32, U:G:64, U:G:64, ZD:G:32
+ x86: ResCond, Tmp, Imm, Tmp
+ ResCond, Tmp, Tmp, Tmp
+
+CompareDouble U:G:32, U:F:64, U:F:64, ZD:G:32
+ DoubleCond, Tmp, Tmp, Tmp
+
+CompareFloat U:G:32, U:F:32, U:F:32, ZD:G:32
+ DoubleCond, Tmp, Tmp, Tmp
+
+# Note that branches have some logic in AirOptimizeBlockOrder.cpp. If you add new branches, please make sure
+# you opt them into the block order optimizations.
+
+Branch8 U:G:32, U:G:8, U:G:8 /branch
+ x86: RelCond, Addr, Imm
+ x86: RelCond, Index, Imm
+
+Branch32 U:G:32, U:G:32, U:G:32 /branch
+ x86: RelCond, Addr, Imm
+ RelCond, Tmp, Tmp
+ RelCond, Tmp, Imm
+ x86: RelCond, Tmp, Addr
+ x86: RelCond, Addr, Tmp
+ x86: RelCond, Index, Imm
+
+64: Branch64 U:G:32, U:G:64, U:G:64 /branch
+ RelCond, Tmp, Tmp
+ RelCond, Tmp, Imm
+ x86: RelCond, Tmp, Addr
+ x86: RelCond, Addr, Tmp
+ x86: RelCond, Addr, Imm
+ x86: RelCond, Index, Tmp
+
+BranchTest8 U:G:32, U:G:8, U:G:8 /branch
+ x86: ResCond, Addr, BitImm
+ x86: ResCond, Index, BitImm
+
+BranchTest32 U:G:32, U:G:32, U:G:32 /branch
+ ResCond, Tmp, Tmp
+ ResCond, Tmp, BitImm
+ x86: ResCond, Addr, BitImm
+ x86: ResCond, Index, BitImm
+
+# Warning: forms that take an immediate will sign-extend their immediate. You probably want
+# BranchTest32 in most cases where you use an immediate.
+64: BranchTest64 U:G:32, U:G:64, U:G:64 /branch
+ ResCond, Tmp, Tmp
+ arm64: ResCond, Tmp, BitImm64
+ x86: ResCond, Tmp, BitImm
+ x86: ResCond, Addr, BitImm
+ x86: ResCond, Addr, Tmp
+ x86: ResCond, Index, BitImm
+
+BranchDouble U:G:32, U:F:64, U:F:64 /branch
+ DoubleCond, Tmp, Tmp
+
+BranchFloat U:G:32, U:F:32, U:F:32 /branch
+ DoubleCond, Tmp, Tmp
+
+BranchAdd32 U:G:32, U:G:32, U:G:32, ZD:G:32 /branch
+ ResCond, Tmp, Tmp, Tmp
+ x86: ResCond, Tmp, Addr, Tmp
+ x86: ResCond, Addr, Tmp, Tmp
+
+BranchAdd32 U:G:32, U:G:32, UZD:G:32 /branch
+ ResCond, Tmp, Tmp
+ ResCond, Imm, Tmp
+ x86: ResCond, Imm, Addr
+ x86: ResCond, Tmp, Addr
+ x86: ResCond, Addr, Tmp
+
+BranchAdd64 U:G:32, U:G:64, U:G:64, ZD:G:64 /branch
+ ResCond, Tmp, Tmp, Tmp
+ x86: ResCond, Tmp, Addr, Tmp
+ x86: ResCond, Addr, Tmp, Tmp
+
+64: BranchAdd64 U:G:32, U:G:64, UD:G:64 /branch
+ ResCond, Imm, Tmp
+ ResCond, Tmp, Tmp
+ x86: ResCond, Addr, Tmp
+
+x86: BranchMul32 U:G:32, U:G:32, UZD:G:32 /branch
+ ResCond, Tmp, Tmp
+ ResCond, Addr, Tmp
+
+x86: BranchMul32 U:G:32, U:G:32, U:G:32, ZD:G:32 /branch
+ ResCond, Tmp, Imm, Tmp
+
+arm64: BranchMul32 U:G:32, U:G:32, U:G:32, S:G:32, S:G:32, ZD:G:32 /branch
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+x86_64: BranchMul64 U:G:32, U:G:64, UZD:G:64 /branch
+ ResCond, Tmp, Tmp
+
+arm64: BranchMul64 U:G:32, U:G:64, U:G:64, S:G:64, S:G:64, ZD:G:64 /branch
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+BranchSub32 U:G:32, U:G:32, UZD:G:32 /branch
+ ResCond, Tmp, Tmp
+ ResCond, Imm, Tmp
+ x86: ResCond, Imm, Addr
+ x86: ResCond, Tmp, Addr
+ x86: ResCond, Addr, Tmp
+
+64: BranchSub64 U:G:32, U:G:64, UD:G:64 /branch
+ ResCond, Imm, Tmp
+ ResCond, Tmp, Tmp
+
+BranchNeg32 U:G:32, UZD:G:32 /branch
+ ResCond, Tmp
+
+64: BranchNeg64 U:G:32, UZD:G:64 /branch
+ ResCond, Tmp
+
+MoveConditionally32 U:G:32, U:G:32, U:G:32, U:G:Ptr, UD:G:Ptr
+ RelCond, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionally32 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+64: MoveConditionally64 U:G:32, U:G:64, U:G:64, U:G:Ptr, UD:G:Ptr
+ RelCond, Tmp, Tmp, Tmp, Tmp
+
+64: MoveConditionally64 U:G:32, U:G:64, U:G:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+MoveConditionallyTest32 U:G:32, U:G:32, U:G:32, U:G:Ptr, UD:G:Ptr
+ ResCond, Tmp, Tmp, Tmp, Tmp
+ x86: ResCond, Tmp, Imm, Tmp, Tmp
+
+MoveConditionallyTest32 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ ResCond, Tmp, BitImm, Tmp, Tmp, Tmp
+
+64: MoveConditionallyTest64 U:G:32, U:G:64, U:G:64, U:G:Ptr, UD:G:Ptr
+ ResCond, Tmp, Tmp, Tmp, Tmp
+ x86: ResCond, Tmp, Imm, Tmp, Tmp
+
+64: MoveConditionallyTest64 U:G:32, U:G:64, U:G:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ x86_64: ResCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+MoveConditionallyDouble U:G:32, U:F:64, U:F:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyDouble U:G:32, U:F:64, U:F:64, U:G:Ptr, UD:G:Ptr
+ DoubleCond, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyFloat U:G:32, U:F:32, U:F:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyFloat U:G:32, U:F:32, U:F:32, U:G:Ptr, UD:G:Ptr
+ DoubleCond, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionally32 U:G:32, U:G:32, U:G:32, U:F:64, U:F:64, D:F:64
+ RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+ x86: RelCond, Addr, Imm, Tmp, Tmp, Tmp
+ x86: RelCond, Tmp, Addr, Tmp, Tmp, Tmp
+ x86: RelCond, Addr, Tmp, Tmp, Tmp, Tmp
+ x86: RelCond, Index, Imm, Tmp, Tmp, Tmp
+
+64: MoveDoubleConditionally64 U:G:32, U:G:64, U:G:64, U:F:64, U:F:64, D:F:64
+ RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+ x86_64: RelCond, Tmp, Addr, Tmp, Tmp, Tmp
+ x86_64: RelCond, Addr, Tmp, Tmp, Tmp, Tmp
+ x86_64: RelCond, Addr, Imm, Tmp, Tmp, Tmp
+ x86_64: RelCond, Index, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyTest32 U:G:32, U:G:32, U:G:32, U:F:64, U:F:64, D:F:64
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ ResCond, Tmp, BitImm, Tmp, Tmp, Tmp
+ x86: ResCond, Addr, Imm, Tmp, Tmp, Tmp
+ x86: ResCond, Index, Imm, Tmp, Tmp, Tmp
+
+# Warning: forms that take an immediate will sign-extend their immediate. You probably want
+# MoveDoubleConditionallyTest32 in most cases where you use an immediate.
+64: MoveDoubleConditionallyTest64 U:G:32, U:G:64, U:G:64, U:F:64, U:F:64, D:F:64
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ x86_64: ResCond, Tmp, Imm, Tmp, Tmp, Tmp
+ x86_64: ResCond, Addr, Imm, Tmp, Tmp, Tmp
+ x86_64: ResCond, Addr, Tmp, Tmp, Tmp, Tmp
+ x86_64: ResCond, Index, Imm, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyDouble U:G:32, U:F:64, U:F:64, U:F:64, U:F:64, D:F:64
+ DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyFloat U:G:32, U:F:32, U:F:32, U:F:64, U:F:64, D:F:64
+ DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MemoryFence /effects
+StoreFence /effects
+LoadFence /effects
+
+Jump /branch
+
+RetVoid /return
+
+Ret32 U:G:32 /return
+ Tmp
+
+64: Ret64 U:G:64 /return
+ Tmp
+
+RetFloat U:F:32 /return
+ Tmp
+
+RetDouble U:F:64 /return
+ Tmp
+
+Oops /terminal
+
+# This is a terminal but we express it as a Custom because we don't want it to have a code
+# generator.
+custom EntrySwitch
+
+# A Shuffle is a multi-source, multi-destination move. It performs multiple moves at once.
+# The moves are specified as triplets of src, dst, and width. For example, you can request a swap this
+# way:
+# Shuffle %tmp1, %tmp2, 64, %tmp2, %tmp1, 64
+custom Shuffle
+
+# Air allows for exotic behavior. A Patch's behavior is determined entirely by the Special operand,
+# which must be the first operand.
+custom Patch
+
+# Instructions used for lowering C calls. These don't make it to Air generation. They get lowered to
+# something else first. The origin Value must be a CCallValue.
+custom CCall
+custom ColdCCall
+
+# This is a special wasm opcode that branches to a trap handler. This uses the generator located in Air::Code
+# to produce the side-exit code.
+custom WasmBoundsCheck
+
diff --git a/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.cpp b/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.cpp
new file mode 100644
index 000000000..11ca3f3d4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirOptimizeBlockOrder.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+class SortedSuccessors {
+public:
+ SortedSuccessors()
+ {
+ }
+
+ void append(BasicBlock* block)
+ {
+ m_successors.append(block);
+ }
+
+ void process(BlockWorklist& worklist)
+ {
+ // We prefer a stable sort, and we don't want it to go off the rails if we see NaN. Also, the number
+ // of successors is bounded. In fact, it currently cannot be more than 2. :-)
+ bubbleSort(
+ m_successors.begin(), m_successors.end(),
+ [] (BasicBlock* left, BasicBlock* right) {
+ return left->frequency() < right->frequency();
+ });
+
+ // Pushing the successors in ascending order of frequency ensures that the very next block we visit
+ // is our highest-frequency successor (unless that successor has already been visited).
+ for (unsigned i = 0; i < m_successors.size(); ++i)
+ worklist.push(m_successors[i]);
+
+ m_successors.resize(0);
+ }
+
+private:
+ Vector<BasicBlock*, 2> m_successors;
+};
+
+} // anonymous namespace
+
+Vector<BasicBlock*> blocksInOptimizedOrder(Code& code)
+{
+ Vector<BasicBlock*> blocksInOrder;
+
+ BlockWorklist fastWorklist;
+ SortedSuccessors sortedSuccessors;
+ SortedSuccessors sortedSlowSuccessors;
+
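+ // Lay out the non-rare blocks first, then append the rare (slow) blocks at the end, so that
+ // cold paths don't sit in the middle of the hot code.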
+ // We expect entrypoint lowering to have already happened.
+ RELEASE_ASSERT(code.numEntrypoints());
+
+ auto appendSuccessor = [&] (const FrequentedBlock& block) {
+ if (block.isRare())
+ sortedSlowSuccessors.append(block.block());
+ else
+ sortedSuccessors.append(block.block());
+ };
+
+ // For everything but the first entrypoint, we push them in order of frequency and frequency
+ // class.
+ for (unsigned i = 1; i < code.numEntrypoints(); ++i)
+ appendSuccessor(code.entrypoint(i));
+
+ // Always push the primary entrypoint last so that it gets highest priority.
+ fastWorklist.push(code.entrypoint(0).block());
+
+ while (BasicBlock* block = fastWorklist.pop()) {
+ blocksInOrder.append(block);
+ for (FrequentedBlock& successor : block->successors())
+ appendSuccessor(successor);
+ sortedSuccessors.process(fastWorklist);
+ }
+
+ BlockWorklist slowWorklist;
+ sortedSlowSuccessors.process(slowWorklist);
+
+ while (BasicBlock* block = slowWorklist.pop()) {
+ // We might have already processed this block.
+ if (fastWorklist.saw(block))
+ continue;
+
+ blocksInOrder.append(block);
+ for (BasicBlock* successor : block->successorBlocks())
+ sortedSuccessors.append(successor);
+ sortedSuccessors.process(slowWorklist);
+ }
+
+ ASSERT(fastWorklist.isEmpty());
+ ASSERT(slowWorklist.isEmpty());
+
+ return blocksInOrder;
+}
+
+void optimizeBlockOrder(Code& code)
+{
+ PhaseScope phaseScope(code, "optimizeBlockOrder");
+
+ Vector<BasicBlock*> blocksInOrder = blocksInOptimizedOrder(code);
+
+ // Place blocks into Code's block list according to the ordering in blocksInOrder. We do this by leaking
+ // all of the blocks and then readopting them.
+ for (auto& entry : code.blockList())
+ entry.release();
+
+ code.blockList().resize(0);
+
+ for (unsigned i = 0; i < blocksInOrder.size(); ++i) {
+ BasicBlock* block = blocksInOrder[i];
+ block->setIndex(i);
+ code.blockList().append(std::unique_ptr<BasicBlock>(block));
+ }
+
+ // Finally, flip any branches that we recognize. It's most optimal if the taken successor does not point
+ // at the next block.
+ for (BasicBlock* block : code) {
+ Inst& branch = block->last();
+
+ // It's somewhat tempting to just say that if the block has two successors and the first arg is
+ // invertible, then we can do the optimization. But that's wagging the dog. The fact that an
+ // instruction happens to have an argument that is invertible doesn't mean it's a branch, even though
+ // it is true that currently only branches have invertible arguments. It's also tempting to say that
+ // the /branch flag in AirOpcode.opcodes tells us that something is a branch - except that there,
+ // /branch also means Jump. The approach taken here means that if you add new branch instructions and
+ // forget about this phase, then at worst your new instructions won't opt into the inversion
+ // optimization. You'll probably realize that as soon as you look at the disassembly, and it
+ // certainly won't cause any correctness issues.
+
+ switch (branch.kind.opcode) {
+ case Branch8:
+ case Branch32:
+ case Branch64:
+ case BranchTest8:
+ case BranchTest32:
+ case BranchTest64:
+ case BranchFloat:
+ case BranchDouble:
+ case BranchAdd32:
+ case BranchAdd64:
+ case BranchMul32:
+ case BranchMul64:
+ case BranchSub32:
+ case BranchSub64:
+ case BranchNeg32:
+ case BranchNeg64:
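+ // For example, if a block ends in "Branch32 Equal, %x, %y" and its taken successor
+ // (successor 0) is also the next block in the new order, rewrite it as
+ // "Branch32 NotEqual, %x, %y" and swap the successors so that the common case falls through.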
+ if (code.findNextBlock(block) == block->successorBlock(0) && branch.args[0].isInvertible()) {
+ std::swap(block->successor(0), block->successor(1));
+ branch.args[0] = branch.args[0].inverted();
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.h b/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.h
new file mode 100644
index 000000000..3911fcc8d
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirOptimizeBlockOrder.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+class Code;
+
+// Returns a list of blocks sorted according to what would be the current optimal order. This shares
+// some properties with a pre-order traversal. In particular, each block will appear after at least
+// one of its predecessors.
+Vector<BasicBlock*> blocksInOptimizedOrder(Code&);
+
+// Reorders the basic blocks to keep hot blocks at the top, and maximize the likelihood that a frequently
+// taken edge is just a fall-through.
+
+void optimizeBlockOrder(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirPadInterference.cpp b/Source/JavaScriptCore/b3/air/AirPadInterference.cpp
new file mode 100644
index 000000000..91de56bc8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirPadInterference.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirPadInterference.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void padInterference(Code& code)
+{
+ InsertionSet insertionSet(code);
+ for (BasicBlock* block : code) {
+ bool prevHadLate = false;
+ for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+ Inst& inst = block->at(instIndex);
+
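+ // Classify the instruction: does it define anything early (before it executes), and does it use
+ // or define anything late (after it executes)? A late role followed immediately by an early def
+ // in the next instruction is what gets padded with a Nop below.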
+ bool hasEarlyDef = false;
+ bool hasLate = false;
+ inst.forEachArg(
+ [&] (Arg&, Arg::Role role, Arg::Type, Arg::Width) {
+ switch (role) {
+ case Arg::EarlyDef:
+ hasEarlyDef = true;
+ break;
+ case Arg::LateUse:
+ case Arg::Def:
+ case Arg::ZDef:
+ case Arg::LateColdUse:
+ case Arg::UseDef:
+ case Arg::UseZDef:
+ hasLate = true;
+ break;
+ case Arg::Scratch:
+ hasEarlyDef = true;
+ hasLate = true;
+ break;
+ case Arg::Use:
+ case Arg::ColdUse:
+ case Arg::UseAddr:
+ break;
+ }
+ });
+ if (inst.kind.opcode == Patch) {
+ hasEarlyDef |= !inst.extraEarlyClobberedRegs().isEmpty();
+ hasLate |= !inst.extraClobberedRegs().isEmpty();
+ }
+
+ if (hasEarlyDef && prevHadLate)
+ insertionSet.insert(instIndex, Nop, inst.origin);
+
+ prevHadLate = hasLate;
+ }
+ insertionSet.execute(block);
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirPadInterference.h b/Source/JavaScriptCore/b3/air/AirPadInterference.h
new file mode 100644
index 000000000..18f80832f
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirPadInterference.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This isn't a phase - it's meant to be a utility that other phases use. Air reasons about liveness by
+// reasoning about interference at boundaries between instructions. This can go wrong - for example, a
+// late use in one instruction doesn't actually interfere with an early def of the next instruction, but
+// Air thinks that it does. This is convenient because it works great in the most common case: early uses
+// and late defs. In practice, only the register allocators need to use this, since only they need to be
+// able to color the interference graph using a bounded number of colors.
+//
+// See https://bugs.webkit.org/show_bug.cgi?id=163548#c2 for more info.
+
+void padInterference(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
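For illustration only (not part of the patch), the padding rule that the loop in AirPadInterference.cpp applies boils down to a single condition: a Nop is needed whenever the previous instruction does something "late" (a late use or any def) and the current instruction does something "early" (an EarlyDef or Scratch), so that the two events land on distinct instruction boundaries. Phases that need this call it up front, as spillEverything() does later in this patch.

    // Sketch of the decision made per adjacent instruction pair; the helper name is made up.
    static bool needsNopBetween(bool previousHasLateUseOrDef, bool currentHasEarlyDef)
    {
        return previousHasLateUseOrDef && currentHasEarlyDef;
    }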
diff --git a/Source/JavaScriptCore/b3/air/AirPhaseScope.cpp b/Source/JavaScriptCore/b3/air/AirPhaseScope.cpp
new file mode 100644
index 000000000..062ea2483
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirPhaseScope.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirPhaseScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirValidate.h"
+#include "B3Common.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+PhaseScope::PhaseScope(Code& code, const char* name)
+ : m_code(code)
+ , m_name(name)
+ , m_timingScope(name)
+{
+ if (shouldDumpIRAtEachPhase(AirMode)) {
+ dataLog("Air after ", code.lastPhaseName(), ", before ", name, ":\n");
+ dataLog(code);
+ }
+
+ if (shouldSaveIRBeforePhase())
+ m_dumpBefore = toCString(code);
+}
+
+PhaseScope::~PhaseScope()
+{
+ m_code.setLastPhaseName(m_name);
+ if (shouldValidateIRAtEachPhase())
+ validate(m_code, m_dumpBefore.data());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirPhaseScope.h b/Source/JavaScriptCore/b3/air/AirPhaseScope.h
new file mode 100644
index 000000000..71f788fce
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirPhaseScope.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3TimingScope.h"
+#include <wtf/Noncopyable.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+class PhaseScope {
+ WTF_MAKE_NONCOPYABLE(PhaseScope);
+public:
+ PhaseScope(Code&, const char* name);
+ ~PhaseScope(); // this does validation
+
+private:
+ Code& m_code;
+ const char* m_name;
+ TimingScope m_timingScope;
+ CString m_dumpBefore;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
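Typical usage, following the pattern of the phases added in this patch (the phase name below is made up): construct a PhaseScope at the top of a phase so that dumping, timing, and validation happen automatically on entry and exit.

    bool someAirPhase(Code& code) // hypothetical phase
    {
        PhaseScope phaseScope(code, "someAirPhase");
        // ... transform code ...
        return true;
    }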
diff --git a/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.cpp b/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.cpp
new file mode 100644
index 000000000..bb0aeab77
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirReportUsedRegisters.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void reportUsedRegisters(Code& code)
+{
+ PhaseScope phaseScope(code, "reportUsedRegisters");
+
+ RegLiveness liveness(code);
+
+ for (BasicBlock* block : code) {
+ RegLiveness::LocalCalc localCalc(liveness, block);
+
+ for (unsigned instIndex = block->size(); instIndex--;) {
+ Inst& inst = block->at(instIndex);
+
+ // Kill dead assignments to registers. For simplicity we say that a store is killable if
+ // it has only late defs and those late defs are to registers that are dead right now.
+ if (!inst.hasNonArgEffects()) {
+ bool canDelete = true;
+ inst.forEachArg(
+ [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+ if (Arg::isEarlyDef(role)) {
+ canDelete = false;
+ return;
+ }
+ if (!Arg::isLateDef(role))
+ return;
+ if (!arg.isReg()) {
+ canDelete = false;
+ return;
+ }
+ if (localCalc.isLive(arg.reg())) {
+ canDelete = false;
+ return;
+ }
+ });
+ if (canDelete)
+ inst = Inst();
+ }
+
+ if (inst.kind.opcode == Patch) {
+ RegisterSet registerSet;
+ for (Reg reg : localCalc.live())
+ registerSet.set(reg);
+ inst.reportUsedRegisters(registerSet);
+ }
+ localCalc.execute(instIndex);
+ }
+
+ block->insts().removeAllMatching(
+ [&] (const Inst& inst) -> bool {
+ return !inst;
+ });
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.h b/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.h
new file mode 100644
index 000000000..ea175dcf4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirReportUsedRegisters.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Performs a liveness analysis over registers and reports the live registers to every Special. Takes
+// the opportunity to kill dead assignments to registers, since it has access to register liveness.
+
+void reportUsedRegisters(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp b/Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp
new file mode 100644
index 000000000..c66f63feb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSimplifyCFG.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirSimplifyCFG.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool simplifyCFG(Code& code)
+{
+ const bool verbose = false;
+
+ PhaseScope phaseScope(code, "simplifyCFG");
+
+ // We have three easy simplification rules:
+ //
+ // 1) If a successor is a block that just jumps to another block, then jump directly to
+ // that block.
+ //
+ // 2) If all successors are the same and the operation has no effects, then use a jump
+ // instead.
+ //
+ // 3) If you jump to a block that is not you and has one predecessor, then merge.
+ //
+ // Note that because of the first rule, this phase may introduce critical edges. That's fine.
+ // If you need broken critical edges, then you have to break them yourself.
+
+ bool result = false;
+ for (;;) {
+ if (verbose) {
+ dataLog("Air before an iteration of simplifyCFG:\n");
+ dataLog(code);
+ }
+
+ bool changed = false;
+ for (BasicBlock* block : code) {
+ // We rely on predecessors being conservatively correct. Verify this here.
+ if (shouldValidateIRAtEachPhase()) {
+ for (BasicBlock* block : code) {
+ for (BasicBlock* successor : block->successorBlocks())
+ RELEASE_ASSERT(successor->containsPredecessor(block));
+ }
+ }
+
+ // We don't care about blocks that don't have successors.
+ if (!block->numSuccessors())
+ continue;
+
+ // First check if any of the successors of this block can be forwarded over.
+ for (BasicBlock*& successor : block->successorBlocks()) {
+ if (successor != block
+ && successor->size() == 1
+ && successor->last().kind.opcode == Jump) {
+ BasicBlock* newSuccessor = successor->successorBlock(0);
+ if (newSuccessor != successor) {
+ if (verbose) {
+ dataLog(
+ "Replacing ", pointerDump(block), "->", pointerDump(successor),
+ " with ", pointerDump(block), "->", pointerDump(newSuccessor), "\n");
+ }
+ // Note that we do not do replacePredecessor() because the block we're
+ // skipping will still have newSuccessor as its successor.
+ newSuccessor->addPredecessor(block);
+ successor = newSuccessor;
+ changed = true;
+ }
+ }
+ }
+
+ // Now check if the block's terminal can be replaced with a jump. The terminal must not
+ // have weird effects.
+ if (block->numSuccessors() > 1
+ && !block->last().hasNonControlEffects()) {
+ // All of the successors must be the same.
+ bool allSame = true;
+ BasicBlock* firstSuccessor = block->successorBlock(0);
+ for (unsigned i = 1; i < block->numSuccessors(); ++i) {
+ if (block->successorBlock(i) != firstSuccessor) {
+ allSame = false;
+ break;
+ }
+ }
+ if (allSame) {
+ if (verbose)
+ dataLog("Changing ", pointerDump(block), "'s terminal to a Jump.\n");
+ block->last() = Inst(Jump, block->last().origin);
+ block->successors().resize(1);
+ block->successors()[0].frequency() = FrequencyClass::Normal;
+ changed = true;
+ }
+ }
+
+ // Finally handle jumps to a block with one predecessor.
+ if (block->numSuccessors() == 1
+ && !block->last().hasNonControlEffects()) {
+ BasicBlock* successor = block->successorBlock(0);
+ if (successor != block && successor->numPredecessors() == 1) {
+ RELEASE_ASSERT(successor->predecessor(0) == block);
+
+ // We can merge the two blocks because the predecessor only jumps to the successor
+ // and the successor is only reachable from the predecessor.
+
+ // Remove the terminal.
+ Value* origin = block->insts().takeLast().origin;
+
+ // Append the full contents of the successor to the predecessor.
+ block->insts().reserveCapacity(block->size() + successor->size());
+ for (Inst& inst : *successor)
+ block->appendInst(WTFMove(inst));
+
+ // Make sure that our successors are the successor's successors.
+ block->successors() = WTFMove(successor->successors());
+
+ // Make sure that the successor has nothing left in it except an oops.
+ successor->resize(1);
+ successor->last() = Inst(Oops, origin);
+ successor->successors().clear();
+
+ // Ensure that the predecessors of block's new successors know what's up.
+ for (BasicBlock* newSuccessor : block->successorBlocks())
+ newSuccessor->replacePredecessor(successor, block);
+
+ if (verbose)
+ dataLog("Merged ", pointerDump(block), "->", pointerDump(successor), "\n");
+ changed = true;
+ }
+ }
+ }
+
+ if (!changed)
+ break;
+ result = true;
+ code.resetReachability();
+ }
+
+ return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
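Rule (1) above can be read in isolation as a small predicate. This is a sketch for exposition only (the helper name is made up), using the same BasicBlock API as the loop above.

    // A successor can be forwarded over when it is a different block whose only
    // instruction is a Jump to some third block.
    static bool canForwardOver(BasicBlock* block, BasicBlock* successor)
    {
        return successor != block
            && successor->size() == 1
            && successor->last().kind.opcode == Jump
            && successor->successorBlock(0) != successor;
    }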
diff --git a/Source/JavaScriptCore/b3/air/AirSimplifyCFG.h b/Source/JavaScriptCore/b3/air/AirSimplifyCFG.h
new file mode 100644
index 000000000..7ac510d4b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSimplifyCFG.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Simplifies the control flow graph by removing jump-only blocks and merging jumps.
+
+bool simplifyCFG(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSpecial.cpp b/Source/JavaScriptCore/b3/air/AirSpecial.cpp
new file mode 100644
index 000000000..e825767b0
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSpecial.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include <limits.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+const char* const Special::dumpPrefix = "&";
+
+Special::Special()
+{
+}
+
+Special::~Special()
+{
+}
+
+CString Special::name() const
+{
+ StringPrintStream out;
+ dumpImpl(out);
+ return out.toCString();
+}
+
+std::optional<unsigned> Special::shouldTryAliasingDef(Inst&)
+{
+ return std::nullopt;
+}
+
+bool Special::isTerminal(Inst&)
+{
+ return false;
+}
+
+bool Special::hasNonArgEffects(Inst&)
+{
+ return true;
+}
+
+bool Special::hasNonArgNonControlEffects(Inst&)
+{
+ return true;
+}
+
+void Special::dump(PrintStream& out) const
+{
+ out.print(dumpPrefix);
+ dumpImpl(out);
+ if (m_index != UINT_MAX)
+ out.print(m_index);
+}
+
+void Special::deepDump(PrintStream& out) const
+{
+ out.print(*this, ": ");
+ deepDumpImpl(out);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSpecial.h b/Source/JavaScriptCore/b3/air/AirSpecial.h
new file mode 100644
index 000000000..480cbfcba
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSpecial.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include "B3SparseCollection.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/ScopedLambda.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+struct GenerationContext;
+
+class Special {
+ WTF_MAKE_NONCOPYABLE(Special);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ static const char* const dumpPrefix;
+
+ Special();
+ virtual ~Special();
+
+ Code& code() const { return *m_code; }
+
+ CString name() const;
+
+ virtual void forEachArg(Inst&, const ScopedLambda<Inst::EachArgCallback>&) = 0;
+ virtual bool isValid(Inst&) = 0;
+ virtual bool admitsStack(Inst&, unsigned argIndex) = 0;
+ virtual std::optional<unsigned> shouldTryAliasingDef(Inst&);
+
+ // This gets called for each Inst that uses this Special. Note that there is no way to
+ // guarantee that a Special gets used from just one Inst, because Air might tail-duplicate late. So,
+ // if you want to pass this information down to generate(), then you have to either:
+ //
+ // 1) Generate Air that starts with a separate Special per Patch Inst, and then merge
+ // usedRegister sets. This is probably not great, but it optimizes for the common case that
+ // Air didn't duplicate code or that such duplication didn't cause any interesting changes to
+ // register assignment.
+ //
+ // 2) Have the Special maintain a HashMap<Inst*, RegisterSet>. This works because the analysis
+ // that feeds into this call is performed just before code generation and there is no way
+ // for the Vector<>'s that contain the Insts to be reallocated. This allows generate() to
+ // consult the HashMap.
+ //
+ // 3) Hybrid: you could use (1) and fire up a HashMap if you see multiple calls.
+ //
+ // Note that it's not possible to rely on reportUsedRegisters() being called in the same order
+ // as generate(). If we could rely on that, then we could just have each Special instance
+ // maintain a Vector of RegisterSet's and then process that vector in the right order in
+ // generate(). But, the ordering difference is unlikely to change since it would harm the
+ // performance of the liveness analysis.
+ //
+ // Currently, we do (1) for B3 stackmaps.
+ virtual void reportUsedRegisters(Inst&, const RegisterSet&) = 0;
+
+ virtual CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&) = 0;
+
+ virtual RegisterSet extraEarlyClobberedRegs(Inst&) = 0;
+ virtual RegisterSet extraClobberedRegs(Inst&) = 0;
+
+ // By default, this returns false.
+ virtual bool isTerminal(Inst&);
+
+ // By default, this returns true.
+ virtual bool hasNonArgEffects(Inst&);
+
+ // By default, this returns true.
+ virtual bool hasNonArgNonControlEffects(Inst&);
+
+ void dump(PrintStream&) const;
+ void deepDump(PrintStream&) const;
+
+protected:
+ virtual void dumpImpl(PrintStream&) const = 0;
+ virtual void deepDumpImpl(PrintStream&) const = 0;
+
+private:
+ friend class Code;
+ friend class SparseCollection<Special>;
+
+ unsigned m_index { UINT_MAX };
+ Code* m_code { nullptr };
+};
+
+class DeepSpecialDump {
+public:
+ DeepSpecialDump(const Special* special)
+ : m_special(special)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_special)
+ m_special->deepDump(out);
+ else
+ out.print("<null>");
+ }
+
+private:
+ const Special* m_special;
+};
+
+inline DeepSpecialDump deepDump(const Special* special)
+{
+ return DeepSpecialDump(special);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
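As a rough sketch of option (2) from the comment above - a Special that remembers the reported registers per Inst and consults them at generation time - something along these lines would work. The class name is hypothetical and the other pure-virtual overrides are omitted for brevity; this is not code from the patch.

    class CustomPatchSpecial : public Special {
    public:
        void reportUsedRegisters(Inst& inst, const RegisterSet& usedRegisters) override
        {
            // Keyed by Inst* because the Insts won't be reallocated between this call and generate().
            m_usedRegisters.set(&inst, usedRegisters);
        }

        CCallHelpers::Jump generate(Inst& inst, CCallHelpers& jit, GenerationContext& context) override
        {
            RegisterSet used = m_usedRegisters.get(&inst);
            // ... emit code that avoids, or saves and restores, the registers in `used` ...
            UNUSED_PARAM(jit);
            UNUSED_PARAM(context);
            UNUSED_PARAM(used);
            return CCallHelpers::Jump();
        }

        // forEachArg, isValid, admitsStack, extraEarlyClobberedRegs, extraClobberedRegs,
        // dumpImpl, and deepDumpImpl omitted.

    private:
        HashMap<Inst*, RegisterSet> m_usedRegisters;
    };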
diff --git a/Source/JavaScriptCore/b3/air/AirSpillEverything.cpp b/Source/JavaScriptCore/b3/air/AirSpillEverything.cpp
new file mode 100644
index 000000000..ebf3774a5
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSpillEverything.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirSpillEverything.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPadInterference.h"
+#include "AirPhaseScope.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void spillEverything(Code& code)
+{
+ PhaseScope phaseScope(code, "spillEverything");
+
+ padInterference(code);
+
+ // We want to know the set of registers used at every point in every basic block.
+ IndexMap<BasicBlock, Vector<RegisterSet>> usedRegisters(code.size());
+ GPLiveness gpLiveness(code);
+ FPLiveness fpLiveness(code);
+ for (BasicBlock* block : code) {
+ GPLiveness::LocalCalc gpLocalCalc(gpLiveness, block);
+ FPLiveness::LocalCalc fpLocalCalc(fpLiveness, block);
+
+ usedRegisters[block].resize(block->size() + 1);
+
+ auto setUsedRegisters = [&] (unsigned index) {
+ RegisterSet& registerSet = usedRegisters[block][index];
+ for (Tmp tmp : gpLocalCalc.live()) {
+ if (tmp.isReg())
+ registerSet.set(tmp.reg());
+ }
+ for (Tmp tmp : fpLocalCalc.live()) {
+ if (tmp.isReg())
+ registerSet.set(tmp.reg());
+ }
+
+ // Gotta account for dead assignments to registers. These may happen because the input
+ // code is suboptimal.
+ Inst::forEachDefWithExtraClobberedRegs<Tmp>(
+ block->get(index - 1), block->get(index),
+ [&] (const Tmp& tmp, Arg::Role, Arg::Type, Arg::Width) {
+ if (tmp.isReg())
+ registerSet.set(tmp.reg());
+ });
+ };
+
+ for (unsigned instIndex = block->size(); instIndex--;) {
+ setUsedRegisters(instIndex + 1);
+ gpLocalCalc.execute(instIndex);
+ fpLocalCalc.execute(instIndex);
+ }
+ setUsedRegisters(0);
+ }
+
+ // Allocate a stack slot for each tmp.
+ Vector<StackSlot*> allStackSlots[Arg::numTypes];
+ for (unsigned typeIndex = 0; typeIndex < Arg::numTypes; ++typeIndex) {
+ Vector<StackSlot*>& stackSlots = allStackSlots[typeIndex];
+ Arg::Type type = static_cast<Arg::Type>(typeIndex);
+ stackSlots.resize(code.numTmps(type));
+ for (unsigned tmpIndex = code.numTmps(type); tmpIndex--;)
+ stackSlots[tmpIndex] = code.addStackSlot(8, StackSlotKind::Spill);
+ }
+
+ InsertionSet insertionSet(code);
+ for (BasicBlock* block : code) {
+ for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+ RegisterSet& setBefore = usedRegisters[block][instIndex];
+ RegisterSet& setAfter = usedRegisters[block][instIndex + 1];
+ Inst& inst = block->at(instIndex);
+
+ // First try to spill directly.
+ for (unsigned i = 0; i < inst.args.size(); ++i) {
+ Arg& arg = inst.args[i];
+
+ if (arg.isTmp()) {
+ if (arg.isReg())
+ continue;
+
+ if (inst.admitsStack(i)) {
+ StackSlot* stackSlot = allStackSlots[arg.type()][arg.tmpIndex()];
+ arg = Arg::stack(stackSlot);
+ continue;
+ }
+ }
+ }
+
+ // Now fall back on spilling using separate Move's to load/store the tmp.
+ inst.forEachTmp(
+ [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width) {
+ if (tmp.isReg())
+ return;
+
+ StackSlot* stackSlot = allStackSlots[type][tmp.tmpIndex()];
+ Arg arg = Arg::stack(stackSlot);
+
+ // Need to figure out a register to use. How we do that depends on the role.
+ Reg chosenReg;
+ switch (role) {
+ case Arg::Use:
+ case Arg::ColdUse:
+ for (Reg reg : code.regsInPriorityOrder(type)) {
+ if (!setBefore.get(reg)) {
+ setBefore.set(reg);
+ chosenReg = reg;
+ break;
+ }
+ }
+ break;
+ case Arg::Def:
+ case Arg::ZDef:
+ for (Reg reg : code.regsInPriorityOrder(type)) {
+ if (!setAfter.get(reg)) {
+ setAfter.set(reg);
+ chosenReg = reg;
+ break;
+ }
+ }
+ break;
+ case Arg::UseDef:
+ case Arg::UseZDef:
+ case Arg::LateUse:
+ case Arg::LateColdUse:
+ case Arg::Scratch:
+ case Arg::EarlyDef:
+ for (Reg reg : code.regsInPriorityOrder(type)) {
+ if (!setBefore.get(reg) && !setAfter.get(reg)) {
+ setAfter.set(reg);
+ setBefore.set(reg);
+ chosenReg = reg;
+ break;
+ }
+ }
+ break;
+ case Arg::UseAddr:
+ // We will never UseAddr a Tmp; that wouldn't make sense.
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ RELEASE_ASSERT(chosenReg);
+
+ tmp = Tmp(chosenReg);
+
+ Opcode move = type == Arg::GP ? Move : MoveDouble;
+
+ if (Arg::isAnyUse(role) && role != Arg::Scratch)
+ insertionSet.insert(instIndex, move, inst.origin, arg, tmp);
+ if (Arg::isAnyDef(role))
+ insertionSet.insert(instIndex + 1, move, inst.origin, tmp, arg);
+ });
+ }
+ insertionSet.execute(block);
+ }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirSpillEverything.h b/Source/JavaScriptCore/b3/air/AirSpillEverything.h
new file mode 100644
index 000000000..0fdca6677
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirSpillEverything.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a phase for testing. It behaves like a register allocator in the sense that it
+// eliminates temporaries from the program. It accomplishes this by always spilling all
+// temporaries. The resulting code is going to be very inefficient. This phase is great if you
+// think that there is a bug in the register allocator. You can confirm this by running this
+// phase instead of the register allocator.
+//
+// Note that even though this phase does the cheapest thing possible, it's not even written in a
+// particularly efficient way. So, don't get any ideas about using this phase to reduce compiler
+// latency. If you wanted to do that, you should come up with a clever algorithm instead of using
+// this silly thing.
+
+void spillEverything(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirStackSlot.cpp b/Source/JavaScriptCore/b3/air/AirStackSlot.cpp
new file mode 100644
index 000000000..58cac0657
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirStackSlot.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirStackSlot.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackSlot.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void StackSlot::setOffsetFromFP(intptr_t value)
+{
+ m_offsetFromFP = value;
+ if (m_b3Slot)
+ m_b3Slot->m_offsetFromFP = value;
+}
+
+unsigned StackSlot::jsHash() const
+{
+ return static_cast<unsigned>(m_kind) + m_byteSize * 3 + m_offsetFromFP * 7;
+}
+
+void StackSlot::dump(PrintStream& out) const
+{
+ if (isSpill())
+ out.print("spill");
+ else
+ out.print("stack");
+ out.print(m_index);
+}
+
+void StackSlot::deepDump(PrintStream& out) const
+{
+ out.print("byteSize = ", m_byteSize, ", offsetFromFP = ", m_offsetFromFP, ", kind = ", m_kind);
+ if (m_b3Slot)
+ out.print(", b3Slot = ", *m_b3Slot, ": (", B3::deepDump(m_b3Slot), ")");
+}
+
+StackSlot::StackSlot(unsigned byteSize, StackSlotKind kind, B3::StackSlot* b3Slot)
+ : m_byteSize(byteSize)
+ , m_offsetFromFP(b3Slot ? b3Slot->offsetFromFP() : 0)
+ , m_kind(kind)
+ , m_b3Slot(b3Slot)
+{
+ ASSERT(byteSize);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirStackSlot.h b/Source/JavaScriptCore/b3/air/AirStackSlot.h
new file mode 100644
index 000000000..85c94acc8
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirStackSlot.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirStackSlotKind.h"
+#include "B3SparseCollection.h"
+#include <limits.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class StackSlot;
+
+namespace Air {
+
+class StackSlot {
+ WTF_MAKE_NONCOPYABLE(StackSlot);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ unsigned byteSize() const { return m_byteSize; }
+ StackSlotKind kind() const { return m_kind; }
+ bool isLocked() const { return m_kind == StackSlotKind::Locked; }
+ bool isSpill() const { return m_kind == StackSlotKind::Spill; }
+ unsigned index() const { return m_index; }
+
+ void ensureSize(unsigned requestedSize)
+ {
+ ASSERT(!m_offsetFromFP);
+ m_byteSize = std::max(m_byteSize, requestedSize);
+ }
+
+ unsigned alignment() const
+ {
+ if (byteSize() <= 1)
+ return 1;
+ if (byteSize() <= 2)
+ return 2;
+ if (byteSize() <= 4)
+ return 4;
+ return 8;
+ }
+
+ B3::StackSlot* b3Slot() const { return m_b3Slot; }
+
+ // Zero means that it's not yet assigned.
+ intptr_t offsetFromFP() const { return m_offsetFromFP; }
+
+ // This should usually just be called from phases that do stack allocation. But you can
+ // totally force a stack slot to land at some offset.
+ void setOffsetFromFP(intptr_t);
+
+ // This computes a hash for comparing this to JSAir's StackSlot.
+ unsigned jsHash() const;
+
+ void dump(PrintStream&) const;
+ void deepDump(PrintStream&) const;
+
+private:
+ friend class Code;
+ friend class SparseCollection<StackSlot>;
+
+ StackSlot(unsigned byteSize, StackSlotKind, B3::StackSlot*);
+
+ unsigned m_byteSize { 0 };
+ unsigned m_index { UINT_MAX };
+ intptr_t m_offsetFromFP { 0 };
+ StackSlotKind m_kind { StackSlotKind::Locked };
+ B3::StackSlot* m_b3Slot { nullptr };
+};
+
+class DeepStackSlotDump {
+public:
+ DeepStackSlotDump(const StackSlot* slot)
+ : m_slot(slot)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_slot)
+ m_slot->deepDump(out);
+ else
+ out.print("<null>");
+ }
+
+private:
+ const StackSlot* m_slot;
+};
+
+inline DeepStackSlotDump deepDump(const StackSlot* slot)
+{
+ return DeepStackSlotDump(slot);
+}
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+inline void printInternal(PrintStream& out, JSC::B3::Air::StackSlot* stackSlot)
+{
+ out.print(pointerDump(stackSlot));
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp b/Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp
new file mode 100644
index 000000000..af83de1b9
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirStackSlotKind.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirStackSlotKind.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3::Air;
+
+void printInternal(PrintStream& out, StackSlotKind kind)
+{
+ switch (kind) {
+ case StackSlotKind::Locked:
+ out.print("Locked");
+ return;
+ case StackSlotKind::Spill:
+ out.print("Spill");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirStackSlotKind.h b/Source/JavaScriptCore/b3/air/AirStackSlotKind.h
new file mode 100644
index 000000000..9ef205772
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirStackSlotKind.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+enum class StackSlotKind : uint8_t {
+ // A locked stack slot is an area of stack requested by the client. It cannot be killed. The
+ // client can get its FP offset and write to it from stack walking code, so we must assume
+ // that reads and writes to a locked stack slot can be clobbered the same way as reads and
+ // writes to any memory location.
+ Locked,
+
+ // A spill slot. These have fundamentally different behavior than a typical memory location:
+ // they are what temporaries get lowered to. This means, for example, that a 32-bit ZDef store
+ // to an 8-byte spill slot will zero the top 4 bytes, even though a 32-bit ZDef store to any
+ // other kind of memory location would do no such thing. UseAddr on a spill slot is not allowed,
+ // so spill slots never escape.
+ Spill
+
+ // FIXME: We should add a third mode, which means that the stack slot will be read asynchronously
+ // as with Locked, but never written to asynchronously. Then, Air could optimize spilling and
+ // filling by tracking whether the value had been stored to a read-only locked slot. If it had,
+ // then we can refill from that slot.
+ // https://bugs.webkit.org/show_bug.cgi?id=150587
+};
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::Air::StackSlotKind);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
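The ZDef behavior described for Spill slots can be restated in plain C++ terms. This is only an illustration of the semantics; the function names and the fixed 8-byte slot are assumptions, not Air API.

    #include <cstdint>
    #include <cstring>

    // A 32-bit ZDef store into an 8-byte spill slot zero-fills the upper 4 bytes of the slot's value...
    void store32ZDefToSpillSlot(uint8_t* spillSlot, uint32_t value)
    {
        uint64_t widened = value; // zero-extended to the slot's full width
        std::memcpy(spillSlot, &widened, sizeof(widened));
    }

    // ...whereas a 32-bit store to ordinary memory leaves the neighboring bytes alone.
    void store32ToOrdinaryMemory(uint8_t* location, uint32_t value)
    {
        std::memcpy(location, &value, sizeof(value));
    }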
diff --git a/Source/JavaScriptCore/b3/air/AirTmp.cpp b/Source/JavaScriptCore/b3/air/AirTmp.cpp
new file mode 100644
index 000000000..487f52177
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmp.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirTmp.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+void Tmp::dump(PrintStream& out) const
+{
+ if (!*this) {
+ out.print("<none>");
+ return;
+ }
+
+ if (isReg()) {
+ out.print(reg());
+ return;
+ }
+
+ if (isGP()) {
+ out.print("%tmp", gpTmpIndex());
+ return;
+ }
+
+ out.print("%ftmp", fpTmpIndex());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/Source/JavaScriptCore/b3/air/AirTmp.h b/Source/JavaScriptCore/b3/air/AirTmp.h
new file mode 100644
index 000000000..c01427c2b
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmp.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "Reg.h"
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Arg;
+
+// A Tmp is a generalization of a register. It can be used to refer to any GPR or FPR. It can also
+// be used to refer to an unallocated register (i.e. a temporary). Like many Air classes, we use
+// deliberately terse naming since we will have to use this name a lot.
+
+class Tmp {
+public:
+ Tmp()
+ : m_value(0)
+ {
+ }
+
+ explicit Tmp(Reg reg)
+ {
+ if (reg) {
+ if (reg.isGPR())
+ m_value = encodeGPR(reg.gpr());
+ else
+ m_value = encodeFPR(reg.fpr());
+ } else
+ m_value = 0;
+ }
+
+ explicit Tmp(const Arg&);
+
+ static Tmp gpTmpForIndex(unsigned index)
+ {
+ Tmp result;
+ result.m_value = encodeGPTmp(index);
+ return result;
+ }
+
+ static Tmp fpTmpForIndex(unsigned index)
+ {
+ Tmp result;
+ result.m_value = encodeFPTmp(index);
+ return result;
+ }
+
+ explicit operator bool() const { return !!m_value; }
+
+ bool isGP() const
+ {
+ return isEncodedGP(m_value);
+ }
+
+ bool isFP() const
+ {
+ return isEncodedFP(m_value);
+ }
+
+ bool isGPR() const
+ {
+ return isEncodedGPR(m_value);
+ }
+
+ bool isFPR() const
+ {
+ return isEncodedFPR(m_value);
+ }
+
+ bool isReg() const
+ {
+ return isGPR() || isFPR();
+ }
+
+ GPRReg gpr() const
+ {
+ return decodeGPR(m_value);
+ }
+
+ FPRReg fpr() const
+ {
+ return decodeFPR(m_value);
+ }
+
+ Reg reg() const
+ {
+ if (isGP())
+ return gpr();
+ return fpr();
+ }
+
+ bool hasTmpIndex() const
+ {
+ return !isReg();
+ }
+
+ unsigned gpTmpIndex() const
+ {
+ return decodeGPTmp(m_value);
+ }
+
+ unsigned fpTmpIndex() const
+ {
+ return decodeFPTmp(m_value);
+ }
+
+ unsigned tmpIndex() const
+ {
+ if (isGP())
+ return gpTmpIndex();
+ return fpTmpIndex();
+ }
+
+ bool isAlive() const
+ {
+ return !!*this;
+ }
+
+ bool operator==(const Tmp& other) const
+ {
+ return m_value == other.m_value;
+ }
+
+ bool operator!=(const Tmp& other) const
+ {
+ return !(*this == other);
+ }
+
+ void dump(PrintStream& out) const;
+
+ Tmp(WTF::HashTableDeletedValueType)
+ : m_value(std::numeric_limits<int>::max())
+ {
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return *this == Tmp(WTF::HashTableDeletedValue);
+ }
+
+ unsigned hash() const
+ {
+ return WTF::IntHash<int>::hash(m_value);
+ }
+
+ unsigned internalValue() const { return static_cast<unsigned>(m_value); }
+
+ static Tmp tmpForInternalValue(unsigned index)
+ {
+ Tmp result;
+ result.m_value = static_cast<int>(index);
+ return result;
+ }
+
+private:
+ static int encodeGP(unsigned index)
+ {
+ return 1 + index;
+ }
+
+ static int encodeFP(unsigned index)
+ {
+ return -1 - index;
+ }
+
+ static int encodeGPR(GPRReg gpr)
+ {
+ return encodeGP(gpr - MacroAssembler::firstRegister());
+ }
+
+ static int encodeFPR(FPRReg fpr)
+ {
+ return encodeFP(fpr - MacroAssembler::firstFPRegister());
+ }
+
+ static int encodeGPTmp(unsigned index)
+ {
+ return encodeGPR(MacroAssembler::lastRegister()) + 1 + index;
+ }
+
+ static int encodeFPTmp(unsigned index)
+ {
+ return encodeFPR(MacroAssembler::lastFPRegister()) - 1 - index;
+ }
+
+ static bool isEncodedGP(int value)
+ {
+ return value > 0;
+ }
+
+ static bool isEncodedFP(int value)
+ {
+ return value < 0;
+ }
+
+ static bool isEncodedGPR(int value)
+ {
+ return isEncodedGP(value) && value <= encodeGPR(MacroAssembler::lastRegister());
+ }
+
+ static bool isEncodedFPR(int value)
+ {
+ return isEncodedFP(value) && value >= encodeFPR(MacroAssembler::lastFPRegister());
+ }
+
+ static bool isEncodedGPTmp(int value)
+ {
+ return isEncodedGP(value) && !isEncodedGPR(value);
+ }
+
+ static bool isEncodedFPTmp(int value)
+ {
+ return isEncodedFP(value) && !isEncodedFPR(value);
+ }
+
+ static GPRReg decodeGPR(int value)
+ {
+ ASSERT(isEncodedGPR(value));
+ return static_cast<GPRReg>(
+ (value - encodeGPR(MacroAssembler::firstRegister())) + MacroAssembler::firstRegister());
+ }
+
+ static FPRReg decodeFPR(int value)
+ {
+ ASSERT(isEncodedFPR(value));
+ return static_cast<FPRReg>(
+ (encodeFPR(MacroAssembler::firstFPRegister()) - value) +
+ MacroAssembler::firstFPRegister());
+ }
+
+ static unsigned decodeGPTmp(int value)
+ {
+ ASSERT(isEncodedGPTmp(value));
+ return value - (encodeGPR(MacroAssembler::lastRegister()) + 1);
+ }
+
+ static unsigned decodeFPTmp(int value)
+ {
+ ASSERT(isEncodedFPTmp(value));
+ return (encodeFPR(MacroAssembler::lastFPRegister()) - 1) - value;
+ }
+
+ // 0: empty Tmp
+ // positive: GPRs and then GP temps.
+ // negative: FPRs and then FP temps.
+ int m_value;
+};
+
+struct TmpHash {
+ static unsigned hash(const Tmp& key) { return key.hash(); }
+ static bool equal(const Tmp& a, const Tmp& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Air::Tmp> {
+ typedef JSC::B3::Air::TmpHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Air::Tmp> : SimpleClassHashTraits<JSC::B3::Air::Tmp> { };
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
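The sign-based packing used by Tmp can be seen with a tiny standalone model. The register counts below are made up (the real bounds come from MacroAssembler::firstRegister()/lastRegister() and their FP counterparts); this only illustrates how the integer ranges are carved up.

    #include <cassert>

    constexpr int numGPRs = 16;
    constexpr int numFPRs = 32;

    constexpr int encodeGPR(int gprIndex) { return 1 + gprIndex; }              // 1 .. numGPRs
    constexpr int encodeGPTmp(int tmpIndex) { return numGPRs + 1 + tmpIndex; }  // numGPRs + 1, ...
    constexpr int encodeFPR(int fprIndex) { return -1 - fprIndex; }             // -1 .. -numFPRs
    constexpr int encodeFPTmp(int tmpIndex) { return -numFPRs - 1 - tmpIndex; } // -numFPRs - 1, ...

    int main()
    {
        // 0 is reserved for the empty Tmp.
        assert(encodeGPR(0) == 1);     // first GPR
        assert(encodeGPTmp(0) == 17);  // GP temps start right after the GPRs
        assert(encodeFPR(0) == -1);    // first FPR
        assert(encodeFPTmp(0) == -33); // FP temps start right after the FPRs
        return 0;
    }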
diff --git a/Source/JavaScriptCore/b3/air/AirTmpInlines.h b/Source/JavaScriptCore/b3/air/AirTmpInlines.h
new file mode 100644
index 000000000..a7de098b4
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmpInlines.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirTmp.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+inline Tmp::Tmp(const Arg& arg)
+{
+ *this = arg.tmp();
+}
+
+// When a Hash structure is too slow, or when sets contain most values, you can
+// use direct array addressing with Tmps instead.
+template<Arg::Type type>
+struct AbsoluteTmpMapper;
+
+template<>
+struct AbsoluteTmpMapper<Arg::GP> {
+ static unsigned absoluteIndex(const Tmp& tmp)
+ {
+ ASSERT(tmp.isGP());
+ ASSERT(static_cast<int>(tmp.internalValue()) > 0);
+ return tmp.internalValue();
+ }
+
+ static unsigned absoluteIndex(unsigned tmpIndex)
+ {
+ return absoluteIndex(Tmp::gpTmpForIndex(tmpIndex));
+ }
+
+ static unsigned lastMachineRegisterIndex()
+ {
+ return absoluteIndex(Tmp(MacroAssembler::lastRegister()));
+ }
+
+ static Tmp tmpFromAbsoluteIndex(unsigned tmpIndex)
+ {
+ return Tmp::tmpForInternalValue(tmpIndex);
+ }
+};
+
+template<>
+struct AbsoluteTmpMapper<Arg::FP> {
+ static unsigned absoluteIndex(const Tmp& tmp)
+ {
+ ASSERT(tmp.isFP());
+ ASSERT(static_cast<int>(tmp.internalValue()) < 0);
+ return -tmp.internalValue();
+ }
+
+ static unsigned absoluteIndex(unsigned tmpIndex)
+ {
+ return absoluteIndex(Tmp::fpTmpForIndex(tmpIndex));
+ }
+
+ static unsigned lastMachineRegisterIndex()
+ {
+ return absoluteIndex(Tmp(MacroAssembler::lastFPRegister()));
+ }
+
+ static Tmp tmpFromAbsoluteIndex(unsigned tmpIndex)
+ {
+ return Tmp::tmpForInternalValue(-tmpIndex);
+ }
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
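
A hedged illustration of the direct-addressing idea behind AbsoluteTmpMapper, not part of this patch; it assumes Code::numTmps(Arg::Type) and Inst::forEachTmp() as used elsewhere in Air:

    // Size a dense table so every GP value, machine register or temp, has a slot.
    Vector<unsigned> useCount(AbsoluteTmpMapper<Arg::GP>::absoluteIndex(code.numTmps(Arg::GP)), 0);
    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            inst.forEachTmp([&] (Tmp& tmp, Arg::Role, Arg::Type type, Arg::Width) {
                if (type == Arg::GP)
                    useCount[AbsoluteTmpMapper<Arg::GP>::absoluteIndex(tmp)]++;
            });
        }
    }

This is the pattern that avoids hashing when nearly every Tmp ends up in the container anyway.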
diff --git a/Source/JavaScriptCore/b3/air/AirTmpWidth.cpp b/Source/JavaScriptCore/b3/air/AirTmpWidth.cpp
new file mode 100644
index 000000000..f1173c022
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmpWidth.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirTmpWidth.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+TmpWidth::TmpWidth()
+{
+}
+
+TmpWidth::TmpWidth(Code& code)
+{
+ recompute(code);
+}
+
+TmpWidth::~TmpWidth()
+{
+}
+
+void TmpWidth::recompute(Code& code)
+{
+ // Set this to true to cause this analysis to always return pessimistic results.
+ const bool beCareful = false;
+
+ const bool verbose = false;
+
+ if (verbose) {
+ dataLog("Code before TmpWidth:\n");
+ dataLog(code);
+ }
+
+ m_width.clear();
+
+ auto assumeTheWorst = [&] (Tmp tmp) {
+ Widths& widths = m_width.add(tmp, Widths()).iterator->value;
+ Arg::Type type = Arg(tmp).type();
+ widths.use = Arg::conservativeWidth(type);
+ widths.def = Arg::conservativeWidth(type);
+ };
+
+ // Assume the worst for registers.
+ RegisterSet::allRegisters().forEach(
+ [&] (Reg reg) {
+ assumeTheWorst(Tmp(reg));
+ });
+
+ if (beCareful) {
+ code.forAllTmps(assumeTheWorst);
+
+ // We fall through because the fixpoint that follows can only make things even more
+ // conservative. This mode isn't meant to be fast, just safe.
+ }
+
+    // Now analyze everything except Moves between Tmps; set those Moves aside so we can find
+    // them quickly during the fixpoint below. Note that we could make this analysis stronger by
+    // recognizing more kinds of Moves, or anything with Move-like behavior, though it's probably
+    // not worth it.
+ Vector<Inst*> moves;
+ for (BasicBlock* block : code) {
+ for (Inst& inst : *block) {
+ if (inst.kind.opcode == Move && inst.args[1].isTmp()) {
+ if (inst.args[0].isTmp()) {
+ // Make sure that both sides of the Move have a width already initialized. The
+ // fixpoint below assumes that it never has to add things to the HashMap.
+ m_width.add(inst.args[0].tmp(), Widths(Arg::GP));
+ m_width.add(inst.args[1].tmp(), Widths(Arg::GP));
+
+ moves.append(&inst);
+ continue;
+ }
+ if (inst.args[0].isImm()
+ && inst.args[0].value() >= 0) {
+ Tmp tmp = inst.args[1].tmp();
+ Widths& widths = m_width.add(tmp, Widths(Arg::GP)).iterator->value;
+
+ if (inst.args[0].value() <= std::numeric_limits<int8_t>::max())
+ widths.def = std::max(widths.def, Arg::Width8);
+ else if (inst.args[0].value() <= std::numeric_limits<int16_t>::max())
+ widths.def = std::max(widths.def, Arg::Width16);
+ else if (inst.args[0].value() <= std::numeric_limits<int32_t>::max())
+ widths.def = std::max(widths.def, Arg::Width32);
+ else
+ widths.def = std::max(widths.def, Arg::Width64);
+
+ continue;
+ }
+ }
+ inst.forEachTmp(
+ [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width width) {
+ Widths& widths = m_width.add(tmp, Widths(type)).iterator->value;
+
+ if (Arg::isAnyUse(role))
+ widths.use = std::max(widths.use, width);
+
+ if (Arg::isZDef(role))
+ widths.def = std::max(widths.def, width);
+ else if (Arg::isAnyDef(role))
+ widths.def = Arg::conservativeWidth(type);
+ });
+ }
+ }
+
+ // Finally, fixpoint over the Move's.
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ for (Inst* move : moves) {
+ ASSERT(move->kind.opcode == Move);
+ ASSERT(move->args[0].isTmp());
+ ASSERT(move->args[1].isTmp());
+
+ // We already ensure that both tmps are added to the width map. That's important
+ // because you cannot add both tmps here while simultaneously getting a reference to
+ // their values, since the second add would invalidate the reference returned by the
+ // first one.
+ Widths& srcWidths = m_width.find(move->args[0].tmp())->value;
+ Widths& dstWidths = m_width.find(move->args[1].tmp())->value;
+
+ // Legend:
+ //
+ // Move %src, %dst
+
+ // defWidth(%dst) is a promise about how many high bits are zero. The smaller the width, the
+ // stronger the promise. This Move may weaken that promise if we know that %src is making a
+ // weaker promise. Such forward flow is the only thing that determines defWidth().
+ if (dstWidths.def < srcWidths.def) {
+ dstWidths.def = srcWidths.def;
+ changed = true;
+ }
+
+            // useWidth(%src) is a promise about how many high bits are ignored. The smaller the width,
+            // the stronger the promise. This Move may weaken that promise if we know that %dst is making
+            // a weaker promise. Such backward flow is the only thing that determines useWidth().
+ if (srcWidths.use < dstWidths.use) {
+ srcWidths.use = dstWidths.use;
+ changed = true;
+ }
+ }
+ }
+
+ if (verbose)
+ dataLog("width: ", mapDump(m_width), "\n");
+}
+
+void TmpWidth::Widths::dump(PrintStream& out) const
+{
+ out.print("{use = ", use, ", def = ", def, "}");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
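
A worked example of the analysis and the fixpoint above, on hypothetical Air for illustration only: suppose a block moves the immediate 100 into %a, then moves %a into %b, then does an Add32 that uses %b. The first pass records def(%a) as Width8 (the immediate fits in 8 bits) and use(%b) as Width32 (from the Add32), while the Move between tmps is set aside. The fixpoint then flows def(%a) forward into def(%b) and use(%b) backward into use(%a), so width(%b) = min(use, def) = Width8: even though %b is read at 32 bits, its high bits are known to be zero.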
diff --git a/Source/JavaScriptCore/b3/air/AirTmpWidth.h b/Source/JavaScriptCore/b3/air/AirTmpWidth.h
new file mode 100644
index 000000000..ea612b662
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirTmpWidth.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+class TmpWidth {
+public:
+ TmpWidth();
+ TmpWidth(Code&);
+ ~TmpWidth();
+
+ void recompute(Code&);
+
+ // The width of a Tmp is the number of bits that you need to be able to track without some trivial
+ // recovery. A Tmp may have a "subwidth" (say, Width32 on a 64-bit system) if either of the following
+ // is true:
+ //
+ // - The high bits are never read.
+ // - The high bits are always zero.
+ //
+ // This doesn't tell you which of those properties holds, but you can query that using the other
+ // methods.
+ Arg::Width width(Tmp tmp) const
+ {
+ auto iter = m_width.find(tmp);
+ if (iter == m_width.end())
+ return Arg::minimumWidth(Arg(tmp).type());
+ return std::min(iter->value.use, iter->value.def);
+ }
+
+ // Return the minimum required width for all defs/uses of this Tmp.
+ Arg::Width requiredWidth(Tmp tmp)
+ {
+ auto iter = m_width.find(tmp);
+ if (iter == m_width.end())
+ return Arg::minimumWidth(Arg(tmp).type());
+ return std::max(iter->value.use, iter->value.def);
+ }
+
+    // This indirectly tells you how many of the tmp's high bits are guaranteed to be zero. The number
+    // of high bits that are zero is:
+    //
+    //     TotalBits - defWidth(tmp)
+    //
+    // where TotalBits is the total number of bits in the register, so 64 on a 64-bit system.
+ Arg::Width defWidth(Tmp tmp) const
+ {
+ auto iter = m_width.find(tmp);
+ if (iter == m_width.end())
+ return Arg::minimumWidth(Arg(tmp).type());
+ return iter->value.def;
+ }
+
+    // This tells you how much of the Tmp is going to be read.
+ Arg::Width useWidth(Tmp tmp) const
+ {
+ auto iter = m_width.find(tmp);
+ if (iter == m_width.end())
+ return Arg::minimumWidth(Arg(tmp).type());
+ return iter->value.use;
+ }
+
+private:
+ struct Widths {
+ Widths() { }
+
+ Widths(Arg::Type type)
+ {
+ use = Arg::minimumWidth(type);
+ def = Arg::minimumWidth(type);
+ }
+
+ void dump(PrintStream& out) const;
+
+ Arg::Width use;
+ Arg::Width def;
+ };
+
+ HashMap<Tmp, Widths> m_width;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
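
A hedged sketch of how a client phase might consult this analysis; the surrounding phase and the tmp being queried are assumptions, not part of this header:

    TmpWidth tmpWidth(code);
    Tmp tmp = code.newTmp(Arg::GP); // stands in for some tmp of interest
    if (tmpWidth.defWidth(tmp) <= Arg::Width32) {
        // Every def promises zeroed bits above 32, so a 32-bit consumer of this
        // tmp can skip an explicit zero-extension.
    }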
diff --git a/Source/JavaScriptCore/b3/air/AirUseCounts.h b/Source/JavaScriptCore/b3/air/AirUseCounts.h
new file mode 100644
index 000000000..98a749321
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirUseCounts.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include <wtf/HashMap.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Computes the number of uses of a variable based on frequency of execution. The frequency of blocks
+// that are only reachable by rare edges is scaled by Options::rareBlockPenalty().
+
+// Thing can be either Tmp or Arg.
+template<typename Thing>
+class UseCounts {
+public:
+ struct Counts {
+ void dump(PrintStream& out) const
+ {
+ out.print(
+ "{numWarmUses = ", numWarmUses, ", numColdUses = ", numColdUses, ", numDefs = ",
+                numDefs, ", numConstDefs = ", numConstDefs, "}");
+ }
+
+ double numWarmUses { 0 };
+ double numColdUses { 0 };
+ double numDefs { 0 };
+ double numConstDefs { 0 };
+ };
+
+ UseCounts(Code& code)
+ {
+ // Find non-rare blocks.
+ BlockWorklist fastWorklist;
+ fastWorklist.push(code[0]);
+ while (BasicBlock* block = fastWorklist.pop()) {
+ for (FrequentedBlock& successor : block->successors()) {
+ if (!successor.isRare())
+ fastWorklist.push(successor.block());
+ }
+ }
+
+ for (BasicBlock* block : code) {
+ double frequency = block->frequency();
+ if (!fastWorklist.saw(block))
+ frequency *= Options::rareBlockPenalty();
+ for (Inst& inst : *block) {
+ inst.forEach<Thing>(
+ [&] (Thing& arg, Arg::Role role, Arg::Type, Arg::Width) {
+ Counts& counts = m_counts.add(arg, Counts()).iterator->value;
+
+ if (Arg::isWarmUse(role))
+ counts.numWarmUses += frequency;
+ if (Arg::isColdUse(role))
+ counts.numColdUses += frequency;
+ if (Arg::isAnyDef(role))
+ counts.numDefs += frequency;
+ });
+
+ if ((inst.kind.opcode == Move || inst.kind.opcode == Move32)
+ && inst.args[0].isSomeImm()
+ && inst.args[1].is<Thing>())
+ m_counts.add(inst.args[1].as<Thing>(), Counts()).iterator->value.numConstDefs++;
+ }
+ }
+ }
+
+ const Counts* operator[](const Thing& arg) const
+ {
+ auto iter = m_counts.find(arg);
+ if (iter == m_counts.end())
+ return nullptr;
+ return &iter->value;
+ }
+
+ void dump(PrintStream& out) const
+ {
+ out.print(mapDump(m_counts));
+ }
+
+private:
+ HashMap<Thing, Counts> m_counts;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
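
A hedged usage sketch, illustrative only; it assumes Code::forAllTmps(), which other Air phases use, and treats warm uses plus defs as a stand-in spill weight:

    UseCounts<Tmp> useCounts(code);
    code.forAllTmps([&] (Tmp tmp) {
        if (const UseCounts<Tmp>::Counts* counts = useCounts[tmp])
            dataLog("weight(", tmp, ") = ", counts->numWarmUses + counts->numDefs, "\n");
    });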
diff --git a/Source/JavaScriptCore/b3/air/AirValidate.cpp b/Source/JavaScriptCore/b3/air/AirValidate.cpp
new file mode 100644
index 000000000..d90de62eb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirValidate.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirValidate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+class Validater {
+public:
+ Validater(Code& code, const char* dumpBefore)
+ : m_code(code)
+ , m_dumpBefore(dumpBefore)
+ {
+ }
+
+#define VALIDATE(condition, message) do { \
+ if (condition) \
+ break; \
+ fail(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #condition, toCString message); \
+ } while (false)
+
+ void run()
+ {
+ HashSet<StackSlot*> validSlots;
+ HashSet<BasicBlock*> validBlocks;
+ HashSet<Special*> validSpecials;
+
+ for (BasicBlock* block : m_code)
+ validBlocks.add(block);
+ for (StackSlot* slot : m_code.stackSlots())
+ validSlots.add(slot);
+ for (Special* special : m_code.specials())
+ validSpecials.add(special);
+
+ for (BasicBlock* block : m_code) {
+ // Blocks that are entrypoints must not have predecessors.
+ if (m_code.isEntrypoint(block))
+ VALIDATE(!block->numPredecessors(), ("At entrypoint ", *block));
+
+ for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+ Inst& inst = block->at(instIndex);
+ for (Arg& arg : inst.args) {
+ switch (arg.kind()) {
+ case Arg::Stack:
+ VALIDATE(validSlots.contains(arg.stackSlot()), ("At ", inst, " in ", *block));
+ break;
+ case Arg::Special:
+ VALIDATE(validSpecials.contains(arg.special()), ("At ", inst, " in ", *block));
+ break;
+ default:
+ break;
+ }
+ }
+ VALIDATE(inst.isValidForm(), ("At ", inst, " in ", *block));
+ if (instIndex == block->size() - 1)
+ VALIDATE(inst.isTerminal(), ("At ", inst, " in ", *block));
+ else
+ VALIDATE(!inst.isTerminal(), ("At ", inst, " in ", *block));
+
+ // forEachArg must return Arg&'s that point into the args array.
+ inst.forEachArg(
+ [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+ VALIDATE(&arg >= &inst.args[0], ("At ", arg, " in ", inst, " in ", *block));
+ VALIDATE(&arg <= &inst.args.last(), ("At ", arg, " in ", inst, " in ", *block));
+ });
+
+ switch (inst.kind.opcode) {
+ case EntrySwitch:
+ VALIDATE(block->numSuccessors() == m_code.proc().numEntrypoints(), ("At ", inst, " in ", *block));
+ break;
+ case Shuffle:
+ // We can't handle trapping shuffles because of how we lower them. That could
+ // be fixed though.
+ VALIDATE(!inst.kind.traps, ("At ", inst, " in ", *block));
+ break;
+ default:
+ break;
+ }
+ }
+ for (BasicBlock* successor : block->successorBlocks())
+ VALIDATE(validBlocks.contains(successor), ("In ", *block));
+ }
+ }
+
+private:
+ NO_RETURN_DUE_TO_CRASH void fail(
+ const char* filename, int lineNumber, const char* function, const char* condition,
+ CString message)
+ {
+ CString failureMessage;
+ {
+ StringPrintStream out;
+ out.print("AIR VALIDATION FAILURE\n");
+ out.print(" ", condition, " (", filename, ":", lineNumber, ")\n");
+ out.print(" ", message, "\n");
+ out.print(" After ", m_code.lastPhaseName(), "\n");
+ failureMessage = out.toCString();
+ }
+
+ dataLog(failureMessage);
+ if (m_dumpBefore) {
+ dataLog("Before ", m_code.lastPhaseName(), ":\n");
+ dataLog(m_dumpBefore);
+ }
+ dataLog("At time of failure:\n");
+ dataLog(m_code);
+
+ dataLog(failureMessage);
+ WTFReportAssertionFailure(filename, lineNumber, function, condition);
+ CRASH();
+ }
+
+ Code& m_code;
+ const char* m_dumpBefore;
+};
+
+} // anonymous namespace
+
+void validate(Code& code, const char* dumpBefore)
+{
+ Validater validater(code, dumpBefore);
+ validater.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/Source/JavaScriptCore/b3/air/AirValidate.h b/Source/JavaScriptCore/b3/air/AirValidate.h
new file mode 100644
index 000000000..472c76379
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirValidate.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+JS_EXPORT_PRIVATE void validate(Code&, const char* dumpBefore = nullptr);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
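
A hedged sketch of the intended call pattern; the phase being exercised is hypothetical, and toCString() works here because Code is dumpable:

    CString before = toCString(code); // capture the pre-phase dump
    runSomeAirPhase(code);            // hypothetical phase under test
    validate(code, before.data());    // on failure, prints both dumps and crashes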
diff --git a/Source/JavaScriptCore/b3/air/opcode_generator.rb b/Source/JavaScriptCore/b3/air/opcode_generator.rb
new file mode 100644
index 000000000..d14240515
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/opcode_generator.rb
@@ -0,0 +1,1228 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "pathname"
+
+class Opcode
+ attr_reader :name, :custom, :overloads
+ attr_reader :attributes
+
+ def initialize(name, custom)
+ @name = name
+ @custom = custom
+ @attributes = {}
+ unless custom
+ @overloads = []
+ end
+ end
+
+ def masmName
+ name[0].downcase + name[1..-1]
+ end
+end
+
+class Arg
+ attr_reader :role, :type, :width
+
+ def initialize(role, type, width)
+ @role = role
+ @type = type
+ @width = width
+ end
+
+ def widthCode
+ if width == "Ptr"
+ "Arg::pointerWidth()"
+ else
+ "Arg::Width#{width}"
+ end
+ end
+end
+
+class Overload
+ attr_reader :signature, :forms
+
+ def initialize(signature, forms)
+ @signature = signature
+ @forms = forms
+ end
+end
+
+class Kind
+ attr_reader :name
+ attr_accessor :custom
+
+ def initialize(name)
+ @name = name
+ @custom = false
+ end
+
+ def ==(other)
+ if other.is_a? String
+ @name == other
+ else
+ @name == other.name and @custom == other.custom
+ end
+ end
+
+ def Kind.argKinds(kind)
+ if kind == "Addr"
+ ["Addr", "Stack", "CallArg"]
+ else
+ [kind]
+ end
+ end
+
+ def argKinds
+        Kind.argKinds(name)
+ end
+end
+
+class Form
+ attr_reader :kinds, :altName, :archs
+
+ def initialize(kinds, altName, archs)
+ @kinds = kinds
+ @altName = altName
+ @archs = archs
+ end
+end
+
+class Origin
+ attr_reader :fileName, :lineNumber
+
+ def initialize(fileName, lineNumber)
+ @fileName = fileName
+ @lineNumber = lineNumber
+ end
+
+ def to_s
+ "#{fileName}:#{lineNumber}"
+ end
+end
+
+class Token
+ attr_reader :origin, :string
+
+ def initialize(origin, string)
+ @origin = origin
+ @string = string
+ end
+
+ def ==(other)
+ if other.is_a? Token
+ @string == other.string
+ else
+ @string == other
+ end
+ end
+
+ def =~(other)
+ @string =~ other
+ end
+
+ def to_s
+ "#{@string.inspect} at #{origin}"
+ end
+
+ def parseError(*comment)
+ if comment.empty?
+ raise "Parse error: #{to_s}"
+ else
+ raise "Parse error: #{to_s}: #{comment[0]}"
+ end
+ end
+end
+
+def lex(str, fileName)
+ fileName = Pathname.new(fileName)
+ result = []
+ lineNumber = 1
+ while not str.empty?
+ case str
+ when /\A\#([^\n]*)/
+ # comment, ignore
+ when /\A\n/
+ # newline, ignore
+ lineNumber += 1
+ when /\A([a-zA-Z0-9_]+)/
+ result << Token.new(Origin.new(fileName, lineNumber), $&)
+ when /\A([ \t\r]+)/
+ # whitespace, ignore
+ when /\A[,:*\/]/
+ result << Token.new(Origin.new(fileName, lineNumber), $&)
+ else
+ raise "Lexer error at #{Origin.new(fileName, lineNumber).to_s}, unexpected sequence #{str[0..20].inspect}"
+ end
+ str = $~.post_match
+ end
+ result
+end
+
+def isRole(token)
+ token =~ /\A((U)|(D)|(UD)|(ZD)|(UZD)|(UA)|(S))\Z/
+end
+
+def isGF(token)
+ token =~ /\A((G)|(F))\Z/
+end
+
+def isKind(token)
+ token =~ /\A((Tmp)|(Imm)|(BigImm)|(BitImm)|(BitImm64)|(Addr)|(Index)|(RelCond)|(ResCond)|(DoubleCond))\Z/
+end
+
+def isArch(token)
+ token =~ /\A((x86)|(x86_32)|(x86_64)|(arm)|(armv7)|(arm64)|(32)|(64))\Z/
+end
+
+def isWidth(token)
+ token =~ /\A((8)|(16)|(32)|(64)|(Ptr))\Z/
+end
+
+def isKeyword(token)
+ isRole(token) or isGF(token) or isKind(token) or isArch(token) or isWidth(token) or
+ token == "custom" or token == "as"
+end
+
+def isIdentifier(token)
+ token =~ /\A([a-zA-Z0-9_]+)\Z/ and not isKeyword(token)
+end
+
+class Parser
+ def initialize(data, fileName)
+ @tokens = lex(data, fileName)
+ @idx = 0
+ end
+
+ def token
+ @tokens[@idx]
+ end
+
+ def advance
+ @idx += 1
+ end
+
+ def parseError(*comment)
+ if token
+ token.parseError(*comment)
+ else
+ if comment.empty?
+ raise "Parse error at end of file"
+ else
+ raise "Parse error at end of file: #{comment[0]}"
+ end
+ end
+ end
+
+ def consume(string)
+ parseError("Expected #{string}") unless token == string
+ advance
+ end
+
+ def consumeIdentifier
+ result = token.string
+ parseError("Expected identifier") unless isIdentifier(result)
+ advance
+ result
+ end
+
+ def consumeRole
+ result = token.string
+ parseError("Expected role (U, D, UD, ZD, UZD, UA, or S)") unless isRole(result)
+ advance
+ result
+ end
+
+ def consumeType
+ result = token.string
+ parseError("Expected type (G or F)") unless isGF(result)
+ advance
+ result
+ end
+
+ def consumeKind
+ result = token.string
+ parseError("Expected kind (Imm, BigImm, BitImm, BitImm64, Tmp, Addr, Index, RelCond, ResCond, or DoubleCond)") unless isKind(result)
+ advance
+ result
+ end
+
+ def consumeWidth
+ result = token.string
+ parseError("Expected width (8, 16, 32, or 64)") unless isWidth(result)
+ advance
+ result
+ end
+
+ def parseArchs
+ return nil unless isArch(token)
+
+ result = []
+ while isArch(token)
+ case token.string
+ when "x86"
+ result << "X86"
+ result << "X86_64"
+ when "x86_32"
+ result << "X86"
+ when "x86_64"
+ result << "X86_64"
+ when "arm"
+ result << "ARMv7"
+ result << "ARM64"
+ when "armv7"
+ result << "ARMv7"
+ when "arm64"
+ result << "ARM64"
+ when "32"
+ result << "X86"
+ result << "ARMv7"
+ when "64"
+ result << "X86_64"
+ result << "ARM64"
+ else
+ raise token.string
+ end
+ advance
+ end
+
+ consume(":")
+ @lastArchs = result
+ end
+
+ def consumeArchs
+ result = @lastArchs
+ @lastArchs = nil
+ result
+ end
+
+ def parseAndConsumeArchs
+ parseArchs
+ consumeArchs
+ end
+
+ def intersectArchs(left, right)
+ return left unless right
+ return right unless left
+
+ left.select {
+ | value |
+ right.find {
+ | otherValue |
+ value == otherValue
+ }
+ }
+ end
+
+ def parse
+ result = {}
+
+ loop {
+ break if @idx >= @tokens.length
+
+ if token == "custom"
+ consume("custom")
+ opcodeName = consumeIdentifier
+
+ parseError("Cannot overload a custom opcode") if result[opcodeName]
+
+ result[opcodeName] = Opcode.new(opcodeName, true)
+ else
+ opcodeArchs = parseAndConsumeArchs
+
+ opcodeName = consumeIdentifier
+
+ if result[opcodeName]
+ opcode = result[opcodeName]
+ parseError("Cannot overload a custom opcode") if opcode.custom
+ else
+ opcode = Opcode.new(opcodeName, false)
+ result[opcodeName] = opcode
+ end
+
+ signature = []
+ forms = []
+
+ if isRole(token)
+ loop {
+ role = consumeRole
+ consume(":")
+ type = consumeType
+ consume(":")
+ width = consumeWidth
+
+ signature << Arg.new(role, type, width)
+
+ break unless token == ","
+ consume(",")
+ }
+ end
+
+ while token == "/"
+ consume("/")
+ case token.string
+ when "branch"
+ opcode.attributes[:branch] = true
+ opcode.attributes[:terminal] = true
+ when "terminal"
+ opcode.attributes[:terminal] = true
+ when "effects"
+ opcode.attributes[:effects] = true
+ when "return"
+ opcode.attributes[:return] = true
+ opcode.attributes[:terminal] = true
+ else
+ parseError("Bad / directive")
+ end
+ advance
+ end
+
+ parseArchs
+ if isKind(token)
+ loop {
+ kinds = []
+ altName = nil
+ formArchs = consumeArchs
+ loop {
+ kinds << Kind.new(consumeKind)
+
+ if token == "*"
+ parseError("Can only apply * to Tmp") unless kinds[-1].name == "Tmp"
+ kinds[-1].custom = true
+ consume("*")
+ end
+
+ break unless token == ","
+ consume(",")
+ }
+
+ if token == "as"
+ consume("as")
+ altName = consumeIdentifier
+ end
+
+ parseError("Form has wrong number of arguments for overload") unless kinds.length == signature.length
+ kinds.each_with_index {
+ | kind, index |
+ if kind.name == "Imm" or kind.name == "BigImm" or kind.name == "BitImm" or kind.name == "BitImm64"
+ if signature[index].role != "U"
+ parseError("Form has an immediate for a non-use argument")
+ end
+ if signature[index].type != "G"
+ parseError("Form has an immediate for a non-general-purpose argument")
+ end
+ end
+ }
+ forms << Form.new(kinds, altName, intersectArchs(opcodeArchs, formArchs))
+
+ parseArchs
+ break unless isKind(token)
+ }
+ end
+
+ if signature.length == 0
+ raise unless forms.length == 0
+ forms << Form.new([], nil, opcodeArchs)
+ end
+
+ opcode.overloads << Overload.new(signature, forms)
+ end
+ }
+
+ result
+ end
+end
+
+$fileName = ARGV[0]
+
+parser = Parser.new(IO::read($fileName), $fileName)
+$opcodes = parser.parse
+
+def writeH(filename)
+ File.open("Air#{filename}.h", "w") {
+ | outp |
+
+ outp.puts "// Generated by opcode_generator.rb from #{$fileName} -- do not edit!"
+
+ outp.puts "#ifndef Air#{filename}_h"
+ outp.puts "#define Air#{filename}_h"
+
+ yield outp
+
+ outp.puts "#endif // Air#{filename}_h"
+ }
+end
+
+writeH("Opcode") {
+ | outp |
+ outp.puts "namespace JSC { namespace B3 { namespace Air {"
+ outp.puts "enum Opcode : int16_t {"
+ $opcodes.keys.sort.each {
+ | opcode |
+ outp.puts " #{opcode},"
+ }
+ outp.puts "};"
+
+ outp.puts "static const unsigned numOpcodes = #{$opcodes.keys.size};"
+ outp.puts "} } } // namespace JSC::B3::Air"
+
+ outp.puts "namespace WTF {"
+ outp.puts "class PrintStream;"
+ outp.puts "JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Opcode);"
+ outp.puts "} // namespace WTF"
+}
+
+# From here on, we don't try to emit properly indented code, since we're using a recursive pattern
+# matcher.
+
+def matchForms(outp, speed, forms, columnIndex, columnGetter, filter, callback)
+ return if forms.length == 0
+
+ if filter[forms]
+ return
+ end
+
+ if columnIndex >= forms[0].kinds.length
+ raise "Did not reduce to one form: #{forms.inspect}" unless forms.length == 1
+ callback[forms[0]]
+ outp.puts "break;"
+ return
+ end
+
+ groups = {}
+ forms.each {
+ | form |
+ kind = form.kinds[columnIndex].name
+ if groups[kind]
+ groups[kind] << form
+ else
+ groups[kind] = [form]
+ end
+ }
+
+ if speed == :fast and groups.length == 1
+ matchForms(outp, speed, forms, columnIndex + 1, columnGetter, filter, callback)
+ return
+ end
+
+ outp.puts "switch (#{columnGetter[columnIndex]}) {"
+ groups.each_pair {
+ | key, value |
+ outp.puts "#if USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
+ Kind.argKinds(key).each {
+ | argKind |
+ outp.puts "case Arg::#{argKind}:"
+ }
+ matchForms(outp, speed, value, columnIndex + 1, columnGetter, filter, callback)
+ outp.puts "break;"
+ outp.puts "#endif // USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
+ }
+ outp.puts "default:"
+ outp.puts "break;"
+ outp.puts "}"
+end
+
+def matchInstOverload(outp, speed, inst)
+ outp.puts "switch (#{inst}->kind.opcode) {"
+ $opcodes.values.each {
+ | opcode |
+ outp.puts "case #{opcode.name}:"
+ if opcode.custom
+ yield opcode, nil
+ else
+ needOverloadSwitch = ((opcode.overloads.size != 1) or speed == :safe)
+ outp.puts "switch (#{inst}->args.size()) {" if needOverloadSwitch
+ opcode.overloads.each {
+ | overload |
+ outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+ yield opcode, overload
+ outp.puts "break;" if needOverloadSwitch
+ }
+ if needOverloadSwitch
+ outp.puts "default:"
+ outp.puts "break;"
+ outp.puts "}"
+ end
+ end
+ outp.puts "break;"
+ }
+ outp.puts "default:"
+ outp.puts "break;"
+ outp.puts "}"
+end
+
+def matchInstOverloadForm(outp, speed, inst)
+ matchInstOverload(outp, speed, inst) {
+ | opcode, overload |
+ if opcode.custom
+ yield opcode, nil, nil
+ else
+ columnGetter = proc {
+ | columnIndex |
+ "#{inst}->args[#{columnIndex}].kind()"
+ }
+ filter = proc { false }
+ callback = proc {
+ | form |
+ yield opcode, overload, form
+ }
+ matchForms(outp, speed, overload.forms, 0, columnGetter, filter, callback)
+ end
+ }
+end
+
+def beginArchs(outp, archs)
+ return unless archs
+ if archs.empty?
+ outp.puts "#if 0"
+ return
+ end
+ outp.puts("#if " + archs.map {
+ | arch |
+ "CPU(#{arch})"
+ }.join(" || "))
+end
+
+def endArchs(outp, archs)
+ return unless archs
+ outp.puts "#endif"
+end
+
+writeH("OpcodeUtils") {
+ | outp |
+ outp.puts "#include \"AirCustom.h\""
+ outp.puts "#include \"AirInst.h\""
+ outp.puts "namespace JSC { namespace B3 { namespace Air {"
+
+ outp.puts "inline bool opgenHiddenTruth() { return true; }"
+ outp.puts "template<typename T>"
+ outp.puts "inline T* opgenHiddenPtrIdentity(T* pointer) { return pointer; }"
+ outp.puts "#define OPGEN_RETURN(value) do {\\"
+ outp.puts " if (opgenHiddenTruth())\\"
+ outp.puts " return value;\\"
+ outp.puts "} while (false)"
+
+ outp.puts "template<typename Functor>"
+ outp.puts "void Inst::forEachArg(const Functor& functor)"
+ outp.puts "{"
+ matchInstOverload(outp, :fast, "this") {
+ | opcode, overload |
+ if opcode.custom
+ outp.puts "#{opcode.name}Custom::forEachArg(*this, functor);"
+ else
+ overload.signature.each_with_index {
+ | arg, index |
+
+ role = nil
+ case arg.role
+ when "U"
+ role = "Use"
+ when "D"
+ role = "Def"
+ when "ZD"
+ role = "ZDef"
+ when "UD"
+ role = "UseDef"
+ when "UZD"
+ role = "UseZDef"
+ when "UA"
+ role = "UseAddr"
+ when "S"
+ role = "Scratch"
+ else
+ raise
+ end
+
+ outp.puts "functor(args[#{index}], Arg::#{role}, Arg::#{arg.type}P, #{arg.widthCode});"
+ }
+ end
+ }
+ outp.puts "}"
+
+ outp.puts "template<typename... Arguments>"
+ outp.puts "ALWAYS_INLINE bool isValidForm(Opcode opcode, Arguments... arguments)"
+ outp.puts "{"
+ outp.puts "Arg::Kind kinds[sizeof...(Arguments)] = { arguments... };"
+ outp.puts "switch (opcode) {"
+ $opcodes.values.each {
+ | opcode |
+ outp.puts "case #{opcode.name}:"
+ if opcode.custom
+ outp.puts "OPGEN_RETURN(#{opcode.name}Custom::isValidFormStatic(arguments...));"
+ else
+ outp.puts "switch (sizeof...(Arguments)) {"
+ opcode.overloads.each {
+ | overload |
+ outp.puts "case #{overload.signature.length}:"
+ columnGetter = proc { | columnIndex | "opgenHiddenPtrIdentity(kinds)[#{columnIndex}]" }
+ filter = proc { false }
+ callback = proc {
+ | form |
+ # This conservatively says that Stack is not a valid form for UseAddr,
+ # because it's only valid if it's not a spill slot. This is consistent with
+ # isValidForm() being conservative and it also happens to be practical since
+ # we don't really use isValidForm for deciding when Stack is safe.
+ overload.signature.length.times {
+ | index |
+ if overload.signature[index].role == "UA"
+ outp.puts "if (opgenHiddenPtrIdentity(kinds)[#{index}] == Arg::Stack)"
+ outp.puts " return false;"
+ end
+ }
+
+ notCustom = (not form.kinds.detect { | kind | kind.custom })
+ if notCustom
+ beginArchs(outp, form.archs)
+ outp.puts "OPGEN_RETURN(true);"
+ endArchs(outp, form.archs)
+ end
+ }
+ matchForms(outp, :safe, overload.forms, 0, columnGetter, filter, callback)
+ outp.puts "break;"
+ }
+ outp.puts "default:"
+ outp.puts "break;"
+ outp.puts "}"
+ end
+ outp.puts "break;"
+ }
+ outp.puts "default:"
+ outp.puts "break;"
+ outp.puts "}"
+ outp.puts "return false; "
+ outp.puts "}"
+
+ outp.puts "inline bool isDefinitelyTerminal(Opcode opcode)"
+ outp.puts "{"
+ outp.puts "switch (opcode) {"
+ didFindTerminals = false
+ $opcodes.values.each {
+ | opcode |
+ if opcode.attributes[:terminal]
+ outp.puts "case #{opcode.name}:"
+ didFindTerminals = true
+ end
+ }
+ if didFindTerminals
+ outp.puts "return true;"
+ end
+ outp.puts "default:"
+ outp.puts "return false;"
+ outp.puts "}"
+ outp.puts "}"
+
+ outp.puts "inline bool isReturn(Opcode opcode)"
+ outp.puts "{"
+ outp.puts "switch (opcode) {"
+ didFindReturns = false
+ $opcodes.values.each {
+ | opcode |
+ if opcode.attributes[:return]
+ outp.puts "case #{opcode.name}:"
+ didFindReturns = true
+ end
+ }
+ if didFindReturns
+ outp.puts "return true;"
+ end
+ outp.puts "default:"
+ outp.puts "return false;"
+ outp.puts "}"
+ outp.puts "}"
+
+ outp.puts "} } } // namespace JSC::B3::Air"
+}
+
+writeH("OpcodeGenerated") {
+ | outp |
+ outp.puts "#include \"AirInstInlines.h\""
+ outp.puts "#include \"wtf/PrintStream.h\""
+ outp.puts "namespace WTF {"
+ outp.puts "using namespace JSC::B3::Air;"
+ outp.puts "void printInternal(PrintStream& out, Opcode opcode)"
+ outp.puts "{"
+ outp.puts " switch (opcode) {"
+ $opcodes.keys.each {
+ | opcode |
+ outp.puts " case #{opcode}:"
+ outp.puts " out.print(\"#{opcode}\");"
+ outp.puts " return;"
+ }
+ outp.puts " }"
+ outp.puts " RELEASE_ASSERT_NOT_REACHED();"
+ outp.puts "}"
+ outp.puts "} // namespace WTF"
+ outp.puts "namespace JSC { namespace B3 { namespace Air {"
+ outp.puts "bool Inst::isValidForm()"
+ outp.puts "{"
+ matchInstOverloadForm(outp, :safe, "this") {
+ | opcode, overload, form |
+ if opcode.custom
+ outp.puts "OPGEN_RETURN(#{opcode.name}Custom::isValidForm(*this));"
+ else
+ beginArchs(outp, form.archs)
+ needsMoreValidation = false
+ overload.signature.length.times {
+ | index |
+ arg = overload.signature[index]
+ kind = form.kinds[index]
+ needsMoreValidation |= kind.custom
+
+                # Some kinds of Args require additional validation.
+ case kind.name
+ when "Tmp"
+ outp.puts "if (!args[#{index}].tmp().is#{arg.type}P())"
+ outp.puts "OPGEN_RETURN(false);"
+ when "Imm"
+ outp.puts "if (!Arg::isValidImmForm(args[#{index}].value()))"
+ outp.puts "OPGEN_RETURN(false);"
+ when "BitImm"
+ outp.puts "if (!Arg::isValidBitImmForm(args[#{index}].value()))"
+ outp.puts "OPGEN_RETURN(false);"
+ when "BitImm64"
+ outp.puts "if (!Arg::isValidBitImm64Form(args[#{index}].value()))"
+ outp.puts "OPGEN_RETURN(false);"
+ when "Addr"
+ if arg.role == "UA"
+ outp.puts "if (args[#{index}].isStack() && args[#{index}].stackSlot()->isSpill())"
+ outp.puts "OPGEN_RETURN(false);"
+ end
+
+ outp.puts "if (!Arg::isValidAddrForm(args[#{index}].offset()))"
+ outp.puts "OPGEN_RETURN(false);"
+ when "Index"
+ outp.puts "if (!Arg::isValidIndexForm(args[#{index}].scale(), args[#{index}].offset(), #{arg.widthCode}))"
+ outp.puts "OPGEN_RETURN(false);"
+ when "BigImm"
+ when "RelCond"
+ when "ResCond"
+ when "DoubleCond"
+ else
+ raise "Unexpected kind: #{kind.name}"
+ end
+ }
+ if needsMoreValidation
+ outp.puts "if (!is#{opcode.name}Valid(*this))"
+ outp.puts "OPGEN_RETURN(false);"
+ end
+ outp.puts "OPGEN_RETURN(true);"
+ endArchs(outp, form.archs)
+ end
+ }
+ outp.puts "return false;"
+ outp.puts "}"
+
+ outp.puts "bool Inst::admitsStack(unsigned argIndex)"
+ outp.puts "{"
+ outp.puts "switch (kind.opcode) {"
+ $opcodes.values.each {
+ | opcode |
+ outp.puts "case #{opcode.name}:"
+
+ if opcode.custom
+ outp.puts "OPGEN_RETURN(#{opcode.name}Custom::admitsStack(*this, argIndex));"
+ else
+ # Switch on the argIndex.
+ outp.puts "switch (argIndex) {"
+
+ numArgs = opcode.overloads.map {
+ | overload |
+ overload.signature.length
+ }.max
+
+ numArgs.times {
+ | argIndex |
+ outp.puts "case #{argIndex}:"
+
+ # Check if all of the forms of all of the overloads either do, or don't, admit an address
+ # at this index. We expect this to be a very common case.
+ numYes = 0
+ numNo = 0
+ opcode.overloads.each {
+ | overload |
+ useAddr = (overload.signature[argIndex] and
+ overload.signature[argIndex].role == "UA")
+ overload.forms.each {
+ | form |
+ if form.kinds[argIndex] == "Addr" and not useAddr
+ numYes += 1
+ else
+ numNo += 1
+ end
+ }
+ }
+
+ # Note that we deliberately test numYes first because if we end up with no forms, we want
+ # to say that Address is inadmissible.
+ if numYes == 0
+ outp.puts "OPGEN_RETURN(false);"
+ elsif numNo == 0
+ outp.puts "OPGEN_RETURN(true);"
+ else
+ # Now do the full test.
+
+ needOverloadSwitch = (opcode.overloads.size != 1)
+
+ outp.puts "switch (args.size()) {" if needOverloadSwitch
+ opcode.overloads.each {
+ | overload |
+
+ useAddr = (overload.signature[argIndex] and
+ overload.signature[argIndex].role == "UA")
+
+ # Again, check if all of them do what we want.
+ numYes = 0
+ numNo = 0
+ overload.forms.each {
+ | form |
+ if form.kinds[argIndex] == "Addr" and not useAddr
+ numYes += 1
+ else
+ numNo += 1
+ end
+ }
+
+ if numYes == 0
+ # Don't emit anything, just drop to default.
+ elsif numNo == 0
+ outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+ outp.puts "OPGEN_RETURN(true);"
+ outp.puts "break;" if needOverloadSwitch
+ else
+ outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+
+ # This is how we test the hypothesis that changing this argument to an
+ # address yields a valid form.
+ columnGetter = proc {
+ | columnIndex |
+ if columnIndex == argIndex
+ "Arg::Addr"
+ else
+ "args[#{columnIndex}].kind()"
+ end
+ }
+ filter = proc {
+ | forms |
+ numYes = 0
+
+ forms.each {
+ | form |
+ if form.kinds[argIndex] == "Addr"
+ numYes += 1
+ end
+ }
+
+ if numYes == 0
+ # Drop down, emit no code, since we cannot match.
+ true
+ else
+ # Keep going.
+ false
+ end
+ }
+ callback = proc {
+ | form |
+ beginArchs(outp, form.archs)
+ outp.puts "OPGEN_RETURN(true);"
+ endArchs(outp, form.archs)
+ }
+ matchForms(outp, :safe, overload.forms, 0, columnGetter, filter, callback)
+
+ outp.puts "break;" if needOverloadSwitch
+ end
+ }
+ if needOverloadSwitch
+ outp.puts "default:"
+ outp.puts "break;"
+ outp.puts "}"
+ end
+ end
+
+ outp.puts "break;"
+ }
+
+ outp.puts "default:"
+ outp.puts "break;"
+ outp.puts "}"
+ end
+
+ outp.puts "break;"
+ }
+ outp.puts "default:";
+ outp.puts "break;"
+ outp.puts "}"
+ outp.puts "return false;"
+ outp.puts "}"
+
+ outp.puts "bool Inst::isTerminal()"
+ outp.puts "{"
+ outp.puts "switch (kind.opcode) {"
+ foundTrue = false
+ $opcodes.values.each {
+ | opcode |
+ if opcode.attributes[:terminal]
+ outp.puts "case #{opcode.name}:"
+ foundTrue = true
+ end
+ }
+ if foundTrue
+ outp.puts "return true;"
+ end
+ $opcodes.values.each {
+ | opcode |
+ if opcode.custom
+ outp.puts "case #{opcode.name}:"
+ outp.puts "return #{opcode.name}Custom::isTerminal(*this);"
+ end
+ }
+ outp.puts "default:"
+ outp.puts "return false;"
+ outp.puts "}"
+ outp.puts "}"
+
+ outp.puts "bool Inst::hasNonArgNonControlEffects()"
+ outp.puts "{"
+ outp.puts "if (kind.traps)"
+ outp.puts "return true;"
+ outp.puts "switch (kind.opcode) {"
+ foundTrue = false
+ $opcodes.values.each {
+ | opcode |
+ if opcode.attributes[:effects]
+ outp.puts "case #{opcode.name}:"
+ foundTrue = true
+ end
+ }
+ if foundTrue
+ outp.puts "return true;"
+ end
+ $opcodes.values.each {
+ | opcode |
+ if opcode.custom
+ outp.puts "case #{opcode.name}:"
+ outp.puts "return #{opcode.name}Custom::hasNonArgNonControlEffects(*this);"
+ end
+ }
+ outp.puts "default:"
+ outp.puts "return false;"
+ outp.puts "}"
+ outp.puts "}"
+
+ outp.puts "bool Inst::hasNonArgEffects()"
+ outp.puts "{"
+ outp.puts "if (kind.traps)"
+ outp.puts "return true;"
+ outp.puts "switch (kind.opcode) {"
+ foundTrue = false
+ $opcodes.values.each {
+ | opcode |
+ if opcode.attributes[:terminal] or opcode.attributes[:effects]
+ outp.puts "case #{opcode.name}:"
+ foundTrue = true
+ end
+ }
+ if foundTrue
+ outp.puts "return true;"
+ end
+ $opcodes.values.each {
+ | opcode |
+ if opcode.custom
+ outp.puts "case #{opcode.name}:"
+ outp.puts "return #{opcode.name}Custom::hasNonArgEffects(*this);"
+ end
+ }
+ outp.puts "default:"
+ outp.puts "return false;"
+ outp.puts "}"
+ outp.puts "}"
+
+ outp.puts "CCallHelpers::Jump Inst::generate(CCallHelpers& jit, GenerationContext& context)"
+ outp.puts "{"
+ outp.puts "UNUSED_PARAM(jit);"
+ outp.puts "UNUSED_PARAM(context);"
+ outp.puts "CCallHelpers::Jump result;"
+ matchInstOverloadForm(outp, :fast, "this") {
+ | opcode, overload, form |
+ if opcode.custom
+ outp.puts "OPGEN_RETURN(#{opcode.name}Custom::generate(*this, jit, context));"
+ else
+ beginArchs(outp, form.archs)
+ if form.altName
+ methodName = form.altName
+ else
+ methodName = opcode.masmName
+ end
+ if opcode.attributes[:branch]
+ outp.print "result = "
+ end
+ outp.print "jit.#{methodName}("
+
+ form.kinds.each_with_index {
+ | kind, index |
+ if index != 0
+ outp.print ", "
+ end
+ case kind.name
+ when "Tmp"
+ if overload.signature[index].type == "G"
+ outp.print "args[#{index}].gpr()"
+ else
+ outp.print "args[#{index}].fpr()"
+ end
+ when "Imm", "BitImm"
+ outp.print "args[#{index}].asTrustedImm32()"
+ when "BigImm", "BitImm64"
+ outp.print "args[#{index}].asTrustedImm64()"
+ when "Addr"
+ outp.print "args[#{index}].asAddress()"
+ when "Index"
+ outp.print "args[#{index}].asBaseIndex()"
+ when "RelCond"
+ outp.print "args[#{index}].asRelationalCondition()"
+ when "ResCond"
+ outp.print "args[#{index}].asResultCondition()"
+ when "DoubleCond"
+ outp.print "args[#{index}].asDoubleCondition()"
+ end
+ }
+
+ outp.puts ");"
+ outp.puts "OPGEN_RETURN(result);"
+ endArchs(outp, form.archs)
+ end
+ }
+ outp.puts "RELEASE_ASSERT_NOT_REACHED();"
+ outp.puts "return result;"
+ outp.puts "}"
+
+ outp.puts "} } } // namespace JSC::B3::Air"
+}
+
+# This is a hack for JSAir. It's a joke.
+File.open("JSAir_opcode.js", "w") {
+ | outp |
+ outp.puts "\"use strict\";"
+ outp.puts "// Generated by opcode_generator.rb from #{$fileName} -- do not edit!"
+
+ $opcodes.values.each {
+ | opcode |
+ outp.puts "const #{opcode.name} = Symbol(#{opcode.name.inspect});"
+ }
+
+ outp.puts "function Inst_forEachArg(inst, func)"
+ outp.puts "{"
+ outp.puts "let replacement;"
+ outp.puts "switch (inst.opcode) {"
+ $opcodes.values.each {
+ | opcode |
+ outp.puts "case #{opcode.name}:"
+ if opcode.custom
+ outp.puts "#{opcode.name}Custom.forEachArg(inst, func);"
+ else
+ needOverloadSwitch = opcode.overloads.size != 1
+ outp.puts "switch (inst.args.length) {" if needOverloadSwitch
+ opcode.overloads.each {
+ | overload |
+ outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+ overload.signature.each_with_index {
+ | arg, index |
+ role = nil
+ case arg.role
+ when "U"
+ role = "Use"
+ when "D"
+ role = "Def"
+ when "ZD"
+ role = "ZDef"
+ when "UD"
+ role = "UseDef"
+ when "UZD"
+ role = "UseZDef"
+ when "UA"
+ role = "UseAddr"
+ when "S"
+ role = "Scratch"
+ else
+ raise
+ end
+
+ outp.puts "inst.visitArg(#{index}, func, Arg.#{role}, #{arg.type}P, #{arg.width});"
+ }
+ outp.puts "break;"
+ }
+ if needOverloadSwitch
+ outp.puts "default:"
+ outp.puts "throw new Error(\"Bad overload\");"
+ outp.puts "break;"
+ outp.puts "}"
+ end
+ end
+ outp.puts "break;"
+ }
+ outp.puts "default:"
+ outp.puts "throw \"Bad opcode\";"
+ outp.puts "}"
+ outp.puts "}"
+
+ outp.puts "function Inst_hasNonArgEffects(inst)"
+ outp.puts "{"
+ outp.puts "switch (inst.opcode) {"
+ foundTrue = false
+ $opcodes.values.each {
+ | opcode |
+ if opcode.attributes[:terminal] or opcode.attributes[:effects]
+ outp.puts "case #{opcode.name}:"
+ foundTrue = true
+ end
+ }
+ if foundTrue
+ outp.puts "return true;"
+ end
+ $opcodes.values.each {
+ | opcode |
+ if opcode.custom
+ outp.puts "case #{opcode.name}:"
+ outp.puts "return #{opcode.name}Custom.hasNonArgNonControlEffects(inst);"
+ end
+ }
+ outp.puts "default:"
+ outp.puts "return false;"
+ outp.puts "}"
+ outp.puts "}"
+
+ outp.puts "function opcodeCode(opcode)"
+ outp.puts "{"
+ outp.puts "switch (opcode) {"
+ $opcodes.keys.sort.each_with_index {
+ | opcode, index |
+ outp.puts "case #{opcode}:"
+ outp.puts "return #{index}"
+ }
+ outp.puts "default:"
+ outp.puts "throw new Error(\"bad opcode\");"
+ outp.puts "}"
+ outp.puts "}"
+}
+
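
For context, a hedged C++ sketch of how the generated isValidForm() template is consumed by an instruction-selection client; the tmps, the offset, and the surrounding block are illustrative assumptions:

    Tmp base = code.newTmp(Arg::GP);
    Tmp scratch = code.newTmp(Arg::GP);
    Tmp dest = code.newTmp(Arg::GP);
    if (isValidForm(Add32, Arg::Addr, Arg::Tmp)) {
        // Fold the load into the add on targets whose Add32 has an Addr, Tmp form.
        block->append(Add32, nullptr, Arg::addr(base, 8), dest);
    } else {
        block->append(Move32, nullptr, Arg::addr(base, 8), scratch);
        block->append(Add32, nullptr, scratch, dest);
    }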
diff --git a/Source/JavaScriptCore/b3/air/testair.cpp b/Source/JavaScriptCore/b3/air/testair.cpp
new file mode 100644
index 000000000..9f8a8d83e
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/testair.cpp
@@ -0,0 +1,1964 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "AirCode.h"
+#include "AirGenerate.h"
+#include "AirInstInlines.h"
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3Compilation.h"
+#include "B3Procedure.h"
+#include "CCallHelpers.h"
+#include "InitializeThreading.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "PureNaN.h"
+#include "VM.h"
+#include <cmath>
+#include <map>
+#include <string>
+#include <wtf/Lock.h>
+#include <wtf/NumberOfCores.h>
+#include <wtf/Threading.h>
+
+// We don't have a NO_RETURN_DUE_TO_EXIT, nor should we. That's ridiculous.
+static bool hiddenTruthBecauseNoReturnIsStupid() { return true; }
+
+static void usage()
+{
+    dataLog("Usage: testair [<filter>]\n");
+ if (hiddenTruthBecauseNoReturnIsStupid())
+ exit(1);
+}
+
+#if ENABLE(B3_JIT)
+
+using namespace JSC;
+using namespace JSC::B3::Air;
+
+namespace {
+
+StaticLock crashLock;
+
+// Nothing fancy for now; we just use the existing WTF assertion machinery.
+#define CHECK(x) do { \
+ if (!!(x)) \
+ break; \
+ crashLock.lock(); \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #x); \
+ CRASH(); \
+ } while (false)
+
+VM* vm;
+
+std::unique_ptr<B3::Compilation> compile(B3::Procedure& proc)
+{
+ prepareForGeneration(proc.code());
+ CCallHelpers jit(vm);
+ generate(proc.code(), jit);
+ LinkBuffer linkBuffer(*vm, jit, nullptr);
+
+ return std::make_unique<B3::Compilation>(
+ FINALIZE_CODE(linkBuffer, ("testair compilation")), proc.releaseByproducts());
+}
+
+template<typename T, typename... Arguments>
+T invoke(const B3::Compilation& code, Arguments... arguments)
+{
+ T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(code.code().executableAddress());
+ return function(arguments...);
+}
+
+template<typename T, typename... Arguments>
+T compileAndRun(B3::Procedure& procedure, Arguments... arguments)
+{
+ return invoke<T>(*compile(procedure), arguments...);
+}
+
+void testSimple()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(Move, nullptr, Arg::imm(42), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(compileAndRun<int>(proc) == 42);
+}
+
+// Use this to put a constant into a register without Air being able to see the constant.
+template<typename T>
+void loadConstantImpl(BasicBlock* block, T value, B3::Air::Opcode move, Tmp tmp, Tmp scratch)
+{
+ static StaticLock lock;
+ static std::map<T, T*>* map; // I'm not messing with HashMap's problems with integers.
+
+ LockHolder locker(lock);
+ if (!map)
+ map = new std::map<T, T*>();
+
+ if (!map->count(value))
+ (*map)[value] = new T(value);
+
+ T* ptr = (*map)[value];
+ block->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(ptr)), scratch);
+ block->append(move, nullptr, Arg::addr(scratch), tmp);
+}
+
+void loadConstant(BasicBlock* block, intptr_t value, Tmp tmp)
+{
+ loadConstantImpl<intptr_t>(block, value, Move, tmp, tmp);
+}
+
+void loadDoubleConstant(BasicBlock* block, double value, Tmp tmp, Tmp scratch)
+{
+ loadConstantImpl<double>(block, value, MoveDouble, tmp, scratch);
+}
+
+void testShuffleSimpleSwap()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32));
+
+ int32_t things[4];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 2);
+ CHECK(things[2] == 4);
+ CHECK(things[3] == 3);
+}
+
+void testShuffleSimpleShift()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32));
+
+ int32_t things[5];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 2);
+ CHECK(things[2] == 3);
+ CHECK(things[3] == 3);
+ CHECK(things[4] == 4);
+}
+
+void testShuffleLongShift()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ loadConstant(root, 7, Tmp(GPRInfo::regT6));
+ loadConstant(root, 8, Tmp(GPRInfo::regT7));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32));
+
+ int32_t things[8];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 2);
+ CHECK(things[3] == 3);
+ CHECK(things[4] == 4);
+ CHECK(things[5] == 5);
+ CHECK(things[6] == 6);
+ CHECK(things[7] == 7);
+}
+
+void testShuffleLongShiftBackwards()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ loadConstant(root, 7, Tmp(GPRInfo::regT6));
+ loadConstant(root, 8, Tmp(GPRInfo::regT7));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+ int32_t things[8];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 2);
+ CHECK(things[3] == 3);
+ CHECK(things[4] == 4);
+ CHECK(things[5] == 5);
+ CHECK(things[6] == 6);
+ CHECK(things[7] == 7);
+}
+
+void testShuffleSimpleRotate()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32));
+
+ int32_t things[4];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 3);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 2);
+ CHECK(things[3] == 4);
+}
+
+void testShuffleSimpleBroadcast()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32));
+
+ int32_t things[4];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 1);
+ CHECK(things[3] == 1);
+}
+
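+// Broadcasts one register into every other allocatable GP register. The results are staged through
+// a stack slot so regT0 can be reused as a scratch when copying them out to the things array.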
+void testShuffleBroadcastAllRegs()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ const Vector<Reg>& regs = code.regsInPriorityOrder(Arg::GP);
+
+ BasicBlock* root = code.addBlock();
+ root->append(Move, nullptr, Arg::imm(35), Tmp(GPRInfo::regT0));
+ unsigned count = 1;
+ for (Reg reg : regs) {
+ if (reg != Reg(GPRInfo::regT0))
+ loadConstant(root, count++, Tmp(reg));
+ }
+ Inst& shuffle = root->append(Shuffle, nullptr);
+ for (Reg reg : regs) {
+ if (reg != Reg(GPRInfo::regT0))
+ shuffle.append(Tmp(GPRInfo::regT0), Tmp(reg), Arg::widthArg(Arg::Width32));
+ }
+
+ StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+ for (unsigned i = 0; i < regs.size(); ++i)
+ root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+ Vector<int32_t> things(regs.size(), 666);
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
+ for (unsigned i = 0; i < regs.size(); ++i) {
+ root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+ }
+
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ for (int32_t thing : things)
+ CHECK(thing == 35);
+}
+
+void testShuffleTreeShift()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ loadConstant(root, 7, Tmp(GPRInfo::regT6));
+ loadConstant(root, 8, Tmp(GPRInfo::regT7));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32));
+
+ int32_t things[8];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 1);
+ CHECK(things[3] == 2);
+ CHECK(things[4] == 2);
+ CHECK(things[5] == 3);
+ CHECK(things[6] == 3);
+ CHECK(things[7] == 4);
+}
+
+void testShuffleTreeShiftBackward()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ loadConstant(root, 7, Tmp(GPRInfo::regT6));
+ loadConstant(root, 8, Tmp(GPRInfo::regT7));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+ int32_t things[8];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 1);
+ CHECK(things[3] == 2);
+ CHECK(things[4] == 2);
+ CHECK(things[5] == 3);
+ CHECK(things[6] == 3);
+ CHECK(things[7] == 4);
+}
+
+void testShuffleTreeShiftOtherBackward()
+{
+    // NOTE: This test was my original attempt at TreeShiftBackward, but I made mistakes along the
+    // way, so it ends up being just a weird test. Weird tests are useful, though, so I kept it.
+
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ loadConstant(root, 7, Tmp(GPRInfo::regT6));
+ loadConstant(root, 8, Tmp(GPRInfo::regT7));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT7), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT7), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+ int32_t things[8];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 8);
+ CHECK(things[2] == 8);
+ CHECK(things[3] == 7);
+ CHECK(things[4] == 7);
+ CHECK(things[5] == 6);
+ CHECK(things[6] == 6);
+ CHECK(things[7] == 5);
+}
+
+void testShuffleMultipleShifts()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+ int32_t things[6];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 3);
+ CHECK(things[3] == 3);
+ CHECK(things[4] == 3);
+ CHECK(things[5] == 1);
+}
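+// A rotate whose cycle also feeds extra destinations (the "fringe"): regT0-regT2 rotate among
+// themselves while their old values are additionally copied into regT3-regT5.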
+
+void testShuffleRotateWithFringe()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+ int32_t things[6];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 3);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 2);
+ CHECK(things[3] == 1);
+ CHECK(things[4] == 2);
+ CHECK(things[5] == 3);
+}
+
+void testShuffleRotateWithFringeInWeirdOrder()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32));
+
+ int32_t things[6];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 3);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 2);
+ CHECK(things[3] == 1);
+ CHECK(things[4] == 2);
+ CHECK(things[5] == 3);
+}
+
+void testShuffleRotateWithLongFringe()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+ int32_t things[6];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 3);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 2);
+ CHECK(things[3] == 1);
+ CHECK(things[4] == 4);
+ CHECK(things[5] == 5);
+}
+
+void testShuffleMultipleRotates()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32));
+
+ int32_t things[6];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 3);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 2);
+ CHECK(things[3] == 6);
+ CHECK(things[4] == 4);
+ CHECK(things[5] == 5);
+}
+
+void testShuffleShiftAndRotate()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ loadConstant(root, 4, Tmp(GPRInfo::regT3));
+ loadConstant(root, 5, Tmp(GPRInfo::regT4));
+ loadConstant(root, 6, Tmp(GPRInfo::regT5));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+ int32_t things[6];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 3);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 2);
+ CHECK(things[3] == 4);
+ CHECK(things[4] == 4);
+ CHECK(things[5] == 5);
+}
+
+void testShuffleShiftAllRegs()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ const Vector<Reg>& regs = code.regsInPriorityOrder(Arg::GP);
+
+ BasicBlock* root = code.addBlock();
+ for (unsigned i = 0; i < regs.size(); ++i)
+ loadConstant(root, 35 + i, Tmp(regs[i]));
+ Inst& shuffle = root->append(Shuffle, nullptr);
+ for (unsigned i = 1; i < regs.size(); ++i)
+ shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+
+ StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+ for (unsigned i = 0; i < regs.size(); ++i)
+ root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+ Vector<int32_t> things(regs.size(), 666);
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
+ for (unsigned i = 0; i < regs.size(); ++i) {
+ root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+ }
+
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 35);
+ for (unsigned i = 1; i < regs.size(); ++i)
+ CHECK(things[i] == 35 + static_cast<int32_t>(i) - 1);
+}
+
+void testShuffleRotateAllRegs()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ const Vector<Reg>& regs = code.regsInPriorityOrder(Arg::GP);
+
+ BasicBlock* root = code.addBlock();
+ for (unsigned i = 0; i < regs.size(); ++i)
+ loadConstant(root, 35 + i, Tmp(regs[i]));
+ Inst& shuffle = root->append(Shuffle, nullptr);
+ for (unsigned i = 1; i < regs.size(); ++i)
+ shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+ shuffle.append(Tmp(regs.last()), Tmp(regs[0]), Arg::widthArg(Arg::Width32));
+
+ StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+ for (unsigned i = 0; i < regs.size(); ++i)
+ root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+ Vector<int32_t> things(regs.size(), 666);
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
+ for (unsigned i = 0; i < regs.size(); ++i) {
+ root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+ }
+
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 35 + static_cast<int32_t>(regs.size()) - 1);
+ for (unsigned i = 1; i < regs.size(); ++i)
+ CHECK(things[i] == 35 + static_cast<int32_t>(i) - 1);
+}
+
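+// The same swap and shift patterns at Width64, using constants that do not fit in 32 bits so a
+// truncating move would be caught.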
+void testShuffleSimpleSwap64()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+ loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+ loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+ loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width64));
+
+ int64_t things[4];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 10000000000000000ll);
+ CHECK(things[1] == 20000000000000000ll);
+ CHECK(things[2] == 40000000000000000ll);
+ CHECK(things[3] == 30000000000000000ll);
+}
+
+void testShuffleSimpleShift64()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+ loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+ loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+ loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+ loadConstant(root, 50000000000000000ll, Tmp(GPRInfo::regT4));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width64));
+
+ int64_t things[5];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int64_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 10000000000000000ll);
+ CHECK(things[1] == 20000000000000000ll);
+ CHECK(things[2] == 30000000000000000ll);
+ CHECK(things[3] == 30000000000000000ll);
+ CHECK(things[4] == 40000000000000000ll);
+}
+
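+// Mixed-width shuffles: a Width32 entry transfers only the low 32 bits of its source (zero-extended
+// when the destination is a register), which is why the checks below cast the expected values to
+// uint32_t.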
+void testShuffleSwapMixedWidth()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+ loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+ loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+ loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width64));
+
+ int64_t things[4];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 10000000000000000ll);
+ CHECK(things[1] == 20000000000000000ll);
+ CHECK(things[2] == 40000000000000000ll);
+ CHECK(things[3] == static_cast<uint32_t>(30000000000000000ll));
+}
+
+void testShuffleShiftMixedWidth()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+ loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+ loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+ loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+ loadConstant(root, 50000000000000000ll, Tmp(GPRInfo::regT4));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+ Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32));
+
+ int64_t things[5];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int64_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 10000000000000000ll);
+ CHECK(things[1] == 20000000000000000ll);
+ CHECK(things[2] == 30000000000000000ll);
+ CHECK(things[3] == 30000000000000000ll);
+ CHECK(things[4] == static_cast<uint32_t>(40000000000000000ll));
+}
+
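+// Shuffle entries are not restricted to registers: sources and destinations can also be addresses,
+// so these tests mix register and memory endpoints in the same shuffle.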
+void testShuffleShiftMemory()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int32_t memory[2];
+ memory[0] = 35;
+ memory[1] = 36;
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+ Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+ Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32));
+
+ int32_t things[2];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 1);
+ CHECK(memory[0] == 35);
+ CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryLong()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int32_t memory[2];
+ memory[0] = 35;
+ memory[1] = 36;
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ loadConstant(root, 3, Tmp(GPRInfo::regT2));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT3));
+ root->append(
+ Shuffle, nullptr,
+
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+
+ Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT3), 0 * sizeof(int32_t)),
+ Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT3), 0 * sizeof(int32_t)),
+ Arg::addr(Tmp(GPRInfo::regT3), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT3), 1 * sizeof(int32_t)), Tmp(GPRInfo::regT2),
+ Arg::widthArg(Arg::Width32));
+
+ int32_t things[3];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 1);
+ CHECK(things[2] == 36);
+ CHECK(memory[0] == 2);
+ CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryAllRegs()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int32_t memory[2];
+ memory[0] = 35;
+ memory[1] = 36;
+
+ Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+ regs.removeFirst(Reg(GPRInfo::regT0));
+
+ BasicBlock* root = code.addBlock();
+ for (unsigned i = 0; i < regs.size(); ++i)
+ loadConstant(root, i + 1, Tmp(regs[i]));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+ Inst& shuffle = root->append(
+ Shuffle, nullptr,
+
+ Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int32_t)),
+ Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int32_t)),
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int32_t)), Tmp(regs[1]),
+ Arg::widthArg(Arg::Width32));
+
+ for (unsigned i = 2; i < regs.size(); ++i)
+ shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+
+ Vector<int32_t> things(regs.size(), 666);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ for (unsigned i = 0; i < regs.size(); ++i) {
+ root->append(
+ Move32, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int32_t)));
+ }
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 36);
+ for (unsigned i = 2; i < regs.size(); ++i)
+ CHECK(things[i] == static_cast<int32_t>(i));
+ CHECK(memory[0] == 1);
+ CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryAllRegs64()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int64_t memory[2];
+ memory[0] = 35000000000000ll;
+ memory[1] = 36000000000000ll;
+
+ Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+ regs.removeFirst(Reg(GPRInfo::regT0));
+
+ BasicBlock* root = code.addBlock();
+ for (unsigned i = 0; i < regs.size(); ++i)
+ loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+ Inst& shuffle = root->append(
+ Shuffle, nullptr,
+
+ Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+ Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+ Arg::widthArg(Arg::Width64));
+
+ for (unsigned i = 2; i < regs.size(); ++i)
+ shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+ Vector<int64_t> things(regs.size(), 666);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ for (unsigned i = 0; i < regs.size(); ++i) {
+ root->append(
+ Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+ }
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1000000000000ll);
+ CHECK(things[1] == 36000000000000ll);
+ for (unsigned i = 2; i < regs.size(); ++i)
+ CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+ CHECK(memory[0] == 1000000000000ll);
+ CHECK(memory[1] == 35000000000000ll);
+}
+
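+// Returns `high` with its low 32 bits replaced by the low 32 bits of `low` (assuming little-endian
+// layout). This models a Width32 shuffle entry whose destination is a 64-bit memory slot: only the
+// low half of the slot is overwritten.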
+int64_t combineHiLo(int64_t high, int64_t low)
+{
+ union {
+ int64_t value;
+ int32_t halves[2];
+ } u;
+ u.value = high;
+ u.halves[0] = static_cast<int32_t>(low);
+ return u.value;
+}
+
+void testShuffleShiftMemoryAllRegsMixedWidth()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int64_t memory[2];
+ memory[0] = 35000000000000ll;
+ memory[1] = 36000000000000ll;
+
+ Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+ regs.removeFirst(Reg(GPRInfo::regT0));
+
+ BasicBlock* root = code.addBlock();
+ for (unsigned i = 0; i < regs.size(); ++i)
+ loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+ Inst& shuffle = root->append(
+ Shuffle, nullptr,
+
+ Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+ Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+ Arg::widthArg(Arg::Width32));
+
+ for (unsigned i = 2; i < regs.size(); ++i) {
+ shuffle.append(
+ Tmp(regs[i - 1]), Tmp(regs[i]),
+ (i & 1) ? Arg::widthArg(Arg::Width32) : Arg::widthArg(Arg::Width64));
+ }
+
+ Vector<int64_t> things(regs.size(), 666);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ for (unsigned i = 0; i < regs.size(); ++i) {
+ root->append(
+ Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+ }
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1000000000000ll);
+ CHECK(things[1] == static_cast<uint32_t>(36000000000000ll));
+ for (unsigned i = 2; i < regs.size(); ++i) {
+ int64_t value = static_cast<int64_t>(i) * 1000000000000ll;
+ CHECK(things[i] == ((i & 1) ? static_cast<uint32_t>(value) : value));
+ }
+ CHECK(memory[0] == combineHiLo(35000000000000ll, 1000000000000ll));
+ CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemory()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int32_t memory[2];
+ memory[0] = 35;
+ memory[1] = 36;
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2, Tmp(GPRInfo::regT1));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+ root->append(
+ Shuffle, nullptr,
+
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+
+ Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+ Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+ Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Tmp(GPRInfo::regT0),
+ Arg::widthArg(Arg::Width32));
+
+ int32_t things[2];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+ root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 36);
+ CHECK(things[1] == 1);
+ CHECK(memory[0] == 2);
+ CHECK(memory[1] == 35);
+}
+
+void testShuffleRotateMemory64()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int64_t memory[2];
+ memory[0] = 35000000000000ll;
+ memory[1] = 36000000000000ll;
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1000000000000ll, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2000000000000ll, Tmp(GPRInfo::regT1));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+ root->append(
+ Shuffle, nullptr,
+
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width64),
+
+ Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+ Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+ Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Tmp(GPRInfo::regT0),
+ Arg::widthArg(Arg::Width64));
+
+ int64_t things[2];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 36000000000000ll);
+ CHECK(things[1] == 1000000000000ll);
+ CHECK(memory[0] == 2000000000000ll);
+ CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemoryMixedWidth()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int64_t memory[2];
+ memory[0] = 35000000000000ll;
+ memory[1] = 36000000000000ll;
+
+ BasicBlock* root = code.addBlock();
+ loadConstant(root, 1000000000000ll, Tmp(GPRInfo::regT0));
+ loadConstant(root, 2000000000000ll, Tmp(GPRInfo::regT1));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+ root->append(
+ Shuffle, nullptr,
+
+ Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+
+ Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+ Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+ Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Tmp(GPRInfo::regT0),
+ Arg::widthArg(Arg::Width64));
+
+ int64_t things[2];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+ root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 36000000000000ll);
+ CHECK(things[1] == static_cast<uint32_t>(1000000000000ll));
+ CHECK(memory[0] == 2000000000000ll);
+ CHECK(memory[1] == combineHiLo(36000000000000ll, 35000000000000ll));
+}
+
+void testShuffleRotateMemoryAllRegs64()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int64_t memory[2];
+ memory[0] = 35000000000000ll;
+ memory[1] = 36000000000000ll;
+
+ Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+ regs.removeFirst(Reg(GPRInfo::regT0));
+
+ BasicBlock* root = code.addBlock();
+ for (unsigned i = 0; i < regs.size(); ++i)
+ loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+ Inst& shuffle = root->append(
+ Shuffle, nullptr,
+
+ Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+ Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+ Arg::widthArg(Arg::Width64),
+
+        Tmp(regs.last()), Tmp(regs[0]), Arg::widthArg(Arg::Width64));
+
+ for (unsigned i = 2; i < regs.size(); ++i)
+ shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+ Vector<int64_t> things(regs.size(), 666);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ for (unsigned i = 0; i < regs.size(); ++i) {
+ root->append(
+ Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+ }
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == static_cast<int64_t>(regs.size()) * 1000000000000ll);
+ CHECK(things[1] == 36000000000000ll);
+ for (unsigned i = 2; i < regs.size(); ++i)
+ CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+ CHECK(memory[0] == 1000000000000ll);
+ CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemoryAllRegsMixedWidth()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ int64_t memory[2];
+ memory[0] = 35000000000000ll;
+ memory[1] = 36000000000000ll;
+
+ Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+ regs.removeFirst(Reg(GPRInfo::regT0));
+
+ BasicBlock* root = code.addBlock();
+ for (unsigned i = 0; i < regs.size(); ++i)
+ loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+ root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+ Inst& shuffle = root->append(
+ Shuffle, nullptr,
+
+ Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+ Arg::widthArg(Arg::Width32),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+ Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+ Arg::widthArg(Arg::Width32),
+
+        Tmp(regs.last()), Tmp(regs[0]), Arg::widthArg(Arg::Width32));
+
+ for (unsigned i = 2; i < regs.size(); ++i)
+ shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+ Vector<int64_t> things(regs.size(), 666);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ for (unsigned i = 0; i < regs.size(); ++i) {
+ root->append(
+ Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+ }
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == static_cast<uint32_t>(static_cast<int64_t>(regs.size()) * 1000000000000ll));
+ CHECK(things[1] == static_cast<uint32_t>(36000000000000ll));
+ for (unsigned i = 2; i < regs.size(); ++i)
+ CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+ CHECK(memory[0] == combineHiLo(35000000000000ll, 1000000000000ll));
+ CHECK(memory[1] == 35000000000000ll);
+}
+
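+// Shuffles over floating-point registers: the entries use Width64 and the values are observed with
+// MoveDouble stores.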
+void testShuffleSwapDouble()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadDoubleConstant(root, 1, Tmp(FPRInfo::fpRegT0), Tmp(GPRInfo::regT0));
+ loadDoubleConstant(root, 2, Tmp(FPRInfo::fpRegT1), Tmp(GPRInfo::regT0));
+ loadDoubleConstant(root, 3, Tmp(FPRInfo::fpRegT2), Tmp(GPRInfo::regT0));
+ loadDoubleConstant(root, 4, Tmp(FPRInfo::fpRegT3), Tmp(GPRInfo::regT0));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(FPRInfo::fpRegT2), Tmp(FPRInfo::fpRegT3), Arg::widthArg(Arg::Width64),
+ Tmp(FPRInfo::fpRegT3), Tmp(FPRInfo::fpRegT2), Arg::widthArg(Arg::Width64));
+
+ double things[4];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT3), Arg::addr(base, 3 * sizeof(double)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 2);
+ CHECK(things[2] == 4);
+ CHECK(things[3] == 3);
+}
+
+void testShuffleShiftDouble()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ loadDoubleConstant(root, 1, Tmp(FPRInfo::fpRegT0), Tmp(GPRInfo::regT0));
+ loadDoubleConstant(root, 2, Tmp(FPRInfo::fpRegT1), Tmp(GPRInfo::regT0));
+ loadDoubleConstant(root, 3, Tmp(FPRInfo::fpRegT2), Tmp(GPRInfo::regT0));
+ loadDoubleConstant(root, 4, Tmp(FPRInfo::fpRegT3), Tmp(GPRInfo::regT0));
+ root->append(
+ Shuffle, nullptr,
+ Tmp(FPRInfo::fpRegT2), Tmp(FPRInfo::fpRegT3), Arg::widthArg(Arg::Width64));
+
+ double things[4];
+ Tmp base = code.newTmp(Arg::GP);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT3), Arg::addr(base, 3 * sizeof(double)));
+ root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+ root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+ memset(things, 0, sizeof(things));
+
+ CHECK(!compileAndRun<int>(proc));
+
+ CHECK(things[0] == 1);
+ CHECK(things[1] == 2);
+ CHECK(things[2] == 3);
+ CHECK(things[3] == 3);
+}
+
+#if CPU(X86) || CPU(X86_64)
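+// These tests exercise the three-operand double multiply on x86 (VMULSD). The *Rex variants place
+// operands and destinations in high XMM registers (xmm13-xmm15) so the extended-register encoding
+// bits get covered, and the *Addr variants use base+offset memory operands, including a base that
+// itself needs an extended encoding (r13).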
+void testX86VMULSD()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(FPRInfo::argumentFPR1), Tmp(FPRInfo::argumentFPR2));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDDestRex()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm15));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOp1DestRex()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+ root->append(MulDouble, nullptr, Tmp(X86Registers::xmm14), Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm15));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOp2DestRex()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm14));
+ root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm15));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOpsDestRex()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm13));
+ root->append(MulDouble, nullptr, Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm13), Tmp(X86Registers::xmm15));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ CHECK(compileAndRun<double>(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddr()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(GPRInfo::argumentGPR0), - 16), Tmp(FPRInfo::argumentFPR2));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ double secondArg = 4.2;
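+ // The caller passes &secondArg + 2, i.e. 16 bytes past the double; the addr operand's -16 displacement brings the load back to secondArg.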
+ CHECK(compileAndRun<double>(proc, 2.4, &secondArg + 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddrOpRexAddr()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+ root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(X86Registers::r13), - 16), Tmp(FPRInfo::argumentFPR2));
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ double secondArg = 4.2;
+ CHECK(compileAndRun<double>(proc, 2.4, &secondArg + 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDDestRexAddr()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(GPRInfo::argumentGPR0), 16), Tmp(X86Registers::xmm15));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ double secondArg = 4.2;
+ CHECK(compileAndRun<double>(proc, 2.4, &secondArg - 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDRegOpDestRexAddr()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+ root->append(MulDouble, nullptr, Arg::addr(Tmp(GPRInfo::argumentGPR0)), Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm15));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ double secondArg = 4.2;
+ CHECK(compileAndRun<double>(proc, 2.4, &secondArg, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddrOpDestRexAddr()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+ root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(X86Registers::r13), 8), Tmp(X86Registers::xmm15));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ double secondArg = 4.2;
+ CHECK(compileAndRun<double>(proc, 2.4, &secondArg - 1, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDBaseNeedsRex()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+ root->append(MulDouble, nullptr, Arg::index(Tmp(X86Registers::r13), Tmp(GPRInfo::argumentGPR1)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ double secondArg = 4.2;
+ uint64_t index = 8;
+ CHECK(compileAndRun<double>(proc, 2.4, &secondArg - 1, index, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDIndexNeedsRex()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR1), Tmp(X86Registers::r13));
+ root->append(MulDouble, nullptr, Arg::index(Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ double secondArg = 4.2;
+ uint64_t index = - 8;
+ CHECK(compileAndRun<double>(proc, 2.4, &secondArg + 1, index, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDBaseIndexNeedRex()
+{
+ B3::Procedure proc;
+ Code& code = proc.code();
+
+ BasicBlock* root = code.addBlock();
+ root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r12));
+ root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR1), Tmp(X86Registers::r13));
+ root->append(MulDouble, nullptr, Arg::index(Tmp(X86Registers::r12), Tmp(X86Registers::r13)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+ root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+ root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+ double secondArg = 4.2;
+ uint64_t index = 16;
+ CHECK(compileAndRun<double>(proc, 2.4, &secondArg - 2, index, pureNaN()) == 2.4 * 4.2);
+}
+
+#endif
+
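+// Wraps each test in a shared task so that the tests can be distributed across the worker threads created in run().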
+#define RUN(test) do { \
+ if (!shouldRun(#test)) \
+ break; \
+ tasks.append( \
+ createSharedTask<void()>( \
+ [&] () { \
+ dataLog(#test "...\n"); \
+ test; \
+ dataLog(#test ": OK!\n"); \
+ })); \
+ } while (false);
+
+void run(const char* filter)
+{
+ JSC::initializeThreading();
+ vm = &VM::create(LargeHeap).leakRef();
+
+ Deque<RefPtr<SharedTask<void()>>> tasks;
+
+ auto shouldRun = [&] (const char* testName) -> bool {
+ return !filter || !!strcasestr(testName, filter);
+ };
+
+ RUN(testSimple());
+
+ RUN(testShuffleSimpleSwap());
+ RUN(testShuffleSimpleShift());
+ RUN(testShuffleLongShift());
+ RUN(testShuffleLongShiftBackwards());
+ RUN(testShuffleSimpleRotate());
+ RUN(testShuffleSimpleBroadcast());
+ RUN(testShuffleBroadcastAllRegs());
+ RUN(testShuffleTreeShift());
+ RUN(testShuffleTreeShiftBackward());
+ RUN(testShuffleTreeShiftOtherBackward());
+ RUN(testShuffleMultipleShifts());
+ RUN(testShuffleRotateWithFringe());
+ RUN(testShuffleRotateWithFringeInWeirdOrder());
+ RUN(testShuffleRotateWithLongFringe());
+ RUN(testShuffleMultipleRotates());
+ RUN(testShuffleShiftAndRotate());
+ RUN(testShuffleShiftAllRegs());
+ RUN(testShuffleRotateAllRegs());
+ RUN(testShuffleSimpleSwap64());
+ RUN(testShuffleSimpleShift64());
+ RUN(testShuffleSwapMixedWidth());
+ RUN(testShuffleShiftMixedWidth());
+ RUN(testShuffleShiftMemory());
+ RUN(testShuffleShiftMemoryLong());
+ RUN(testShuffleShiftMemoryAllRegs());
+ RUN(testShuffleShiftMemoryAllRegs64());
+ RUN(testShuffleShiftMemoryAllRegsMixedWidth());
+ RUN(testShuffleRotateMemory());
+ RUN(testShuffleRotateMemory64());
+ RUN(testShuffleRotateMemoryMixedWidth());
+ RUN(testShuffleRotateMemoryAllRegs64());
+ RUN(testShuffleRotateMemoryAllRegsMixedWidth());
+ RUN(testShuffleSwapDouble());
+ RUN(testShuffleShiftDouble());
+
+#if CPU(X86) || CPU(X86_64)
+ RUN(testX86VMULSD());
+ RUN(testX86VMULSDDestRex());
+ RUN(testX86VMULSDOp1DestRex());
+ RUN(testX86VMULSDOp2DestRex());
+ RUN(testX86VMULSDOpsDestRex());
+
+ RUN(testX86VMULSDAddr());
+ RUN(testX86VMULSDAddrOpRexAddr());
+ RUN(testX86VMULSDDestRexAddr());
+ RUN(testX86VMULSDRegOpDestRexAddr());
+ RUN(testX86VMULSDAddrOpDestRexAddr());
+
+ RUN(testX86VMULSDBaseNeedsRex());
+ RUN(testX86VMULSDIndexNeedsRex());
+ RUN(testX86VMULSDBaseIndexNeedRex());
+#endif
+
+ if (tasks.isEmpty())
+ usage();
+
+ Lock lock;
+
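+ // When a filter is given, run the matching tasks on a single worker thread; otherwise spawn one worker per core.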
+ Vector<ThreadIdentifier> threads;
+ for (unsigned i = filter ? 1 : WTF::numberOfProcessorCores(); i--;) {
+ threads.append(
+ createThread(
+ "testb3 thread",
+ [&] () {
+ for (;;) {
+ RefPtr<SharedTask<void()>> task;
+ {
+ LockHolder locker(lock);
+ if (tasks.isEmpty())
+ return;
+ task = tasks.takeFirst();
+ }
+
+ task->run();
+ }
+ }));
+ }
+
+ for (ThreadIdentifier thread : threads)
+ waitForThreadCompletion(thread);
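+ // Acquire the crash lock before returning: a test thread that is mid-way through reporting a failure holds it, and main() must not exit cleanly underneath that report.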
+ crashLock.lock();
+}
+
+} // anonymous namespace
+
+#else // ENABLE(B3_JIT)
+
+static void run(const char*)
+{
+ dataLog("B3 JIT is not enabled.\n");
+}
+
+#endif // ENABLE(B3_JIT)
+
+int main(int argc, char** argv)
+{
+ const char* filter = nullptr;
+ switch (argc) {
+ case 1:
+ break;
+ case 2:
+ filter = argv[1];
+ break;
+ default:
+ usage();
+ break;
+ }
+
+ run(filter);
+ return 0;
+}
diff --git a/Source/JavaScriptCore/b3/testb3.cpp b/Source/JavaScriptCore/b3/testb3.cpp
new file mode 100644
index 000000000..a2eebe8eb
--- /dev/null
+++ b/Source/JavaScriptCore/b3/testb3.cpp
@@ -0,0 +1,15923 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirValidate.h"
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3CCallValue.h"
+#include "B3Compilation.h"
+#include "B3Compile.h"
+#include "B3ComputeDivisionMagic.h"
+#include "B3Const32Value.h"
+#include "B3ConstPtrValue.h"
+#include "B3Effects.h"
+#include "B3FenceValue.h"
+#include "B3Generate.h"
+#include "B3LowerToAir.h"
+#include "B3MathExtras.h"
+#include "B3MemoryValue.h"
+#include "B3MoveConstants.h"
+#include "B3Procedure.h"
+#include "B3ReduceStrength.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3UseCounts.h"
+#include "B3Validate.h"
+#include "B3ValueInlines.h"
+#include "B3VariableValue.h"
+#include "B3WasmAddressValue.h"
+#include "B3WasmBoundsCheckValue.h"
+#include "CCallHelpers.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "InitializeThreading.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "PureNaN.h"
+#include "VM.h"
+#include <cmath>
+#include <string>
+#include <wtf/ListDump.h>
+#include <wtf/Lock.h>
+#include <wtf/NumberOfCores.h>
+#include <wtf/Threading.h>
+
+// We don't have a NO_RETURN_DUE_TO_EXIT, nor should we. That's ridiculous.
+static bool hiddenTruthBecauseNoReturnIsStupid() { return true; }
+
+static void usage()
+{
+ dataLog("Usage: testb3 [<filter>]\n");
+ if (hiddenTruthBecauseNoReturnIsStupid())
+ exit(1);
+}
+
+#if ENABLE(B3_JIT)
+
+using namespace JSC;
+using namespace JSC::B3;
+
+namespace {
+
+bool shouldBeVerbose()
+{
+ return shouldDumpIR(B3Mode);
+}
+
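+// Tests run concurrently; a failing check acquires this lock before reporting so that failure output from different threads is not interleaved.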
+StaticLock crashLock;
+
+// Nothing fancy for now; we just use the existing WTF assertion machinery.
+#define CHECK(x) do { \
+ if (!!(x)) \
+ break; \
+ crashLock.lock(); \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #x); \
+ CRASH(); \
+ } while (false)
+
+#define CHECK_EQ(x, y) do { \
+ auto __x = (x); \
+ auto __y = (y); \
+ if (__x == __y) \
+ break; \
+ crashLock.lock(); \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, toCString(#x " == " #y, " (" #x " == ", __x, ", " #y " == ", __y, ")").data()); \
+ CRASH(); \
+ } while (false)
+
+VM* vm;
+
+std::unique_ptr<Compilation> compile(Procedure& procedure, unsigned optLevel = 1)
+{
+ return std::make_unique<Compilation>(B3::compile(*vm, procedure, optLevel));
+}
+
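+// Casts the compiled code's entry point to a C function pointer of the requested signature and calls it.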
+template<typename T, typename... Arguments>
+T invoke(MacroAssemblerCodePtr ptr, Arguments... arguments)
+{
+ T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(ptr.executableAddress());
+ return function(arguments...);
+}
+
+template<typename T, typename... Arguments>
+T invoke(const Compilation& code, Arguments... arguments)
+{
+ return invoke<T>(code.code(), arguments...);
+}
+
+template<typename T, typename... Arguments>
+T compileAndRun(Procedure& procedure, Arguments... arguments)
+{
+ return invoke<T>(*compile(procedure), arguments...);
+}
+
+void lowerToAirForTesting(Procedure& proc)
+{
+ proc.resetReachability();
+
+ if (shouldBeVerbose())
+ dataLog("B3 before lowering:\n", proc);
+
+ validate(proc);
+ lowerToAir(proc);
+
+ if (shouldBeVerbose())
+ dataLog("Air after lowering:\n", proc.code());
+
+ Air::validate(proc.code());
+}
+
+template<typename Func>
+void checkDisassembly(Compilation& compilation, const Func& func, CString failText)
+{
+ CString disassembly = compilation.disassembly();
+ if (func(disassembly.data()))
+ return;
+
+ crashLock.lock();
+ dataLog("Bad lowering! ", failText, "\n");
+ dataLog(disassembly);
+ CRASH();
+}
+
+void checkUsesInstruction(Compilation& compilation, const char* text)
+{
+ checkDisassembly(
+ compilation,
+ [&] (const char* disassembly) -> bool {
+ return strstr(disassembly, text);
+ },
+ toCString("Expected to find ", text, " but didnt!"));
+}
+
+void checkDoesNotUseInstruction(Compilation& compilation, const char* text)
+{
+ checkDisassembly(
+ compilation,
+ [&] (const char* disassembly) -> bool {
+ return !strstr(disassembly, text);
+ },
+ toCString("Did not expected to find ", text, " but it's there!"));
+}
+
+template<typename Type>
+struct Operand {
+ const char* name;
+ Type value;
+};
+
+typedef Operand<int64_t> Int64Operand;
+typedef Operand<int32_t> Int32Operand;
+
+template<typename FloatType>
+void populateWithInterestingValues(Vector<Operand<FloatType>>& operands)
+{
+ operands.append({ "0.", static_cast<FloatType>(0.) });
+ operands.append({ "-0.", static_cast<FloatType>(-0.) });
+ operands.append({ "0.4", static_cast<FloatType>(0.5) });
+ operands.append({ "-0.4", static_cast<FloatType>(-0.5) });
+ operands.append({ "0.5", static_cast<FloatType>(0.5) });
+ operands.append({ "-0.5", static_cast<FloatType>(-0.5) });
+ operands.append({ "0.6", static_cast<FloatType>(0.5) });
+ operands.append({ "-0.6", static_cast<FloatType>(-0.5) });
+ operands.append({ "1.", static_cast<FloatType>(1.) });
+ operands.append({ "-1.", static_cast<FloatType>(-1.) });
+ operands.append({ "2.", static_cast<FloatType>(2.) });
+ operands.append({ "-2.", static_cast<FloatType>(-2.) });
+ operands.append({ "M_PI", static_cast<FloatType>(M_PI) });
+ operands.append({ "-M_PI", static_cast<FloatType>(-M_PI) });
+ operands.append({ "min", std::numeric_limits<FloatType>::min() });
+ operands.append({ "max", std::numeric_limits<FloatType>::max() });
+ operands.append({ "lowest", std::numeric_limits<FloatType>::lowest() });
+ operands.append({ "epsilon", std::numeric_limits<FloatType>::epsilon() });
+ operands.append({ "infiniti", std::numeric_limits<FloatType>::infinity() });
+ operands.append({ "-infiniti", - std::numeric_limits<FloatType>::infinity() });
+ operands.append({ "PNaN", static_cast<FloatType>(PNaN) });
+}
+
+template<typename FloatType>
+Vector<Operand<FloatType>> floatingPointOperands()
+{
+ Vector<Operand<FloatType>> operands;
+ populateWithInterestingValues(operands);
+ return operands;
+}
+
+static Vector<Int64Operand> int64Operands()
+{
+ Vector<Int64Operand> operands;
+ operands.append({ "0", 0 });
+ operands.append({ "1", 1 });
+ operands.append({ "-1", -1 });
+ operands.append({ "42", 42 });
+ operands.append({ "-42", -42 });
+ operands.append({ "int64-max", std::numeric_limits<int64_t>::max() });
+ operands.append({ "int64-min", std::numeric_limits<int64_t>::min() });
+ operands.append({ "int32-max", std::numeric_limits<int32_t>::max() });
+ operands.append({ "int32-min", std::numeric_limits<int32_t>::min() });
+
+ return operands;
+}
+
+static Vector<Int32Operand> int32Operands()
+{
+ Vector<Int32Operand> operands({
+ { "0", 0 },
+ { "1", 1 },
+ { "-1", -1 },
+ { "42", 42 },
+ { "-42", -42 },
+ { "int32-max", std::numeric_limits<int32_t>::max() },
+ { "int32-min", std::numeric_limits<int32_t>::min() }
+ });
+ return operands;
+}
+
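+// Emits dest = src1 + src2 with CCallHelpers, taking care not to clobber src2 when it aliases dest.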
+void add32(CCallHelpers& jit, GPRReg src1, GPRReg src2, GPRReg dest)
+{
+ if (src2 == dest)
+ jit.add32(src1, dest);
+ else {
+ jit.move(src1, dest);
+ jit.add32(src2, dest);
+ }
+}
+
+void test42()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* const42 = root->appendNew<Const32Value>(proc, Origin(), 42);
+ root->appendNewControlValue(proc, Return, Origin(), const42);
+
+ CHECK(compileAndRun<int>(proc) == 42);
+}
+
+void testLoad42()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int x = 42;
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &x)));
+
+ CHECK(compileAndRun<int>(proc) == 42);
+}
+
+void testLoadWithOffsetImpl(int32_t offset64, int32_t offset32)
+{
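+ // The base pointer passed to the compiled code is biased by -offset, so base + offset always lands on &x; this exercises how each offset gets encoded (or materialized) in the load.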
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int64_t x = -42;
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int64, Origin(),
+ base,
+ offset64));
+
+ char* address = reinterpret_cast<char*>(&x) - offset64;
+ CHECK(compileAndRun<int64_t>(proc, address) == -42);
+ }
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int32_t x = -42;
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ base,
+ offset32));
+
+ char* address = reinterpret_cast<char*>(&x) - offset32;
+ CHECK(compileAndRun<int32_t>(proc, address) == -42);
+ }
+}
+
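+// The offsets below probe addressing-mode encoding boundaries (e.g. ARM64's signed imm9 and scaled unsigned imm12 forms, where 64-bit loads scale by 8 and 32-bit loads by 4).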
+void testLoadOffsetImm9Max()
+{
+ testLoadWithOffsetImpl(255, 255);
+}
+
+void testLoadOffsetImm9MaxPlusOne()
+{
+ testLoadWithOffsetImpl(256, 256);
+}
+
+void testLoadOffsetImm9MaxPlusTwo()
+{
+ testLoadWithOffsetImpl(257, 257);
+}
+
+void testLoadOffsetImm9Min()
+{
+ testLoadWithOffsetImpl(-256, -256);
+}
+
+void testLoadOffsetImm9MinMinusOne()
+{
+ testLoadWithOffsetImpl(-257, -257);
+}
+
+void testLoadOffsetScaledUnsignedImm12Max()
+{
+ testLoadWithOffsetImpl(32760, 16380);
+}
+
+void testLoadOffsetScaledUnsignedOverImm12Max()
+{
+ testLoadWithOffsetImpl(32760, 32760);
+ testLoadWithOffsetImpl(32761, 16381);
+ testLoadWithOffsetImpl(32768, 16384);
+}
+
+void testArg(int argument)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+
+ CHECK(compileAndRun<int>(proc, argument) == argument);
+}
+
+void testReturnConst64(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), value));
+
+ CHECK(compileAndRun<int64_t>(proc) == value);
+}
+
+void testReturnVoid()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(proc, Return, Origin());
+ compileAndRun<void>(proc);
+}
+
+void testAddArg(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), value, value));
+
+ CHECK(compileAndRun<int>(proc, a) == a + a);
+}
+
+void testAddArgs(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
+void testAddArgImm(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int>(proc, a) == a + b);
+}
+
+void testAddImmArg(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(compileAndRun<int>(proc, b) == a + b);
+}
+
+void testAddArgMem(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ load);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int64_t inputOutput = b;
+ CHECK(!compileAndRun<int64_t>(proc, a, &inputOutput));
+ CHECK(inputOutput == a + b);
+}
+
+void testAddMemArg(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(),
+ load,
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int64_t>(proc, &a, b) == a + b);
+}
+
+void testAddImmMem(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ load);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int64_t inputOutput = b;
+ CHECK(!compileAndRun<int>(proc, &inputOutput));
+ CHECK(inputOutput == a + b);
+}
+
+void testAddArg32(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), value, value));
+
+ CHECK(compileAndRun<int>(proc, a) == a + a);
+}
+
+void testAddArgs32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
+void testAddArgMem32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), argument, load);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int32_t inputOutput = b;
+ CHECK(!compileAndRun<int32_t>(proc, a, &inputOutput));
+ CHECK(inputOutput == a + b);
+}
+
+void testAddMemArg32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), load, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int32_t>(proc, &a, b) == a + b);
+}
+
+void testAddImmMem32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ load);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int32_t inputOutput = b;
+ CHECK(!compileAndRun<int>(proc, &inputOutput));
+ CHECK(inputOutput == a + b);
+}
+
+void testAddArgZeroImmZDef()
+{
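+ // A 32-bit Add must zero-extend its result into the 64-bit register (ZDef), so only the low 32 bits of the argument survive even though the addend is zero.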
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* constZero = root->appendNew<Const32Value>(proc, Origin(), 0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ arg,
+ constZero));
+
+ auto code = compile(proc, 0);
+ CHECK(invoke<int64_t>(*code, 0x0123456789abcdef) == 0x89abcdef);
+}
+
+void testAddLoadTwice()
+{
+ auto test = [&] (unsigned optLevel) {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int32_t value = 42;
+ Value* load = root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &value));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), load, load));
+
+ auto code = compile(proc, optLevel);
+ CHECK(invoke<int32_t>(*code) == 42 * 2);
+ };
+
+ test(0);
+ test(1);
+}
+
+void testAddArgDouble(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), value, value));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), a + a));
+}
+
+void testAddArgsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), a + b));
+}
+
+void testAddArgImmDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), a + b));
+}
+
+void testAddImmArgDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, b), a + b));
+}
+
+void testAddImmsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), a + b));
+}
+
+void testAddArgFloat(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), floatValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a + a)));
+}
+
+void testAddArgsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), floatValue1, floatValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddFPRArgsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+ Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1));
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, a, b), a + b));
+}
+
+void testAddArgImmFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), floatValue, constValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddImmArgFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), constValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddImmsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), constValue1, constValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddArgFloatWithUselessDoubleConversion(float a)
+{
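+ // The float operand takes a round trip through double before and after the add; as the name says, the conversions are useless, and the result is checked against the plain float sum.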
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), asDouble, asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a + a)));
+}
+
+void testAddArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), asDouble1, asDouble2);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a + b)));
+}
+
+void testAddArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
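+ // Unlike the "useless" variants, the intermediate double sum is also stored to memory, so the conversions to and from double are observable and must not be folded away.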
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), asDouble1, asDouble2);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ double effect = 0;
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a + b)));
+ CHECK(isIdentical(effect, static_cast<double>(a) + static_cast<double>(b)));
+}
+
+void testMulArg(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<Value>(
+ proc, Trunc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mul, Origin(), value, value));
+
+ CHECK(compileAndRun<int>(proc, a) == a * a);
+}
+
+void testMulArgStore(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ int mulSlot;
+ int valueSlot;
+
+ Value* value = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* mul = root->appendNew<Value>(proc, Mul, Origin(), value, value);
+
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(), value,
+ root->appendNew<ConstPtrValue>(proc, Origin(), &valueSlot));
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(), mul,
+ root->appendNew<ConstPtrValue>(proc, Origin(), &mulSlot));
+
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, a));
+ CHECK(mulSlot == a * a);
+ CHECK(valueSlot == a);
+}
+
+void testMulAddArg(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(proc, Mul, Origin(), value, value),
+ value));
+
+ CHECK(compileAndRun<int>(proc, a) == a * a + a);
+}
+
+void testMulArgs(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Mul, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int>(proc, a, b) == a * b);
+}
+
+void testMulArgImm(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Mul, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == a * b);
+}
+
+void testMulImmArg(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Mul, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(compileAndRun<int>(proc, b) == a * b);
+}
+
+void testMulArgs32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Mul, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int>(proc, a, b) == a * b);
+}
+
+void testMulLoadTwice()
+{
+ auto test = [&] (unsigned optLevel) {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int32_t value = 42;
+ Value* load = root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &value));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mul, Origin(), load, load));
+
+ auto code = compile(proc, optLevel);
+ CHECK(invoke<int32_t>(*code) == 42 * 42);
+ };
+
+ test(0);
+ test(1);
+}
+
+void testMulAddArgsLeft()
+{
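+ // Mul feeding the left operand of an Add: a candidate for a fused multiply-add where the target has one; the result is checked against a.value * b.value + c.value over all interesting operands.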
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+ Value* added = root->appendNew<Value>(proc, Add, Origin(), multiplied, arg2);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int64Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ for (auto c : testValues) {
+ CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value * b.value + c.value);
+ }
+ }
+ }
+}
+
+void testMulAddArgsRight()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+ Value* added = root->appendNew<Value>(proc, Add, Origin(), arg0, multiplied);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int64Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ for (auto c : testValues) {
+ CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value + b.value * c.value);
+ }
+ }
+ }
+}
+
+void testMulAddArgsLeft32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+ Value* added = root->appendNew<Value>(proc, Add, Origin(), multiplied, arg2);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int32Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ for (auto c : testValues) {
+ CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value * b.value + c.value);
+ }
+ }
+ }
+}
+
+void testMulAddArgsRight32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+ Value* added = root->appendNew<Value>(proc, Add, Origin(), arg0, multiplied);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int32Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ for (auto c : testValues) {
+ CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value + b.value * c.value);
+ }
+ }
+ }
+}
+
+void testMulSubArgsLeft()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+ Value* added = root->appendNew<Value>(proc, Sub, Origin(), multiplied, arg2);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int64Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ for (auto c : testValues) {
+ CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value * b.value - c.value);
+ }
+ }
+ }
+}
+
+void testMulSubArgsRight()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+ Value* added = root->appendNew<Value>(proc, Sub, Origin(), arg0, multiplied);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int64Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ for (auto c : testValues) {
+ CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value - b.value * c.value);
+ }
+ }
+ }
+}
+
+void testMulSubArgsLeft32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+ Value* added = root->appendNew<Value>(proc, Sub, Origin(), multiplied, arg2);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int32Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ for (auto c : testValues) {
+ CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value * b.value - c.value);
+ }
+ }
+ }
+}
+
+void testMulSubArgsRight32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+ Value* added = root->appendNew<Value>(proc, Sub, Origin(), arg0, multiplied);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int32Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ for (auto c : testValues) {
+ CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value - b.value * c.value);
+ }
+ }
+ }
+}
+
+void testMulNegArgs()
+{
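+ // 0 - (a * b): a candidate for a multiply-negate lowering; the result is checked against -(a.value * b.value) for all interesting operand pairs.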
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+ Value* zero = root->appendNew<Const64Value>(proc, Origin(), 0);
+ Value* added = root->appendNew<Value>(proc, Sub, Origin(), zero, multiplied);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int64Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ CHECK(invoke<int64_t>(*code, a.value, b.value) == -(a.value * b.value));
+ }
+ }
+}
+
+void testMulNegArgs32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+ Value* zero = root->appendNew<Const32Value>(proc, Origin(), 0);
+ Value* added = root->appendNew<Value>(proc, Sub, Origin(), zero, multiplied);
+ root->appendNewControlValue(proc, Return, Origin(), added);
+
+ auto code = compile(proc);
+
+ auto testValues = int32Operands();
+ for (auto a : testValues) {
+ for (auto b : testValues) {
+ CHECK(invoke<int32_t>(*code, a.value, b.value) == -(a.value * b.value));
+ }
+ }
+}
+
+void testMulArgDouble(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mul, Origin(), value, value));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), a * a));
+}
+
+void testMulArgsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), a * b));
+}
+
+void testMulArgImmDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), a * b));
+}
+
+void testMulImmArgDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, b), a * b));
+}
+
+void testMulImmsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), a * b));
+}
+
+void testMulArgFloat(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * a)));
+}
+
+void testMulArgsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue1, floatValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulArgImmFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue, constValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulImmArgFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Mul, Origin(), constValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulImmsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Mul, Origin(), constValue1, constValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(a * b)));
+}
+
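+ // The "WithUselessDoubleConversion" tests widen the float operands with FloatToDouble, do the
+ // arithmetic in double, and narrow the result with DoubleToFloat. The double value is otherwise
+ // unused, so the round-trip presumably exercises elimination of redundant conversions; either
+ // way the returned bits must match the plain float computation that the CHECK performs.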
+void testMulArgFloatWithUselessDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble, asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * a)));
+}
+
+void testMulArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+ Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble1, asDouble2);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
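+ // In the "WithEffectfulDoubleConversion" variant the intermediate double is also stored through
+ // the pointer passed in argumentGPR2, so the double computation is observable and cannot be
+ // dropped. The test checks both the returned float bits and the stored double.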
+void testMulArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+ Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble1, asDouble2);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ double effect = 0;
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a * b)));
+ CHECK(isIdentical(effect, static_cast<double>(a) * static_cast<double>(b)));
+}
+
+void testDivArgDouble(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Div, Origin(), value, value));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), a / a));
+}
+
+void testDivArgsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), a / b));
+}
+
+void testDivArgImmDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), a / b));
+}
+
+void testDivImmArgDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, b), a / b));
+}
+
+void testDivImmsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), a / b));
+}
+
+void testDivArgFloat(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Div, Origin(), floatValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a / a)));
+}
+
+void testDivArgsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* result = root->appendNew<Value>(proc, Div, Origin(), floatValue1, floatValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a / b)));
+}
+
+void testDivArgImmFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Div, Origin(), floatValue, constValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a / b)));
+}
+
+void testDivImmArgFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Div, Origin(), constValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a / b)));
+}
+
+void testDivImmsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Div, Origin(), constValue1, constValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(a / b)));
+}
+
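+ // Floating-point Mod is checked against fmod(); the float variants compare against the double
+ // fmod() result cast back to float.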
+void testModArgDouble(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mod, Origin(), value, value));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), fmod(a, a)));
+}
+
+void testModArgsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), fmod(a, b)));
+}
+
+void testModArgImmDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), fmod(a, b)));
+}
+
+void testModImmArgDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, b), fmod(a, b)));
+}
+
+void testModImmsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), fmod(a, b)));
+}
+
+void testModArgFloat(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), floatValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, a)))));
+}
+
+void testModArgsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), floatValue1, floatValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
+void testModArgImmFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), floatValue, constValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
+void testModImmArgFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), constValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
+void testModImmsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), constValue1, constValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
+void testDivArgFloatWithUselessDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Div, Origin(), asDouble, asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a / a)));
+}
+
+void testDivArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+ Value* result = root->appendNew<Value>(proc, Div, Origin(), asDouble1, asDouble2);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a / b)));
+}
+
+void testDivArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+ Value* result = root->appendNew<Value>(proc, Div, Origin(), asDouble1, asDouble2);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ double effect = 0;
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a / b)));
+ CHECK(isIdentical(effect, static_cast<double>(a) / static_cast<double>(b)));
+}
+
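+ // Unsigned division and modulus. A zero denominator is invalid for UDiv/UMod, so such inputs
+ // are skipped rather than compiled.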
+void testUDivArgsInt32(uint32_t a, uint32_t b)
+{
+ // UDiv with denominator == 0 is invalid.
+ if (!b)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* result = root->appendNew<Value>(proc, UDiv, Origin(), argument1, argument2);
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ CHECK_EQ(compileAndRun<uint32_t>(proc, a, b), a / b);
+}
+
+void testUDivArgsInt64(uint64_t a, uint64_t b)
+{
+ // UDiv with denominator == 0 is invalid.
+ if (!b)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* result = root->appendNew<Value>(proc, UDiv, Origin(), argument1, argument2);
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ CHECK_EQ(compileAndRun<uint64_t>(proc, a, b), a / b);
+}
+
+void testUModArgsInt32(uint32_t a, uint32_t b)
+{
+ // UMod with denominator == 0 is invalid.
+ if (!b)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* result = root->appendNew<Value>(proc, UMod, Origin(), argument1, argument2);
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ CHECK_EQ(compileAndRun<uint32_t>(proc, a, b), a % b);
+}
+
+void testUModArgsInt64(uint64_t a, uint64_t b)
+{
+ // UMod with denominator == 0 is invalid.
+ if (!b)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* result = root->appendNew<Value>(proc, UMod, Origin(), argument1, argument2);
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ CHECK_EQ(compileAndRun<uint64_t>(proc, a, b), a % b);
+}
+
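+ // The Sub tests follow the usual naming scheme: "Arg" operands arrive in argument registers,
+ // "Imm" operands are compiled-in constants, and "Mem" operands are loaded through a pointer
+ // argument. When the result is stored back to memory, the procedure returns zero and the test
+ // checks the memory cell instead of the return value.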
+void testSubArg(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sub, Origin(), value, value));
+
+ CHECK(!compileAndRun<int>(proc, a));
+}
+
+void testSubArgs(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int>(proc, a, b) == a - b);
+}
+
+void testSubArgImm(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == a - b);
+}
+
+void testNegValueSubOne(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* negArgument = root->appendNew<Value>(proc, Sub, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), 0),
+ argument);
+ Value* negArgumentMinusOne = root->appendNew<Value>(proc, Sub, Origin(),
+ negArgument,
+ root->appendNew<Const64Value>(proc, Origin(), 1));
+ root->appendNewControlValue(proc, Return, Origin(), negArgumentMinusOne);
+ CHECK(compileAndRun<int>(proc, a) == -a - 1);
+}
+
+void testSubImmArg(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(compileAndRun<int>(proc, b) == a - b);
+}
+
+void testSubArgMem(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ load);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int64_t>(proc, a, &b) == a - b);
+}
+
+void testSubMemArg(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+ load,
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int64_t inputOutput = a;
+ CHECK(!compileAndRun<int64_t>(proc, &inputOutput, b));
+ CHECK(inputOutput == a - b);
+}
+
+void testSubImmMem(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ load);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int64_t inputOutput = b;
+ CHECK(!compileAndRun<int>(proc, &inputOutput));
+ CHECK(inputOutput == a - b);
+}
+
+void testSubMemImm(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+ load,
+ root->appendNew<Const64Value>(proc, Origin(), b));
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int64_t inputOutput = a;
+ CHECK(!compileAndRun<int>(proc, &inputOutput));
+ CHECK(inputOutput == a - b);
+}
+
+void testSubArgs32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int>(proc, a, b) == a - b);
+}
+
+void testSubArgImm32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int>(proc, a) == a - b);
+}
+
+void testSubImmArg32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int>(proc, b) == a - b);
+}
+
+void testSubMemArg32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), load, argument);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int32_t inputOutput = a;
+ CHECK(!compileAndRun<int32_t>(proc, &inputOutput, b));
+ CHECK(inputOutput == a - b);
+}
+
+void testSubArgMem32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), argument, load);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int32_t>(proc, a, &b) == a - b);
+}
+
+void testSubImmMem32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ load);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int32_t inputOutput = b;
+ CHECK(!compileAndRun<int>(proc, &inputOutput));
+ CHECK(inputOutput == a - b);
+}
+
+void testSubMemImm32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+ load,
+ root->appendNew<Const32Value>(proc, Origin(), b));
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int32_t inputOutput = a;
+ CHECK(!compileAndRun<int>(proc, &inputOutput));
+ CHECK(inputOutput == a - b);
+}
+
+void testNegValueSubOne32(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* negArgument = root->appendNew<Value>(proc, Sub, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0),
+ argument);
+ Value* negArgumentMinusOne = root->appendNew<Value>(proc, Sub, Origin(),
+ negArgument,
+ root->appendNew<Const32Value>(proc, Origin(), 1));
+ root->appendNewControlValue(proc, Return, Origin(), negArgumentMinusOne);
+ CHECK(compileAndRun<int>(proc, a) == -a - 1);
+}
+
+void testSubArgDouble(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sub, Origin(), value, value));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), a - a));
+}
+
+void testSubArgsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sub, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), a - b));
+}
+
+void testSubArgImmDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sub, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), a - b));
+}
+
+void testSubImmArgDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sub, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, b), a - b));
+}
+
+void testSubImmsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sub, Origin(), valueA, valueB));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), a - b));
+}
+
+void testSubArgFloat(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), floatValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a - a)));
+}
+
+void testSubArgsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), floatValue1, floatValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubArgImmFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), floatValue, constValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubImmArgFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), constValue, floatValue);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubImmsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), constValue1, constValue2);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubArgFloatWithUselessDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), asDouble, asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a - a)));
+}
+
+void testSubArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), asDouble1, asDouble2);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), asDouble1, asDouble2);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ double effect = 0;
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a - b)));
+ CHECK(isIdentical(effect, static_cast<double>(a) - static_cast<double>(b)));
+}
+
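+ // Lowers a two-operand Sub to Air and verifies that exactly one instruction with the expected
+ // opcode was selected, in the three-operand (Tmp, Tmp, Tmp) form.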
+void testTernarySubInstructionSelection(B3::Opcode valueModifier, Type valueType, Air::Opcode expectedOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* right = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+ if (valueModifier == Trunc) {
+ left = root->appendNew<Value>(proc, valueModifier, valueType, Origin(), left);
+ right = root->appendNew<Value>(proc, valueModifier, valueType, Origin(), right);
+ }
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sub, Origin(), left, right));
+
+ lowerToAirForTesting(proc);
+
+ auto block = proc.code()[0];
+ unsigned numberOfSubInstructions = 0;
+ for (auto instruction : *block) {
+ if (instruction.kind.opcode == expectedOpcode) {
+ CHECK_EQ(instruction.args.size(), 3ul);
+ CHECK_EQ(instruction.args[0].kind(), Air::Arg::Tmp);
+ CHECK_EQ(instruction.args[1].kind(), Air::Arg::Tmp);
+ CHECK_EQ(instruction.args[2].kind(), Air::Arg::Tmp);
+ numberOfSubInstructions++;
+ }
+ }
+ CHECK_EQ(numberOfSubInstructions, 1ul);
+}
+
+void testNegDouble(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Neg, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), -a));
+}
+
+void testNegFloat(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Neg, Origin(), floatValue));
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), -a));
+}
+
+void testNegFloatWithUselessDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Neg, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), -a));
+}
+
+void testBitAndArgs(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int64_t>(proc, a, b) == (a & b));
+}
+
+void testBitAndSameArg(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ argument,
+ argument));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == a);
+}
+
+void testBitAndImms(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc) == (a & b));
+}
+
+void testBitAndArgImm(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == (a & b));
+}
+
+void testBitAndImmArg(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(compileAndRun<int64_t>(proc, b) == (a & b));
+}
+
+void testBitAndBitAndArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitAnd = root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ innerBitAnd,
+ root->appendNew<Const64Value>(proc, Origin(), c)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == ((a & b) & c));
+}
+
+void testBitAndImmBitAndArgImm(int64_t a, int64_t b, int64_t c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitAnd = root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), c));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ innerBitAnd));
+
+ CHECK(compileAndRun<int64_t>(proc, b) == (a & (b & c)));
+}
+
+void testBitAndArgs32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int>(proc, a, b) == (a & b));
+}
+
+void testBitAndSameArg32(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ argument,
+ argument));
+
+ CHECK(compileAndRun<int>(proc, a) == a);
+}
+
+void testBitAndImms32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int>(proc) == (a & b));
+}
+
+void testBitAndArgImm32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int>(proc, a) == (a & b));
+}
+
+void testBitAndImmArg32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int>(proc, b) == (a & b));
+}
+
+void testBitAndBitAndArgImmImm32(int a, int b, int c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitAnd = root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ innerBitAnd,
+ root->appendNew<Const32Value>(proc, Origin(), c)));
+
+ CHECK(compileAndRun<int>(proc, a) == ((a & b) & c));
+}
+
+void testBitAndImmBitAndArgImm32(int a, int b, int c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitAnd = root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), c));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ innerBitAnd));
+
+ CHECK(compileAndRun<int>(proc, b) == (a & (b & c)));
+}
+
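+ // Equal produces 0 or 1, so masking it with 0x5 leaves the boolean unchanged and the BitXor
+ // with 0x1 inverts it; the Select therefore returns -5 when a == b and 42 otherwise.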
+void testBitAndWithMaskReturnsBooleans(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* equal = root->appendNew<Value>(proc, Equal, Origin(), arg0, arg1);
+ Value* maskedEqual = root->appendNew<Value>(proc, BitAnd, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0x5),
+ equal);
+ Value* inverted = root->appendNew<Value>(proc, BitXor, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0x1),
+ maskedEqual);
+ Value* select = root->appendNew<Value>(proc, Select, Origin(), inverted,
+ root->appendNew<Const64Value>(proc, Origin(), 42),
+ root->appendNew<Const64Value>(proc, Origin(), -5));
+
+ root->appendNewControlValue(proc, Return, Origin(), select);
+
+ int64_t expected = (a == b) ? -5 : 42;
+ CHECK(compileAndRun<int64_t>(proc, a, b) == expected);
+}
+
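+ // Reference helper: BitAnd on doubles operates on the raw bit patterns.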
+double bitAndDouble(double a, double b)
+{
+ return bitwise_cast<double>(bitwise_cast<uint64_t>(a) & bitwise_cast<uint64_t>(b));
+}
+
+void testBitAndArgDouble(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argument, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), bitAndDouble(a, a)));
+}
+
+void testBitAndArgsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), bitAndDouble(a, b)));
+}
+
+void testBitAndArgImmDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), bitAndDouble(a, b)));
+}
+
+void testBitAndImmsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<double>(proc), bitAndDouble(a, b)));
+}
+
+float bitAndFloat(float a, float b)
+{
+ return bitwise_cast<float>(bitwise_cast<uint32_t>(a) & bitwise_cast<uint32_t>(b));
+}
+
+void testBitAndArgFloat(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argument, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), bitAndFloat(a, a)));
+}
+
+void testBitAndArgsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* argumentB = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitAndFloat(a, b)));
+}
+
+void testBitAndArgImmFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitAndFloat(a, b)));
+}
+
+void testBitAndImmsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, BitAnd, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc), bitAndFloat(a, b)));
+}
+
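+// Same as testBitAndArgsFloat, but the operands are promoted to double, combined with a
+// double BitAnd, and converted back to float; the expected value follows the same double path.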
+void testBitAndArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* argumentB = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* argumentAasDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argumentA);
+ Value* argumentBasDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argumentB);
+ Value* doubleResult = root->appendNew<Value>(proc, BitAnd, Origin(), argumentAasDouble, argumentBasDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), doubleResult);
+ root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+ double doubleA = a;
+ double doubleB = b;
+ float expected = static_cast<float>(bitAndDouble(doubleA, doubleB));
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), expected));
+}
+
+void testBitOrArgs(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int64_t>(proc, a, b) == (a | b));
+}
+
+void testBitOrSameArg(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ argument,
+ argument));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == a);
+}
+
+void testBitOrImms(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc) == (a | b));
+}
+
+void testBitOrArgImm(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == (a | b));
+}
+
+void testBitOrImmArg(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(compileAndRun<int64_t>(proc, b) == (a | b));
+}
+
+void testBitOrBitOrArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitOr = root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ innerBitOr,
+ root->appendNew<Const64Value>(proc, Origin(), c)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == ((a | b) | c));
+}
+
+void testBitOrImmBitOrArgImm(int64_t a, int64_t b, int64_t c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitOr = root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), c));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ innerBitOr));
+
+ CHECK(compileAndRun<int64_t>(proc, b) == (a | (b | c)));
+}
+
+void testBitOrArgs32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int>(proc, a, b) == (a | b));
+}
+
+void testBitOrSameArg32(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ argument,
+ argument));
+
+ CHECK(compileAndRun<int>(proc, a) == a);
+}
+
+void testBitOrImms32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int>(proc) == (a | b));
+}
+
+void testBitOrArgImm32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int>(proc, a) == (a | b));
+}
+
+void testBitOrImmArg32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int>(proc, b) == (a | b));
+}
+
+void testBitOrBitOrArgImmImm32(int a, int b, int c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitOr = root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ innerBitOr,
+ root->appendNew<Const32Value>(proc, Origin(), c)));
+
+ CHECK(compileAndRun<int>(proc, a) == ((a | b) | c));
+}
+
+void testBitOrImmBitOrArgImm32(int a, int b, int c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitOr = root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), c));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ innerBitOr));
+
+ CHECK(compileAndRun<int>(proc, b) == (a | (b | c)));
+}
+
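+// Reference implementation for the double BitOr tests: OR the 64-bit patterns and reinterpret as double.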
+double bitOrDouble(double a, double b)
+{
+ return bitwise_cast<double>(bitwise_cast<uint64_t>(a) | bitwise_cast<uint64_t>(b));
+}
+
+void testBitOrArgDouble(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argument, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), bitOrDouble(a, a)));
+}
+
+void testBitOrArgsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), bitOrDouble(a, b)));
+}
+
+void testBitOrArgImmDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a, b), bitOrDouble(a, b)));
+}
+
+void testBitOrImmsDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<double>(proc), bitOrDouble(a, b)));
+}
+
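+// Reference implementation for the float BitOr tests: OR the 32-bit patterns and reinterpret as float.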
+float bitOrFloat(float a, float b)
+{
+ return bitwise_cast<float>(bitwise_cast<uint32_t>(a) | bitwise_cast<uint32_t>(b));
+}
+
+void testBitOrArgFloat(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argument, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), bitOrFloat(a, a)));
+}
+
+void testBitOrArgsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* argumentB = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitOrFloat(a, b)));
+}
+
+void testBitOrArgImmFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitOrFloat(a, b)));
+}
+
+void testBitOrImmsFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+ Value* result = root->appendNew<Value>(proc, BitOr, Origin(), argumentA, argumentB);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(isIdentical(compileAndRun<float>(proc), bitOrFloat(a, b)));
+}
+
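+// Same as testBitOrArgsFloat, but the operands are promoted to double, combined with a
+// double BitOr, and converted back to float; the expected value follows the same double path.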
+void testBitOrArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentA = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* argumentB = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* argumentAasDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argumentA);
+ Value* argumentBasDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argumentB);
+ Value* doubleResult = root->appendNew<Value>(proc, BitOr, Origin(), argumentAasDouble, argumentBasDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), doubleResult);
+ root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+ double doubleA = a;
+ double doubleB = b;
+ float expected = static_cast<float>(bitOrDouble(doubleA, doubleB));
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), expected));
+}
+
+void testBitXorArgs(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int64_t>(proc, a, b) == (a ^ b));
+}
+
+void testBitXorSameArg(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ argument,
+ argument));
+
+ CHECK(!compileAndRun<int64_t>(proc, a));
+}
+
+void testBitXorImms(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc) == (a ^ b));
+}
+
+void testBitXorArgImm(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == (a ^ b));
+}
+
+void testBitXorImmArg(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(compileAndRun<int64_t>(proc, b) == (a ^ b));
+}
+
+void testBitXorBitXorArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitXor = root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), b));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ innerBitXor,
+ root->appendNew<Const64Value>(proc, Origin(), c)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == ((a ^ b) ^ c));
+}
+
+void testBitXorImmBitXorArgImm(int64_t a, int64_t b, int64_t c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitXor = root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), c));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ innerBitXor));
+
+ CHECK(compileAndRun<int64_t>(proc, b) == (a ^ (b ^ c)));
+}
+
+void testBitXorArgs32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int>(proc, a, b) == (a ^ b));
+}
+
+void testBitXorSameArg32(int a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ argument,
+ argument));
+
+ CHECK(!compileAndRun<int>(proc, a));
+}
+
+void testBitXorImms32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int>(proc) == (a ^ b));
+}
+
+void testBitXorArgImm32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int>(proc, a) == (a ^ b));
+}
+
+void testBitXorImmArg32(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int>(proc, b) == (a ^ b));
+}
+
+void testBitXorBitXorArgImmImm32(int a, int b, int c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitXor = root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ innerBitXor,
+ root->appendNew<Const32Value>(proc, Origin(), c)));
+
+ CHECK(compileAndRun<int>(proc, a) == ((a ^ b) ^ c));
+}
+
+void testBitXorImmBitXorArgImm32(int a, int b, int c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* innerBitXor = root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), c));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ innerBitXor));
+
+ CHECK(compileAndRun<int>(proc, b) == (a ^ (b ^ c)));
+}
+
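+// The BitNot tests express bitwise negation as BitXor with an all-ones constant and check
+// the result against the argument XORed with all ones.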
+void testBitNotArg(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), -1),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(isIdentical(compileAndRun<int64_t>(proc, a), static_cast<int64_t>((static_cast<uint64_t>(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotImm(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), -1),
+ root->appendNew<Const64Value>(proc, Origin(), a)));
+
+ CHECK(isIdentical(compileAndRun<int64_t>(proc, a), static_cast<int64_t>((static_cast<uint64_t>(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotMem(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* notLoad = root->appendNew<Value>(proc, BitXor, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), -1),
+ load);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), notLoad, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int64_t input = a;
+ compileAndRun<int32_t>(proc, &input);
+ CHECK(isIdentical(input, static_cast<int64_t>((static_cast<uint64_t>(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotArg32(int32_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, BitXor, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), -1),
+ argument));
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, a), static_cast<int32_t>((static_cast<uint32_t>(a) ^ 0xffffffff))));
+}
+
+void testBitNotImm32(int32_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitXor, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), -1),
+ root->appendNew<Const32Value>(proc, Origin(), a)));
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, a), static_cast<int32_t>((static_cast<uint32_t>(a) ^ 0xffffffff))));
+}
+
+void testBitNotMem32(int32_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* notLoad = root->appendNew<Value>(proc, BitXor, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), -1),
+ load);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), notLoad, address);
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ int32_t input = a;
+ compileAndRun<int32_t>(proc, &input);
+ CHECK(isIdentical(input, static_cast<int32_t>((static_cast<uint32_t>(a) ^ 0xffffffff))));
+}
+
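+// Inverts an Equal result with BitXor -1 and branches on it: the then block returns 42 and
+// the else block returns -42, so 42 is expected exactly when the 32-bit arguments differ.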
+void testBitNotOnBooleanAndBranch32(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* argsAreEqual = root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2);
+ Value* argsAreNotEqual = root->appendNew<Value>(proc, BitXor, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), -1),
+ argsAreEqual);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ argsAreNotEqual,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -42));
+
+ int32_t expectedValue = (static_cast<int32_t>(a) != static_cast<int32_t>(b)) ? 42 : -42;
+ CHECK(compileAndRun<int32_t>(proc, a, b) == expectedValue);
+}
+
+void testShlArgs(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int64_t>(proc, a, b) == (a << b));
+}
+
+void testShlImms(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc) == (a << b));
+}
+
+void testShlArgImm(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == (a << b));
+}
+
+void testShlArg32(int32_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Shl, Origin(), value, value));
+
+ CHECK(compileAndRun<int32_t>(proc, a) == (a << a));
+}
+
+void testShlArgs32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int32_t>(proc, a, b) == (a << b));
+}
+
+void testShlImms32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int32_t>(proc) == (a << b));
+}
+
+void testShlArgImm32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int32_t>(proc, a) == (a << b));
+}
+
+void testSShrArgs(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int64_t>(proc, a, b) == (a >> b));
+}
+
+void testSShrImms(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc) == (a >> b));
+}
+
+void testSShrArgImm(int64_t a, int64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int64_t>(proc, a) == (a >> b));
+}
+
+void testSShrArg32(int32_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, SShr, Origin(), value, value));
+
+ CHECK(compileAndRun<int32_t>(proc, a) == (a >> (a & 31)));
+}
+
+void testSShrArgs32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int32_t>(proc, a, b) == (a >> b));
+}
+
+void testSShrImms32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int32_t>(proc) == (a >> b));
+}
+
+void testSShrArgImm32(int32_t a, int32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<int32_t>(proc, a) == (a >> b));
+}
+
+void testZShrArgs(uint64_t a, uint64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZShr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<uint64_t>(proc, a, b) == (a >> b));
+}
+
+void testZShrImms(uint64_t a, uint64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZShr, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<uint64_t>(proc) == (a >> b));
+}
+
+void testZShrArgImm(uint64_t a, uint64_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZShr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<uint64_t>(proc, a) == (a >> b));
+}
+
+void testZShrArg32(uint32_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, ZShr, Origin(), value, value));
+
+ CHECK(compileAndRun<uint32_t>(proc, a) == (a >> (a & 31)));
+}
+
+void testZShrArgs32(uint32_t a, uint32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZShr, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<uint32_t>(proc, a, b) == (a >> b));
+}
+
+void testZShrImms32(uint32_t a, uint32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZShr, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), a),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<uint32_t>(proc) == (a >> b));
+}
+
+void testZShrArgImm32(uint32_t a, uint32_t b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZShr, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), b)));
+
+ CHECK(compileAndRun<uint32_t>(proc, a) == (a >> b));
+}
+
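+// Portable reference implementation used to check the Clz opcode; returns the operand's
+// full bit width when the value is zero.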
+template<typename IntegerType>
+static unsigned countLeadingZero(IntegerType value)
+{
+ unsigned bitCount = sizeof(IntegerType) * 8;
+ if (!value)
+ return bitCount;
+
+ unsigned counter = 0;
+ while (!(static_cast<uint64_t>(value) & (1ull << (bitCount - 1)))) {
+ value <<= 1;
+ ++counter;
+ }
+ return counter;
+}
+
+void testClzArg64(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* clzValue = root->appendNew<Value>(proc, Clz, Origin(), argument);
+ root->appendNewControlValue(proc, Return, Origin(), clzValue);
+ CHECK(compileAndRun<unsigned>(proc, a) == countLeadingZero(a));
+}
+
+void testClzMem64(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* value = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* clzValue = root->appendNew<Value>(proc, Clz, Origin(), value);
+ root->appendNewControlValue(proc, Return, Origin(), clzValue);
+ CHECK(compileAndRun<unsigned>(proc, &a) == countLeadingZero(a));
+}
+
+void testClzArg32(int32_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* clzValue = root->appendNew<Value>(proc, Clz, Origin(), argument);
+ root->appendNewControlValue(proc, Return, Origin(), clzValue);
+ CHECK(compileAndRun<unsigned>(proc, a) == countLeadingZero(a));
+}
+
+void testClzMem32(int32_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* value = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* clzValue = root->appendNew<Value>(proc, Clz, Origin(), value);
+ root->appendNewControlValue(proc, Return, Origin(), clzValue);
+ CHECK(compileAndRun<unsigned>(proc, &a) == countLeadingZero(a));
+}
+
+void testAbsArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Abs, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), fabs(a)));
+}
+
+void testAbsImm(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Abs, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), fabs(a)));
+}
+
+void testAbsMem(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Abs, Origin(), loadDouble));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, &a), fabs(a)));
+}
+
+void testAbsAbsArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* firstAbs = root->appendNew<Value>(proc, Abs, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+ Value* secondAbs = root->appendNew<Value>(proc, Abs, Origin(), firstAbs);
+ root->appendNewControlValue(proc, Return, Origin(), secondAbs);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), fabs(a)));
+}
+
+void testAbsBitwiseCastArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentAsInt64 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argumentAsDouble = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentAsInt64);
+ Value* absValue = root->appendNew<Value>(proc, Abs, Origin(), argumentAsDouble);
+ root->appendNewControlValue(proc, Return, Origin(), absValue);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, bitwise_cast<int64_t>(a)), fabs(a)));
+}
+
+void testBitwiseCastAbsBitwiseCastArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentAsInt64 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argumentAsDouble = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentAsInt64);
+ Value* absValue = root->appendNew<Value>(proc, Abs, Origin(), argumentAsDouble);
+ Value* resultAsInt64 = root->appendNew<Value>(proc, BitwiseCast, Origin(), absValue);
+
+ root->appendNewControlValue(proc, Return, Origin(), resultAsInt64);
+
+ int64_t expectedResult = bitwise_cast<int64_t>(fabs(a));
+ CHECK(isIdentical(compileAndRun<int64_t>(proc, bitwise_cast<int64_t>(a)), expectedResult));
+}
+
+void testAbsArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Abs, Origin(), argument);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+}
+
+void testAbsImm(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Abs, Origin(), argument);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+}
+
+void testAbsMem(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Abs, Origin(), loadFloat);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, &a), bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+}
+
+void testAbsAbsArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* firstAbs = root->appendNew<Value>(proc, Abs, Origin(), argument);
+ Value* secondAbs = root->appendNew<Value>(proc, Abs, Origin(), firstAbs);
+ root->appendNewControlValue(proc, Return, Origin(), secondAbs);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), static_cast<float>(fabs(a))));
+}
+
+void testAbsBitwiseCastArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentAsInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argumentAsFloat = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentAsInt32);
+ Value* absValue = root->appendNew<Value>(proc, Abs, Origin(), argumentAsFloat);
+ root->appendNewControlValue(proc, Return, Origin(), absValue);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), static_cast<float>(fabs(a))));
+}
+
+void testBitwiseCastAbsBitwiseCastArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argumentAsInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argumentAsFloat = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentAsInt32);
+ Value* absValue = root->appendNew<Value>(proc, Abs, Origin(), argumentAsFloat);
+ Value* resultAsInt32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), absValue);
+
+ root->appendNewControlValue(proc, Return, Origin(), resultAsInt32);
+
+ int32_t expectedResult = bitwise_cast<int32_t>(static_cast<float>(fabs(a)));
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), expectedResult));
+}
+
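+// Same as testAbsArg(float), but Abs is applied to the argument promoted to double and the
+// result is converted back to float; the promotion cannot change the absolute value, so the
+// plain float result is expected.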
+void testAbsArgWithUselessDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Abs, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+}
+
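+// Unlike the "useless conversion" variant, the intermediate double result is also stored
+// through argumentGPR1, so the conversion to double is observable; the test checks both the
+// returned float bits and the stored double.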
+void testAbsArgWithEffectfulDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Abs, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ double effect = 0;
+ int32_t resultValue = compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), &effect);
+ CHECK(isIdentical(resultValue, bitwise_cast<int32_t>(static_cast<float>(fabs(a)))));
+ CHECK(isIdentical(effect, static_cast<double>(fabs(a))));
+}
+
+void testCeilArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Ceil, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(a)));
+}
+
+void testCeilImm(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Ceil, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), ceil(a)));
+}
+
+void testCeilMem(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Ceil, Origin(), loadDouble));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, &a), ceil(a)));
+}
+
+void testCeilCeilArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* firstCeil = root->appendNew<Value>(proc, Ceil, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+ Value* secondCeil = root->appendNew<Value>(proc, Ceil, Origin(), firstCeil);
+ root->appendNewControlValue(proc, Return, Origin(), secondCeil);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(a)));
+}
+
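+// Floor applied to a Ceil result leaves it unchanged (Ceil already yields an integral
+// value), so ceil(a) is still the expected result.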
+void testFloorCeilArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* firstCeil = root->appendNew<Value>(proc, Ceil, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+ Value* wrappingFloor = root->appendNew<Value>(proc, Floor, Origin(), firstCeil);
+ root->appendNewControlValue(proc, Return, Origin(), wrappingFloor);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(a)));
+}
+
+void testCeilIToD64(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argumentAsDouble = root->appendNew<Value>(proc, IToD, Origin(), argument);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Ceil, Origin(), argumentAsDouble));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(static_cast<double>(a))));
+}
+
+void testCeilIToD32(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argumentAsDouble = root->appendNew<Value>(proc, IToD, Origin(), argument);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Ceil, Origin(), argumentAsDouble));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), ceil(static_cast<double>(a))));
+}
+
+void testCeilArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Ceil, Origin(), argument);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(ceilf(a))));
+}
+
+void testCeilImm(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Ceil, Origin(), argument);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(ceilf(a))));
+}
+
+void testCeilMem(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Ceil, Origin(), loadFloat);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, &a), bitwise_cast<int32_t>(ceilf(a))));
+}
+
+void testCeilCeilArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* firstCeil = root->appendNew<Value>(proc, Ceil, Origin(), argument);
+ Value* secondCeil = root->appendNew<Value>(proc, Ceil, Origin(), firstCeil);
+ root->appendNewControlValue(proc, Return, Origin(), secondCeil);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), ceilf(a)));
+}
+
+void testFloorCeilArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* firstCeil = root->appendNew<Value>(proc, Ceil, Origin(), argument);
+ Value* wrappingFloor = root->appendNew<Value>(proc, Floor, Origin(), firstCeil);
+ root->appendNewControlValue(proc, Return, Origin(), wrappingFloor);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), ceilf(a)));
+}
+
+void testCeilArgWithUselessDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Ceil, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(ceilf(a))));
+}
+
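+// As with the Abs variant above, the intermediate double Ceil result is also stored through
+// argumentGPR1, so the double conversion is observable; both the returned float bits and the
+// stored double are checked.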
+void testCeilArgWithEffectfulDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Ceil, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ double effect = 0;
+ int32_t resultValue = compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), &effect);
+ CHECK(isIdentical(resultValue, bitwise_cast<int32_t>(ceilf(a))));
+ CHECK(isIdentical(effect, static_cast<double>(ceilf(a))));
+}
+
+void testFloorArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Floor, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), floor(a)));
+}
+
+void testFloorImm(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Floor, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), floor(a)));
+}
+
+void testFloorMem(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Floor, Origin(), loadDouble));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, &a), floor(a)));
+}
+
+void testFloorFloorArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* firstFloor = root->appendNew<Value>(proc, Floor, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+ Value* secondFloor = root->appendNew<Value>(proc, Floor, Origin(), firstFloor);
+ root->appendNewControlValue(proc, Return, Origin(), secondFloor);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), floor(a)));
+}
+
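+// Ceil applied to a Floor result leaves it unchanged, so floor(a) is still the expected result.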
+void testCeilFloorArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* firstFloor = root->appendNew<Value>(proc, Floor, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+ Value* wrappingCeil = root->appendNew<Value>(proc, Ceil, Origin(), firstFloor);
+ root->appendNewControlValue(proc, Return, Origin(), wrappingCeil);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), floor(a)));
+}
+
+void testFloorIToD64(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argumentAsDouble = root->appendNew<Value>(proc, IToD, Origin(), argument);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Floor, Origin(), argumentAsDouble));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), floor(static_cast<double>(a))));
+}
+
+void testFloorIToD32(int64_t a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argumentAsDouble = root->appendNew<Value>(proc, IToD, Origin(), argument);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Floor, Origin(), argumentAsDouble));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), floor(static_cast<double>(a))));
+}
+
+void testFloorArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Floor, Origin(), argument);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(floorf(a))));
+}
+
+void testFloorImm(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Floor, Origin(), argument);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(floorf(a))));
+}
+
+void testFloorMem(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Floor, Origin(), loadFloat);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, &a), bitwise_cast<int32_t>(floorf(a))));
+}
+
+void testFloorFloorArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* firstFloor = root->appendNew<Value>(proc, Floor, Origin(), argument);
+ Value* secondFloor = root->appendNew<Value>(proc, Floor, Origin(), firstFloor);
+ root->appendNewControlValue(proc, Return, Origin(), secondFloor);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), floorf(a)));
+}
+
+void testCeilFloorArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* firstFloor = root->appendNew<Value>(proc, Floor, Origin(), argument);
+ Value* wrappingCeil = root->appendNew<Value>(proc, Ceil, Origin(), firstFloor);
+ root->appendNewControlValue(proc, Return, Origin(), wrappingCeil);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), floorf(a)));
+}
+
+void testFloorArgWithUselessDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
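+ // The round trip through Double below is redundant; per the test name, B3 should be able to floor in Float directly.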
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Floor, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(floorf(a))));
+}
+
+void testFloorArgWithEffectfulDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Floor, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
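+ // Storing the Double result makes the conversion observable, so it cannot be removed as useless.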
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ double effect = 0;
+ int32_t resultValue = compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), &effect);
+ CHECK(isIdentical(resultValue, bitwise_cast<int32_t>(floorf(a))));
+ CHECK(isIdentical(effect, static_cast<double>(floorf(a))));
+}
+
+void testSqrtArg(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sqrt, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, a), sqrt(a)));
+}
+
+void testSqrtImm(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sqrt, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), sqrt(a)));
+}
+
+void testSqrtMem(double a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Sqrt, Origin(), loadDouble));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, &a), sqrt(a)));
+}
+
+void testSqrtArg(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), argument);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+}
+
+void testSqrtImm(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), argument);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+}
+
+void testSqrtMem(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+ Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), loadFloat);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, &a), bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+}
+
+void testSqrtArgWithUselessDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+}
+
+void testSqrtArgWithEffectfulDoubleConversion(float a)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* result = root->appendNew<Value>(proc, Sqrt, Origin(), asDouble);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+ Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+ Value* doubleAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleAddress);
+ root->appendNewControlValue(proc, Return, Origin(), result32);
+
+ double effect = 0;
+ int32_t resultValue = compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), &effect);
+ CHECK(isIdentical(resultValue, bitwise_cast<int32_t>(static_cast<float>(sqrt(a)))));
+ CHECK(isIdentical(effect, static_cast<double>(sqrt(a))));
+}
+
+void testCompareTwoFloatToDouble(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg1As32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg1Float = root->appendNew<Value>(proc, BitwiseCast, Origin(), arg1As32);
+ Value* arg1AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg1Float);
+
+ Value* arg2As32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* arg2Float = root->appendNew<Value>(proc, BitwiseCast, Origin(), arg2As32);
+ Value* arg2AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg2Float);
+ Value* equal = root->appendNew<Value>(proc, Equal, Origin(), arg1AsDouble, arg2AsDouble);
+
+ root->appendNewControlValue(proc, Return, Origin(), equal);
+
+ CHECK(compileAndRun<int64_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)) == (a == b));
+}
+
+void testCompareOneFloatToDouble(float a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg1As32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg1Float = root->appendNew<Value>(proc, BitwiseCast, Origin(), arg1As32);
+ Value* arg1AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg1Float);
+
+ Value* arg2AsDouble = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* equal = root->appendNew<Value>(proc, Equal, Origin(), arg1AsDouble, arg2AsDouble);
+
+ root->appendNewControlValue(proc, Return, Origin(), equal);
+
+ CHECK(compileAndRun<int64_t>(proc, bitwise_cast<int32_t>(a), b) == (a == b));
+}
+
+void testCompareFloatToDoubleThroughPhi(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* tail = proc.addBlock();
+
+ Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ Value* arg1As32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* arg1Float = root->appendNew<Value>(proc, BitwiseCast, Origin(), arg1As32);
+ Value* arg1AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg1Float);
+
+ Value* arg2AsDouble = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
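+ // Round arg2 down to Float and back so both comparison operands carry float-representable values.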
+ Value* arg2AsFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), arg2AsDouble);
+ Value* arg2AsFRoundedDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), arg2AsFloat);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ condition,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), arg1AsDouble);
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* elseConst = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+ UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), elseConst);
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+ thenValue->setPhi(doubleInput);
+ elseValue->setPhi(doubleInput);
+ Value* equal = tail->appendNew<Value>(proc, Equal, Origin(), doubleInput, arg2AsFRoundedDouble);
+ tail->appendNewControlValue(proc, Return, Origin(), equal);
+
+ auto code = compile(proc);
+ int32_t integerA = bitwise_cast<int32_t>(a);
+ double doubleB = b;
+ CHECK(invoke<int64_t>(*code, 1, integerA, doubleB) == (a == b));
+ CHECK(invoke<int64_t>(*code, 0, integerA, doubleB) == (b == 0));
+}
+
+void testDoubleToFloatThroughPhi(float value)
+{
+ // Simple case of:
+ // if (a)
+ // x = DoubleAdd(a, b)
+ // else
+ // x = DoubleAdd(a, c)
+ // DoubleToFloat(x)
+ //
+ // Both Adds can be converted to float Adds.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* tail = proc.addBlock();
+
+ Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* argAsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ condition,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ Value* positiveConst = thenCase->appendNew<ConstDoubleValue>(proc, Origin(), 42.5f);
+ Value* thenAdd = thenCase->appendNew<Value>(proc, Add, Origin(), argAsDouble, positiveConst);
+ UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), thenAdd);
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* elseConst = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), M_PI);
+ UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), elseConst);
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+ thenValue->setPhi(doubleInput);
+ elseValue->setPhi(doubleInput);
+ Value* floatResult = tail->appendNew<Value>(proc, DoubleToFloat, Origin(), doubleInput);
+ tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+ auto code = compile(proc);
+ CHECK(isIdentical(invoke<float>(*code, 1, bitwise_cast<int32_t>(value)), value + 42.5f));
+ CHECK(isIdentical(invoke<float>(*code, 0, bitwise_cast<int32_t>(value)), static_cast<float>(M_PI)));
+}
+
+void testReduceFloatToDoubleValidates()
+{
+ // Simple case of:
+ // f = DoubleToFloat(Bitcast(argGPR0))
+ // if (a)
+ // x = FloatConst()
+ // else
+ // x = FloatConst()
+ // p = Phi(x)
+ // a = Mul(p, p)
+ // b = Add(a, f)
+ // c = Add(p, b)
+ // Return(c)
+ //
+ // This should not crash in the validator after ReduceFloatToDouble.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* tail = proc.addBlock();
+
+ Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* thingy = root->appendNew<Value>(proc, BitwiseCast, Origin(), condition);
+ thingy = root->appendNew<Value>(proc, DoubleToFloat, Origin(), thingy); // Make the phase think it has work to do.
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ condition,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(),
+ thenCase->appendNew<ConstFloatValue>(proc, Origin(), 11.5));
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(),
+ elseCase->appendNew<ConstFloatValue>(proc, Origin(), 10.5));
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* phi = tail->appendNew<Value>(proc, Phi, Float, Origin());
+ thenValue->setPhi(phi);
+ elseValue->setPhi(phi);
+ Value* result = tail->appendNew<Value>(proc, Mul, Origin(),
+ phi, phi);
+ result = tail->appendNew<Value>(proc, Add, Origin(),
+ result,
+ thingy);
+ result = tail->appendNew<Value>(proc, Add, Origin(),
+ phi,
+ result);
+ tail->appendNewControlValue(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ CHECK(isIdentical(invoke<float>(*code, 1), 11.5f * 11.5f + static_cast<float>(bitwise_cast<double>(static_cast<uint64_t>(1))) + 11.5f));
+ CHECK(isIdentical(invoke<float>(*code, 0), 10.5f * 10.5f + static_cast<float>(bitwise_cast<double>(static_cast<uint64_t>(0))) + 10.5f));
+}
+
+void testDoubleProducerPhiToFloatConversion(float value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* tail = proc.addBlock();
+
+ Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ condition,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ Value* asDouble = thenCase->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), asDouble);
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* constDouble = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+ UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), constDouble);
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+ thenValue->setPhi(doubleInput);
+ elseValue->setPhi(doubleInput);
+
+ Value* argAsDoubleAgain = tail->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* finalAdd = tail->appendNew<Value>(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+ Value* floatResult = tail->appendNew<Value>(proc, DoubleToFloat, Origin(), finalAdd);
+ tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+ auto code = compile(proc);
+ CHECK(isIdentical(invoke<float>(*code, 1, bitwise_cast<int32_t>(value)), value + value));
+ CHECK(isIdentical(invoke<float>(*code, 0, bitwise_cast<int32_t>(value)), 42.5f + value));
+}
+
+void testDoubleProducerPhiToFloatConversionWithDoubleConsumer(float value)
+{
+ // In this case, the Upsilon-Phi effectively contains a Float value, but it is used
+ // as a Float and as a Double.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* tail = proc.addBlock();
+
+ Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ condition,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ Value* asDouble = thenCase->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), asDouble);
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* constDouble = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+ UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), constDouble);
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+ thenValue->setPhi(doubleInput);
+ elseValue->setPhi(doubleInput);
+
+ Value* argAsDoubleAgain = tail->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* floatAdd = tail->appendNew<Value>(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+
+ // FRound.
+ Value* floatResult = tail->appendNew<Value>(proc, DoubleToFloat, Origin(), floatAdd);
+ Value* doubleResult = tail->appendNew<Value>(proc, FloatToDouble, Origin(), floatResult);
+
+ // This one *cannot* be eliminated.
+ Value* doubleAdd = tail->appendNew<Value>(proc, Add, Origin(), doubleInput, doubleResult);
+
+ tail->appendNewControlValue(proc, Return, Origin(), doubleAdd);
+
+ auto code = compile(proc);
+ CHECK(isIdentical(invoke<double>(*code, 1, bitwise_cast<int32_t>(value)), (value + value) + static_cast<double>(value)));
+ CHECK(isIdentical(invoke<double>(*code, 0, bitwise_cast<int32_t>(value)), static_cast<double>((42.5f + value) + 42.5f)));
+}
+
+void testDoubleProducerPhiWithNonFloatConst(float value, double constValue)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* tail = proc.addBlock();
+
+ Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ condition,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ Value* asDouble = thenCase->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ UpsilonValue* thenValue = thenCase->appendNew<UpsilonValue>(proc, Origin(), asDouble);
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* constDouble = elseCase->appendNew<ConstDoubleValue>(proc, Origin(), constValue);
+ UpsilonValue* elseValue = elseCase->appendNew<UpsilonValue>(proc, Origin(), constDouble);
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ Value* doubleInput = tail->appendNew<Value>(proc, Phi, Double, Origin());
+ thenValue->setPhi(doubleInput);
+ elseValue->setPhi(doubleInput);
+
+ Value* argAsDoubleAgain = tail->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ Value* finalAdd = tail->appendNew<Value>(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+ Value* floatResult = tail->appendNew<Value>(proc, DoubleToFloat, Origin(), finalAdd);
+ tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+ auto code = compile(proc);
+ CHECK(isIdentical(invoke<float>(*code, 1, bitwise_cast<int32_t>(value)), value + value));
+ CHECK(isIdentical(invoke<float>(*code, 0, bitwise_cast<int32_t>(value)), static_cast<float>(constValue + value)));
+}
+
+void testDoubleArgToInt64BitwiseCast(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitwiseCast, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<int64_t>(proc, value), bitwise_cast<int64_t>(value)));
+}
+
+void testDoubleImmToInt64BitwiseCast(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), value);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitwiseCast, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<int64_t>(proc), bitwise_cast<int64_t>(value)));
+}
+
+void testTwoBitwiseCastOnDouble(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* first = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument);
+ Value* second = root->appendNew<Value>(proc, BitwiseCast, Origin(), first);
+ root->appendNewControlValue(proc, Return, Origin(), second);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, value), value));
+}
+
+void testBitwiseCastOnDoubleInMemory(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+ Value* cast = root->appendNew<Value>(proc, BitwiseCast, Origin(), loadDouble);
+ root->appendNewControlValue(proc, Return, Origin(), cast);
+
+ CHECK(isIdentical(compileAndRun<int64_t>(proc, &value), bitwise_cast<int64_t>(value)));
+}
+
+void testBitwiseCastOnDoubleInMemoryIndexed(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
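+ // Scale the index by 8 (sizeof(double)) before adding it to the base address.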
+ Value* scaledOffset = root->appendNew<Value>(proc, Shl, Origin(),
+ offset,
+ root->appendNew<Const32Value>(proc, Origin(), 3));
+ Value* address = root->appendNew<Value>(proc, Add, Origin(), base, scaledOffset);
+ MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+ Value* cast = root->appendNew<Value>(proc, BitwiseCast, Origin(), loadDouble);
+ root->appendNewControlValue(proc, Return, Origin(), cast);
+
+ CHECK(isIdentical(compileAndRun<int64_t>(proc, &value, 0), bitwise_cast<int64_t>(value)));
+}
+
+void testInt64BArgToDoubleBitwiseCast(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitwiseCast, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, value), bitwise_cast<double>(value)));
+}
+
+void testInt64BImmToDoubleBitwiseCast(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Const64Value>(proc, Origin(), value);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitwiseCast, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), bitwise_cast<double>(value)));
+}
+
+void testTwoBitwiseCastOnInt64(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* first = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument);
+ Value* second = root->appendNew<Value>(proc, BitwiseCast, Origin(), first);
+ root->appendNewControlValue(proc, Return, Origin(), second);
+
+ CHECK(isIdentical(compileAndRun<int64_t>(proc, value), value));
+}
+
+void testBitwiseCastOnInt64InMemory(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* cast = root->appendNew<Value>(proc, BitwiseCast, Origin(), loadDouble);
+ root->appendNewControlValue(proc, Return, Origin(), cast);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, &value), bitwise_cast<double>(value)));
+}
+
+void testBitwiseCastOnInt64InMemoryIndexed(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* scaledOffset = root->appendNew<Value>(proc, Shl, Origin(),
+ offset,
+ root->appendNew<Const32Value>(proc, Origin(), 3));
+ Value* address = root->appendNew<Value>(proc, Add, Origin(), base, scaledOffset);
+ MemoryValue* loadDouble = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* cast = root->appendNew<Value>(proc, BitwiseCast, Origin(), loadDouble);
+ root->appendNewControlValue(proc, Return, Origin(), cast);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, &value, 0), bitwise_cast<double>(value)));
+}
+
+void testFloatImmToInt32BitwiseCast(float value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), value);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitwiseCast, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(value)));
+}
+
+void testBitwiseCastOnFloatInMemory(float value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+ Value* cast = root->appendNew<Value>(proc, BitwiseCast, Origin(), loadFloat);
+ root->appendNewControlValue(proc, Return, Origin(), cast);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, &value), bitwise_cast<int32_t>(value)));
+}
+
+void testInt32BArgToFloatBitwiseCast(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitwiseCast, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<float>(proc, value), bitwise_cast<float>(value)));
+}
+
+void testInt32BImmToFloatBitwiseCast(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<Const64Value>(proc, Origin(), value);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitwiseCast, Origin(), argument));
+
+ CHECK(isIdentical(compileAndRun<float>(proc), bitwise_cast<float>(value)));
+}
+
+void testTwoBitwiseCastOnInt32(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* first = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument);
+ Value* second = root->appendNew<Value>(proc, BitwiseCast, Origin(), first);
+ root->appendNewControlValue(proc, Return, Origin(), second);
+
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, value), value));
+}
+
+void testBitwiseCastOnInt32InMemory(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadFloat = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* cast = root->appendNew<Value>(proc, BitwiseCast, Origin(), loadFloat);
+ root->appendNewControlValue(proc, Return, Origin(), cast);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, &value), bitwise_cast<float>(value)));
+}
+
+void testConvertDoubleToFloatArg(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+ root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, value), static_cast<float>(value)));
+}
+
+void testConvertDoubleToFloatImm(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), value);
+ Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+ root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+ CHECK(isIdentical(compileAndRun<float>(proc), static_cast<float>(value)));
+}
+
+void testConvertDoubleToFloatMem(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadedDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+ Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), loadedDouble);
+ root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, &value), static_cast<float>(value)));
+}
+
+void testConvertFloatToDoubleArg(float value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+ root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, bitwise_cast<int32_t>(value)), static_cast<double>(value)));
+}
+
+void testConvertFloatToDoubleImm(float value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ConstFloatValue>(proc, Origin(), value);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argument);
+ root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+ CHECK(isIdentical(compileAndRun<double>(proc), static_cast<double>(value)));
+}
+
+void testConvertFloatToDoubleMem(float value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadedFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), address);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), loadedFloat);
+ root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, &value), static_cast<double>(value)));
+}
+
+void testConvertDoubleToFloatToDoubleToFloat(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), asFloat);
+ Value* asFloatAgain = root->appendNew<Value>(proc, DoubleToFloat, Origin(), asDouble);
+ root->appendNewControlValue(proc, Return, Origin(), asFloatAgain);
+
+ CHECK(isIdentical(compileAndRun<float>(proc, value), static_cast<float>(value)));
+}
+
+void testLoadFloatConvertDoubleConvertFloatStoreFloat(float value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* dst = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
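+ // Load a Float, round-trip it through Double, and store it back; the output must be bit-identical to the input.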
+ MemoryValue* loadedFloat = root->appendNew<MemoryValue>(proc, Load, Float, Origin(), src);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), loadedFloat);
+ Value* asFloatAgain = root->appendNew<Value>(proc, DoubleToFloat, Origin(), asDouble);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), asFloatAgain, dst);
+
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ float input = value;
+ float output = 0.;
+ CHECK(!compileAndRun<int64_t>(proc, &input, &output));
+ CHECK(isIdentical(input, output));
+}
+
+void testFroundArg(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
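+ // The fround pattern: narrow the Double to Float precision, then widen back to Double.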
+ Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), asFloat);
+ root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, value), static_cast<double>(static_cast<float>(value))));
+}
+
+void testFroundMem(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadedDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+ Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), loadedDouble);
+ Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), asFloat);
+ root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, &value), static_cast<double>(static_cast<float>(value))));
+}
+
+void testIToD64Arg()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* srcAsDouble = root->appendNew<Value>(proc, IToD, Origin(), src);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+ auto code = compile(proc);
+ for (auto testValue : int64Operands())
+ CHECK(isIdentical(invoke<double>(*code, testValue.value), static_cast<double>(testValue.value)));
+}
+
+void testIToF64Arg()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* srcAsFloat = root->appendNew<Value>(proc, IToF, Origin(), src);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+ auto code = compile(proc);
+ for (auto testValue : int64Operands())
+ CHECK(isIdentical(invoke<float>(*code, testValue.value), static_cast<float>(testValue.value)));
+}
+
+void testIToD32Arg()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* srcAsDouble = root->appendNew<Value>(proc, IToD, Origin(), src);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+ auto code = compile(proc);
+ for (auto testValue : int32Operands())
+ CHECK(isIdentical(invoke<double>(*code, testValue.value), static_cast<double>(testValue.value)));
+}
+
+void testIToF32Arg()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* srcAsFloat = root->appendNew<Value>(proc, IToF, Origin(), src);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+ auto code = compile(proc);
+ for (auto testValue : int32Operands())
+ CHECK(isIdentical(invoke<float>(*code, testValue.value), static_cast<float>(testValue.value)));
+}
+
+void testIToD64Mem()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadedSrc = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* srcAsDouble = root->appendNew<Value>(proc, IToD, Origin(), loadedSrc);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+ auto code = compile(proc);
+ int64_t inMemoryValue;
+ for (auto testValue : int64Operands()) {
+ inMemoryValue = testValue.value;
+ CHECK(isIdentical(invoke<double>(*code, &inMemoryValue), static_cast<double>(testValue.value)));
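+ // Also verify that the compiled code did not modify the value in memory.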
+ CHECK(inMemoryValue == testValue.value);
+ }
+}
+
+void testIToF64Mem()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadedSrc = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+ Value* srcAsFloat = root->appendNew<Value>(proc, IToF, Origin(), loadedSrc);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+ auto code = compile(proc);
+ int64_t inMemoryValue;
+ for (auto testValue : int64Operands()) {
+ inMemoryValue = testValue.value;
+ CHECK(isIdentical(invoke<float>(*code, &inMemoryValue), static_cast<float>(testValue.value)));
+ CHECK(inMemoryValue == testValue.value);
+ }
+}
+
+void testIToD32Mem()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadedSrc = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* srcAsDouble = root->appendNew<Value>(proc, IToD, Origin(), loadedSrc);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+ auto code = compile(proc);
+ int32_t inMemoryValue;
+ for (auto testValue : int32Operands()) {
+ inMemoryValue = testValue.value;
+ CHECK(isIdentical(invoke<double>(*code, &inMemoryValue), static_cast<double>(testValue.value)));
+ CHECK(inMemoryValue == testValue.value);
+ }
+}
+
+void testIToF32Mem()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* loadedSrc = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), address);
+ Value* srcAsFloat = root->appendNew<Value>(proc, IToF, Origin(), loadedSrc);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+ auto code = compile(proc);
+ int32_t inMemoryValue;
+ for (auto testValue : int32Operands()) {
+ inMemoryValue = testValue.value;
+ CHECK(isIdentical(invoke<float>(*code, &inMemoryValue), static_cast<float>(testValue.value)));
+ CHECK(inMemoryValue == testValue.value);
+ }
+}
+
+void testIToD64Imm(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<Const64Value>(proc, Origin(), value);
+ Value* srcAsFloatingPoint = root->appendNew<Value>(proc, IToD, Origin(), src);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+ CHECK(isIdentical(compileAndRun<double>(proc), static_cast<double>(value)));
+}
+
+void testIToF64Imm(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<Const64Value>(proc, Origin(), value);
+ Value* srcAsFloatingPoint = root->appendNew<Value>(proc, IToF, Origin(), src);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+ CHECK(isIdentical(compileAndRun<float>(proc), static_cast<float>(value)));
+}
+
+void testIToD32Imm(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<Const32Value>(proc, Origin(), value);
+ Value* srcAsFloatingPoint = root->appendNew<Value>(proc, IToD, Origin(), src);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+ CHECK(isIdentical(compileAndRun<double>(proc), static_cast<double>(value)));
+}
+
+void testIToF32Imm(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<Const32Value>(proc, Origin(), value);
+ Value* srcAsFloatingPoint = root->appendNew<Value>(proc, IToF, Origin(), src);
+ root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+ CHECK(isIdentical(compileAndRun<float>(proc), static_cast<float>(value)));
+}
+
+void testIToDReducedToIToF64Arg()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* srcAsDouble = root->appendNew<Value>(proc, IToD, Origin(), src);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), srcAsDouble);
+ root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+ auto code = compile(proc);
+ for (auto testValue : int64Operands())
+ CHECK(isIdentical(invoke<float>(*code, testValue.value), static_cast<float>(testValue.value)));
+}
+
+void testIToDReducedToIToF32Arg()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* src = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* srcAsDouble = root->appendNew<Value>(proc, IToD, Origin(), src);
+ Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), srcAsDouble);
+ root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+ auto code = compile(proc);
+ for (auto testValue : int32Operands())
+ CHECK(isIdentical(invoke<float>(*code, testValue.value), static_cast<float>(testValue.value)));
+}
+
+void testStore32(int value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
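+ // Pre-fill the slot with a recognizable garbage pattern so a missing store is caught.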
+ int slot = 0xbaadbeef;
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, value));
+ CHECK(slot == value);
+}
+
+void testStoreConstant(int value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int slot = 0xbaadbeef;
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), value),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == value);
+}
+
+void testStoreConstantPtr(intptr_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ intptr_t slot;
+ if (is64Bit())
+ slot = (static_cast<intptr_t>(0xbaadbeef) << 32) + static_cast<intptr_t>(0xbaadbeef);
+ else
+ slot = 0xbaadbeef;
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), value),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == value);
+}
+
+void testStore8Arg()
+{
+ { // Direct addressing.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+ root->appendNew<MemoryValue>(proc, Store8, Origin(), value, address);
+ root->appendNewControlValue(proc, Return, Origin(), value);
+
+ int8_t storage = 0;
+ CHECK(compileAndRun<int64_t>(proc, 42, &storage) == 42);
+ CHECK(storage == 42);
+ }
+
+ { // Indexed addressing.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ Value* displacement = root->appendNew<Const64Value>(proc, Origin(), -1);
+
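+ // The -1 displacement is cancelled by the offset of 1 passed at run time, so the store still targets &storage.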
+ Value* baseDisplacement = root->appendNew<Value>(proc, Add, Origin(), displacement, base);
+ Value* address = root->appendNew<Value>(proc, Add, Origin(), baseDisplacement, offset);
+
+ root->appendNew<MemoryValue>(proc, Store8, Origin(), value, address);
+ root->appendNewControlValue(proc, Return, Origin(), value);
+
+ int8_t storage = 0;
+ CHECK(compileAndRun<int64_t>(proc, 42, &storage, 1) == 42);
+ CHECK(storage == 42);
+ }
+}
+
+void testStore8Imm()
+{
+ { // Direct addressing.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<Const32Value>(proc, Origin(), 42);
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ root->appendNew<MemoryValue>(proc, Store8, Origin(), value, address);
+ root->appendNewControlValue(proc, Return, Origin(), value);
+
+ int8_t storage = 0;
+ CHECK(compileAndRun<int64_t>(proc, &storage) == 42);
+ CHECK(storage == 42);
+ }
+
+ { // Indexed addressing.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<Const32Value>(proc, Origin(), 42);
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* displacement = root->appendNew<Const64Value>(proc, Origin(), -1);
+
+ Value* baseDisplacement = root->appendNew<Value>(proc, Add, Origin(), displacement, base);
+ Value* address = root->appendNew<Value>(proc, Add, Origin(), baseDisplacement, offset);
+
+ root->appendNew<MemoryValue>(proc, Store8, Origin(), value, address);
+ root->appendNewControlValue(proc, Return, Origin(), value);
+
+ int8_t storage = 0;
+ CHECK(compileAndRun<int64_t>(proc, &storage, 1) == 42);
+ CHECK(storage == 42);
+ }
+}
+
+void testStorePartial8BitRegisterOnX86()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ // We want to have this in ECX.
+ Value* returnValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ // We want this stuck in EDX.
+ Value* whereToStore = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+ // The patch point is there to help us force the hand of the compiler.
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+
+ // These constraints force the values above to be materialized and give the allocator
+ // a stronger incentive to assign those registers the way we need.
+ patchpoint->append(ConstrainedValue(returnValue, ValueRep(GPRInfo::regT3)));
+ patchpoint->append(ConstrainedValue(whereToStore, ValueRep(GPRInfo::regT2)));
+
+ // We'll produce EDI.
+ patchpoint->resultConstraint = ValueRep::reg(GPRInfo::regT6);
+
+ // Give the allocator a good reason not to use any other register.
+ RegisterSet clobberSet = RegisterSet::allGPRs();
+ clobberSet.exclude(RegisterSet::stackRegisters());
+ clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+ clobberSet.clear(GPRInfo::regT3);
+ clobberSet.clear(GPRInfo::regT2);
+ clobberSet.clear(GPRInfo::regT6);
+ patchpoint->clobberLate(clobberSet);
+
+ // Set EDI.
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.xor64(params[0].gpr(), params[0].gpr());
+ });
+
+ // If everything went well, we should have the big number in eax,
+ // patchpoint == EDI and whereToStore == EDX.
+ // Since EDI == 5, and AH == 5 on an 8-bit store, this would go wrong
+ // if we use X86 partial registers.
+ root->appendNew<MemoryValue>(proc, Store8, Origin(), patchpoint, whereToStore);
+
+ root->appendNewControlValue(proc, Return, Origin(), returnValue);
+
+ int8_t storage = 0xff;
+ CHECK(compileAndRun<int64_t>(proc, 0x12345678abcdef12, &storage) == 0x12345678abcdef12);
+ CHECK(!storage);
+}
+
+void testStore16Arg()
+{
+ { // Direct addressing.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+ root->appendNew<MemoryValue>(proc, Store16, Origin(), value, address);
+ root->appendNewControlValue(proc, Return, Origin(), value);
+
+ int16_t storage = -1;
+ CHECK(compileAndRun<int64_t>(proc, 42, &storage) == 42);
+ CHECK(storage == 42);
+ }
+
+ { // Indexed addressing.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ Value* displacement = root->appendNew<Const64Value>(proc, Origin(), -1);
+
+ Value* baseDisplacement = root->appendNew<Value>(proc, Add, Origin(), displacement, base);
+ Value* address = root->appendNew<Value>(proc, Add, Origin(), baseDisplacement, offset);
+
+ root->appendNew<MemoryValue>(proc, Store16, Origin(), value, address);
+ root->appendNewControlValue(proc, Return, Origin(), value);
+
+ int16_t storage = -1;
+ CHECK(compileAndRun<int64_t>(proc, 42, &storage, 1) == 42);
+ CHECK(storage == 42);
+ }
+}
+
+void testStore16Imm()
+{
+ { // Direct addressing.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<Const32Value>(proc, Origin(), 42);
+ Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ root->appendNew<MemoryValue>(proc, Store16, Origin(), value, address);
+ root->appendNewControlValue(proc, Return, Origin(), value);
+
+ int16_t storage = -1;
+ CHECK(compileAndRun<int64_t>(proc, &storage) == 42);
+ CHECK(storage == 42);
+ }
+
+ { // Indexed addressing.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<Const32Value>(proc, Origin(), 42);
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* displacement = root->appendNew<Const64Value>(proc, Origin(), -1);
+
+ Value* baseDisplacement = root->appendNew<Value>(proc, Add, Origin(), displacement, base);
+ Value* address = root->appendNew<Value>(proc, Add, Origin(), baseDisplacement, offset);
+
+ root->appendNew<MemoryValue>(proc, Store16, Origin(), value, address);
+ root->appendNewControlValue(proc, Return, Origin(), value);
+
+ int16_t storage = -1;
+ CHECK(compileAndRun<int64_t>(proc, &storage, 1) == 42);
+ CHECK(storage == 42);
+ }
+}
+
+void testTrunc(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(compileAndRun<int>(proc, value) == static_cast<int>(value));
+}
+
+void testAdd1(int value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+ CHECK(compileAndRun<int>(proc, value) == value + 1);
+}
+
+void testAdd1Ptr(intptr_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 1)));
+
+ CHECK(compileAndRun<intptr_t>(proc, value) == value + 1);
+}
+
+void testNeg32(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == -value);
+}
+
+void testNegPtr(intptr_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(compileAndRun<intptr_t>(proc, value) == -value);
+}
+
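+// The testStoreAddLoad* family below exercises the load/add/store pattern at
+// 8, 16, 32, and 64 bits, with the addend coming either from a register
+// argument or from an immediate, and with the slot addressed directly or
+// through a loaded base-plus-index pointer (the *Index variants).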
+void testStoreAddLoad32(int amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int slot = 37;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm32(int amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int slot = 37;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+ root->appendNew<Const32Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad8(int amount, B3::Opcode loadOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int8_t slot = 37;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store8, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm8(int amount, B3::Opcode loadOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int8_t slot = 37;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store8, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+ root->appendNew<Const32Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad16(int amount, B3::Opcode loadOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int16_t slot = 37;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store16, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm16(int amount, B3::Opcode loadOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int16_t slot = 37;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store16, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+ root->appendNew<Const32Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad64(int amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int64_t slot = 37000000000ll;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), slotPtr),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == 37000000000ll + amount);
+}
+
+void testStoreAddLoadImm64(int64_t amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int64_t slot = 370000000000ll;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), slotPtr),
+ root->appendNew<Const64Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == 370000000000ll + amount);
+}
+
+void testStoreAddLoad32Index(int amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int slot = 37;
+ int* ptr = &slot;
+ intptr_t zero = 0;
+ Value* slotPtr = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm32Index(int amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int slot = 37;
+ int* ptr = &slot;
+ intptr_t zero = 0;
+ Value* slotPtr = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+ root->appendNew<Const32Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad8Index(int amount, B3::Opcode loadOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int8_t slot = 37;
+ int8_t* ptr = &slot;
+ intptr_t zero = 0;
+ Value* slotPtr = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+ root->appendNew<MemoryValue>(
+ proc, Store8, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm8Index(int amount, B3::Opcode loadOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int8_t slot = 37;
+ int8_t* ptr = &slot;
+ intptr_t zero = 0;
+ Value* slotPtr = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+ root->appendNew<MemoryValue>(
+ proc, Store8, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+ root->appendNew<Const32Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad16Index(int amount, B3::Opcode loadOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int16_t slot = 37;
+ int16_t* ptr = &slot;
+ intptr_t zero = 0;
+ Value* slotPtr = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+ root->appendNew<MemoryValue>(
+ proc, Store16, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm16Index(int amount, B3::Opcode loadOpcode)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int16_t slot = 37;
+ int16_t* ptr = &slot;
+ intptr_t zero = 0;
+ Value* slotPtr = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+ root->appendNew<MemoryValue>(
+ proc, Store16, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, loadOpcode, Origin(), slotPtr),
+ root->appendNew<Const32Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad64Index(int amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int64_t slot = 37000000000ll;
+ int64_t* ptr = &slot;
+ intptr_t zero = 0;
+ Value* slotPtr = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), slotPtr),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == 37000000000ll + amount);
+}
+
+void testStoreAddLoadImm64Index(int64_t amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int64_t slot = 370000000000ll;
+ int64_t* ptr = &slot;
+ intptr_t zero = 0;
+ Value* slotPtr = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &ptr)),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &zero)));
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), slotPtr),
+ root->appendNew<Const64Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == 370000000000ll + amount);
+}
+
+void testStoreSubLoad(int amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int32_t startValue = std::numeric_limits<int32_t>::min();
+ int32_t slot = startValue;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, amount));
+ CHECK(slot == startValue - amount);
+}
+
+void testStoreAddLoadInterference(int amount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int slot = 37;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ ArgumentRegValue* otherSlotPtr =
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr);
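+ // The store through otherSlotPtr below may alias slotPtr (the test passes
+ // &slot for it), so the load above must not be sunk past that store: the
+ // final slot value has to be 37 + amount, not 666 + amount.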
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 666),
+ otherSlotPtr);
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ load, root->appendNew<Const32Value>(proc, Origin(), amount)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc, &slot));
+ CHECK(slot == 37 + amount);
+}
+
+void testStoreAddAndLoad(int amount, int mask)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int slot = 37;
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+ root->appendNew<Const32Value>(proc, Origin(), amount)),
+ root->appendNew<Const32Value>(proc, Origin(), mask)),
+ slotPtr);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+ CHECK(slot == ((37 + amount) & mask));
+}
+
+void testStoreNegLoad32(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ int32_t slot = value;
+
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr)),
+ slotPtr);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int32_t>(proc));
+ CHECK(slot == -value);
+}
+
+void testStoreNegLoadPtr(intptr_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ intptr_t slot = value;
+
+ ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Sub, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0),
+ root->appendNew<MemoryValue>(proc, Load, pointerType(), Origin(), slotPtr)),
+ slotPtr);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int32_t>(proc));
+ CHECK(slot == -value);
+}
+
+void testAdd1Uncommuted(int value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 1),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int>(proc, value) == value + 1);
+}
+
+void testLoadOffset()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int array[] = { 1, 2 };
+ ConstPtrValue* arrayPtr = root->appendNew<ConstPtrValue>(proc, Origin(), array);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, 0),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, sizeof(int))));
+
+ CHECK(compileAndRun<int>(proc) == array[0] + array[1]);
+}
+
+void testLoadOffsetNotConstant()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int array[] = { 1, 2 };
+ Value* arrayPtr = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, 0),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, sizeof(int))));
+
+ CHECK(compileAndRun<int>(proc, &array[0]) == array[0] + array[1]);
+}
+
+void testLoadOffsetUsingAdd()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int array[] = { 1, 2 };
+ ConstPtrValue* arrayPtr = root->appendNew<ConstPtrValue>(proc, Origin(), array);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(), arrayPtr,
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0))),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(), arrayPtr,
+ root->appendNew<ConstPtrValue>(proc, Origin(), sizeof(int))))));
+
+ CHECK(compileAndRun<int>(proc) == array[0] + array[1]);
+}
+
+void testLoadOffsetUsingAddInterference()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int array[] = { 1, 2 };
+ ConstPtrValue* arrayPtr = root->appendNew<ConstPtrValue>(proc, Origin(), array);
+ ArgumentRegValue* otherArrayPtr =
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Const32Value* theNumberOfTheBeast = root->appendNew<Const32Value>(proc, Origin(), 666);
+ MemoryValue* left = root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(), arrayPtr,
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0)));
+ MemoryValue* right = root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(), arrayPtr,
+ root->appendNew<ConstPtrValue>(proc, Origin(), sizeof(int))));
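+ // otherArrayPtr is the same array at run time, so both loads above must stay
+ // ahead of the stores below; otherwise they would observe 666 instead of the
+ // original elements.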
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(), theNumberOfTheBeast, otherArrayPtr, 0);
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(), theNumberOfTheBeast, otherArrayPtr, sizeof(int));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(), left, right));
+
+ CHECK(compileAndRun<int>(proc, &array[0]) == 1 + 2);
+ CHECK(array[0] == 666);
+ CHECK(array[1] == 666);
+}
+
+void testLoadOffsetUsingAddNotConstant()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int array[] = { 1, 2 };
+ Value* arrayPtr = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(), arrayPtr,
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0))),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(), arrayPtr,
+ root->appendNew<ConstPtrValue>(proc, Origin(), sizeof(int))))));
+
+ CHECK(compileAndRun<int>(proc, &array[0]) == array[0] + array[1]);
+}
+
+void testLoadAddrShift(unsigned shift)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int slots[2];
+
+ // Figure out which slot to use: pick one whose address survives shifting right by shift and back left, so the shifted argument round-trips to the same pointer.
+ int* slot;
+ uintptr_t arg;
+ for (unsigned i = sizeof(slots)/sizeof(slots[0]); i--;) {
+ slot = slots + i;
+ arg = bitwise_cast<uintptr_t>(slot) >> shift;
+ if (bitwise_cast<int*>(arg << shift) == slot)
+ break;
+ }
+
+ *slot = 8675309;
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const32Value>(proc, Origin(), shift))));
+
+ CHECK(compileAndRun<int>(proc, arg) == 8675309);
+}
+
+void testFramePointer()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, FramePointer, Origin()));
+
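+ // The JIT'd frame sits just below this C++ frame on a downward-growing
+ // stack, so the returned FP should be slightly less than &proc; 10000 is
+ // only a generous slack bound, not an exact offset.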
+ void* fp = compileAndRun<void*>(proc);
+ CHECK(fp < &proc);
+ CHECK(fp >= bitwise_cast<char*>(&proc) - 10000);
+}
+
+void testOverrideFramePointer()
+{
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ // Add a stack slot to make the frame non-trivial.
+ root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(8));
+
+ // On x86, Sub UseDefs its source. If FP is not protected correctly, it will be overwritten here, since this is its last visible use.
+ Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* fp = root->appendNew<Value>(proc, FramePointer, Origin());
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), fp, offset);
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ CHECK(compileAndRun<int64_t>(proc, 1));
+ }
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
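+ // A second variant: FramePointer feeds a BitAnd whose result is then
+ // combined with another argument, keeping FP live across more instructions.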
+ root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(8));
+
+ Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* fp = root->appendNew<Value>(proc, FramePointer, Origin());
+ Value* offsetFP = root->appendNew<Value>(proc, BitAnd, Origin(), offset, fp);
+ Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* offsetArg = root->appendNew<Value>(proc, Add, Origin(), offset, arg);
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), offsetArg, offsetFP);
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ CHECK(compileAndRun<int64_t>(proc, 1, 2));
+ }
+}
+
+void testStackSlot()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(1)));
+
+ void* stackSlot = compileAndRun<void*>(proc);
+ CHECK(stackSlot < &proc);
+ CHECK(stackSlot >= bitwise_cast<char*>(&proc) - 10000);
+}
+
+void testLoadFromFramePointer()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<Value>(proc, FramePointer, Origin())));
+
+ void* fp = compileAndRun<void*>(proc);
+ void* myFP = __builtin_frame_address(0);
+ CHECK(fp <= myFP);
+ CHECK(fp >= bitwise_cast<char*>(myFP) - 10000);
+}
+
+void testStoreLoadStackSlot(int value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ SlotBaseValue* stack =
+ root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(sizeof(int)));
+
+ root->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ stack);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), stack));
+
+ CHECK(compileAndRun<int>(proc, value) == value);
+}
+
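+// modelLoad() mirrors what the generated narrow load should produce: overlay
+// LoadedType onto the value's storage via a union, then extend back to the
+// effective type, sign- or zero-extending according to LoadedType's
+// signedness.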
+template<typename LoadedType, typename EffectiveType>
+EffectiveType modelLoad(EffectiveType value)
+{
+ union {
+ EffectiveType original;
+ LoadedType loaded;
+ } u;
+
+ u.original = value;
+ if (std::is_signed<LoadedType>::value)
+ return static_cast<EffectiveType>(u.loaded);
+ return static_cast<EffectiveType>(static_cast<typename std::make_unsigned<EffectiveType>::type>(u.loaded));
+}
+
+template<>
+float modelLoad<float, float>(float value) { return value; }
+
+template<>
+double modelLoad<double, double>(double value) { return value; }
+
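+// testLoad() covers the addressing modes the instruction selector is expected
+// to handle for loads: an absolute address, an address in a register, a
+// register plus immediate offset, and base-plus-scaled-index with either
+// operand order of the Add.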
+template<B3::Type type, typename CType, typename InputType>
+void testLoad(B3::Opcode opcode, InputType value)
+{
+ // Simple load from an absolute address.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, opcode, type, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &value)));
+
+ CHECK(isIdentical(compileAndRun<CType>(proc), modelLoad<CType>(value)));
+ }
+
+ // Simple load from an address in a register.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, opcode, type, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+ CHECK(isIdentical(compileAndRun<CType>(proc, &value), modelLoad<CType>(value)));
+ }
+
+ // Simple load from an address in a register, at an offset.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, opcode, type, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ sizeof(InputType)));
+
+ CHECK(isIdentical(compileAndRun<CType>(proc, &value - 1), modelLoad<CType>(value)));
+ }
+
+ // Load from a simple base-index with various scales.
+ for (unsigned logScale = 0; logScale <= 3; ++logScale) {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, opcode, type, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), logScale)))));
+
+ CHECK(isIdentical(compileAndRun<CType>(proc, &value - 2, (sizeof(InputType) * 2) >> logScale), modelLoad<CType>(value)));
+ }
+
+ // Load from a simple base-index with various scales, but commuted.
+ for (unsigned logScale = 0; logScale <= 3; ++logScale) {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, opcode, type, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), logScale)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(isIdentical(compileAndRun<CType>(proc, &value - 2, (sizeof(InputType) * 2) >> logScale), modelLoad<CType>(value)));
+ }
+}
+
+template<typename T>
+void testLoad(B3::Opcode opcode, int32_t value)
+{
+ return testLoad<Int32, T>(opcode, value);
+}
+
+template<B3::Type type, typename T>
+void testLoad(T value)
+{
+ return testLoad<type, T>(Load, value);
+}
+
+void testStoreFloat(double input)
+{
+ // Simple store to an address held in a register.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentAsFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+
+ Value* destinationAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNew<MemoryValue>(proc, Store, Origin(), argumentAsFloat, destinationAddress);
+
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ float output = 0.;
+ CHECK(!compileAndRun<int64_t>(proc, input, &output));
+ CHECK(isIdentical(static_cast<float>(input), output));
+ }
+
+ // Simple indexed store.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentAsFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+
+ Value* destinationBaseAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* index = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* scaledIndex = root->appendNew<Value>(
+ proc, Shl, Origin(),
+ index,
+ root->appendNew<Const32Value>(proc, Origin(), 2));
+ Value* destinationAddress = root->appendNew<Value>(proc, Add, Origin(), scaledIndex, destinationBaseAddress);
+
+ root->appendNew<MemoryValue>(proc, Store, Origin(), argumentAsFloat, destinationAddress);
+
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ float output = 0.;
+ CHECK(!compileAndRun<int64_t>(proc, input, &output - 1, 1));
+ CHECK(isIdentical(static_cast<float>(input), output));
+ }
+}
+
+void testStoreDoubleConstantAsFloat(double input)
+{
+ // Simple store to an address held in a register.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* value = root->appendNew<ConstDoubleValue>(proc, Origin(), input);
+ Value* valueAsFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), value);
+
+ Value* destinationAddress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ root->appendNew<MemoryValue>(proc, Store, Origin(), valueAsFloat, destinationAddress);
+
+ root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ float output = 0.;
+ CHECK(!compileAndRun<int64_t>(proc, input, &output));
+ CHECK(isIdentical(static_cast<float>(input), output));
+}
+
+void testSpillGP()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
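+ // Build a chain of adds whose 32 intermediate values all stay live until the
+ // final sum, so the register allocator has to spill general-purpose values.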
+ Vector<Value*> sources;
+ sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+
+ for (unsigned i = 0; i < 30; ++i) {
+ sources.append(
+ root->appendNew<Value>(proc, Add, Origin(), sources[sources.size() - 1], sources[sources.size() - 2])
+ );
+ }
+
+ Value* total = root->appendNew<Const64Value>(proc, Origin(), 0);
+ for (Value* value : sources)
+ total = root->appendNew<Value>(proc, Add, Origin(), total, value);
+
+ root->appendNewControlValue(proc, Return, Origin(), total);
+ compileAndRun<int>(proc, 1, 2);
+}
+
+void testSpillFP()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Vector<Value*> sources;
+ sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+ sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1));
+
+ for (unsigned i = 0; i < 30; ++i) {
+ sources.append(
+ root->appendNew<Value>(proc, Add, Origin(), sources[sources.size() - 1], sources[sources.size() - 2])
+ );
+ }
+
+ Value* total = root->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+ for (Value* value : sources)
+ total = root->appendNew<Value>(proc, Add, Origin(), total, value);
+
+ root->appendNewControlValue(proc, Return, Origin(), total);
+ compileAndRun<double>(proc, 1.1, 2.5);
+}
+
+void testInt32ToDoublePartialRegisterStall()
+{
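+ // A tight Int32 -> Double conversion loop. On x86, the conversion writes
+ // only part of its destination register, so without care each iteration
+ // carries a false dependency on stale XMM contents; this test presumably
+ // guards the codegen that breaks that dependency.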
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* loop = proc.addBlock();
+ BasicBlock* done = proc.addBlock();
+
+ // Head.
+ Value* total = root->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+ Value* counter = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ UpsilonValue* originalTotal = root->appendNew<UpsilonValue>(proc, Origin(), total);
+ UpsilonValue* originalCounter = root->appendNew<UpsilonValue>(proc, Origin(), counter);
+ root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
+ // Loop.
+ Value* loopCounter = loop->appendNew<Value>(proc, Phi, Int64, Origin());
+ Value* loopTotal = loop->appendNew<Value>(proc, Phi, Double, Origin());
+ originalCounter->setPhi(loopCounter);
+ originalTotal->setPhi(loopTotal);
+
+ Value* truncatedCounter = loop->appendNew<Value>(proc, Trunc, Origin(), loopCounter);
+ Value* doubleCounter = loop->appendNew<Value>(proc, IToD, Origin(), truncatedCounter);
+ Value* updatedTotal = loop->appendNew<Value>(proc, Add, Origin(), doubleCounter, loopTotal);
+ UpsilonValue* updatedTotalUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), updatedTotal);
+ updatedTotalUpsilon->setPhi(loopTotal);
+
+ Value* decCounter = loop->appendNew<Value>(proc, Sub, Origin(), loopCounter, loop->appendNew<Const64Value>(proc, Origin(), 1));
+ UpsilonValue* decCounterUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), decCounter);
+ decCounterUpsilon->setPhi(loopCounter);
+ loop->appendNewControlValue(
+ proc, Branch, Origin(),
+ decCounter,
+ FrequentedBlock(loop), FrequentedBlock(done));
+
+ // Tail.
+ done->appendNewControlValue(proc, Return, Origin(), updatedTotal);
+ CHECK(isIdentical(compileAndRun<double>(proc, 100000), 5000050000.));
+}
+
+void testInt32ToDoublePartialRegisterWithoutStall()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* loop = proc.addBlock();
+ BasicBlock* done = proc.addBlock();
+
+ // Head.
+ Value* total = root->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+ Value* counter = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ UpsilonValue* originalTotal = root->appendNew<UpsilonValue>(proc, Origin(), total);
+ UpsilonValue* originalCounter = root->appendNew<UpsilonValue>(proc, Origin(), counter);
+ uint64_t forPaddingInput;
+ Value* forPaddingInputAddress = root->appendNew<ConstPtrValue>(proc, Origin(), &forPaddingInput);
+ uint64_t forPaddingOutput;
+ Value* forPaddingOutputAddress = root->appendNew<ConstPtrValue>(proc, Origin(), &forPaddingOutput);
+ root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
+ // Loop.
+ Value* loopCounter = loop->appendNew<Value>(proc, Phi, Int64, Origin());
+ Value* loopTotal = loop->appendNew<Value>(proc, Phi, Double, Origin());
+ originalCounter->setPhi(loopCounter);
+ originalTotal->setPhi(loopTotal);
+
+ Value* truncatedCounter = loop->appendNew<Value>(proc, Trunc, Origin(), loopCounter);
+ Value* doubleCounter = loop->appendNew<Value>(proc, IToD, Origin(), truncatedCounter);
+ Value* updatedTotal = loop->appendNew<Value>(proc, Add, Origin(), doubleCounter, loopTotal);
+
+ // Add enough padding instructions to avoid a stall.
+ Value* loadPadding = loop->appendNew<MemoryValue>(proc, Load, Int64, Origin(), forPaddingInputAddress);
+ Value* padding = loop->appendNew<Value>(proc, BitXor, Origin(), loadPadding, loopCounter);
+ padding = loop->appendNew<Value>(proc, Add, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, BitOr, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, Sub, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, BitXor, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, Add, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, BitOr, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, Sub, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, BitXor, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, Add, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, BitOr, Origin(), padding, loopCounter);
+ padding = loop->appendNew<Value>(proc, Sub, Origin(), padding, loopCounter);
+ loop->appendNew<MemoryValue>(proc, Store, Origin(), padding, forPaddingOutputAddress);
+
+ UpsilonValue* updatedTotalUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), updatedTotal);
+ updatedTotalUpsilon->setPhi(loopTotal);
+
+ Value* decCounter = loop->appendNew<Value>(proc, Sub, Origin(), loopCounter, loop->appendNew<Const64Value>(proc, Origin(), 1));
+ UpsilonValue* decCounterUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), decCounter);
+ decCounterUpsilon->setPhi(loopCounter);
+ loop->appendNewControlValue(
+ proc, Branch, Origin(),
+ decCounter,
+ FrequentedBlock(loop), FrequentedBlock(done));
+
+ // Tail.
+ done->appendNewControlValue(proc, Return, Origin(), updatedTotal);
+ CHECK(isIdentical(compileAndRun<double>(proc, 100000), 5000050000.));
+}
+
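+// Most of the branch tests below compile each procedure once and then invoke
+// the same generated code with both a non-zero and a zero argument, so the
+// taken and not-taken paths are checked against the same code.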
+void testBranch()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchPtr()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, static_cast<intptr_t>(42)) == 1);
+ CHECK(invoke<int>(*code, static_cast<intptr_t>(0)) == 0);
+}
+
+void testDiamond()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* done = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ UpsilonValue* thenResult = thenCase->appendNew<UpsilonValue>(
+ proc, Origin(), thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+ UpsilonValue* elseResult = elseCase->appendNew<UpsilonValue>(
+ proc, Origin(), elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+ Value* phi = done->appendNew<Value>(proc, Phi, Int32, Origin());
+ thenResult->setPhi(phi);
+ elseResult->setPhi(phi);
+ done->appendNewControlValue(proc, Return, Origin(), phi);
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchNotEqual()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, NotEqual, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchNotEqualCommute()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, NotEqual, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchNotEqualNotEqual()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, NotEqual, Origin(),
+ root->appendNew<Value>(
+ proc, NotEqual, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchEqual()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchEqualEqual()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchEqualCommute()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchEqualEqual1()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0)),
+ root->appendNew<Const32Value>(proc, Origin(), 1)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ auto code = compile(proc);
+ CHECK(invoke<int>(*code, 42) == 1);
+ CHECK(invoke<int>(*code, 0) == 0);
+}
+
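+// EqualOrUnordered should evaluate to true when either operand is NaN, hence
+// the (isunordered || ==) model used for the expected values below.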
+void testBranchEqualOrUnorderedArgs(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argumentA,
+ argumentB),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc, a, b) == expected);
+}
+
+void testBranchEqualOrUnorderedArgs(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argumentA = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argumentB = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argumentA,
+ argumentB),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc, &a, &b) == expected);
+}
+
+void testBranchNotEqualAndOrderedArgs(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* equalOrUnordered = root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argumentA,
+ argumentB);
+ Value* notEqualAndOrdered = root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0),
+ equalOrUnordered);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ notEqualAndOrdered,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (!std::isunordered(a, b) && a != b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc, a, b) == expected);
+}
+
+void testBranchNotEqualAndOrderedArgs(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argumentA = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argumentB = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* equalOrUnordered = root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argumentA,
+ argumentB);
+ Value* notEqualAndOrdered = root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0),
+ equalOrUnordered);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ notEqualAndOrdered,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (!std::isunordered(a, b) && a != b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc, &a, &b) == expected);
+}
+
+void testBranchEqualOrUnorderedDoubleArgImm(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argumentA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argumentA,
+ argumentB),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc, a) == expected);
+}
+
+void testBranchEqualOrUnorderedFloatArgImm(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argumentA = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argumentA,
+ argumentB),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc, &a) == expected);
+}
+
+void testBranchEqualOrUnorderedDoubleImms(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argumentA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+ Value* argumentB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argumentA,
+ argumentB),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc) == expected);
+}
+
+void testBranchEqualOrUnorderedFloatImms(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argumentA = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+ Value* argumentB = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argumentA,
+ argumentB),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc) == expected);
+}
+
+void testBranchEqualOrUnorderedFloatWithUselessDoubleConversion(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* argument1 = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2 = root->appendNew<MemoryValue>(proc, Load, Float, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* argument1AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argument1);
+ Value* argument2AsDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), argument2);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, EqualOrUnordered, Origin(),
+ argument1AsDouble,
+ argument2AsDouble),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 42));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), -13));
+
+ int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+ CHECK(compileAndRun<int64_t>(proc, &a, &b) == expected);
+}
+
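+// With a constant condition, the branch is expected to fold and one arm to
+// become dead; the checks below only assert on the returned value, so they
+// still pass even if folding does not happen.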
+void testBranchFold(int value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), value),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(compileAndRun<int>(proc) == !!value);
+}
+
+void testDiamondFold(int value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* done = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), value),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ UpsilonValue* thenResult = thenCase->appendNew<UpsilonValue>(
+ proc, Origin(), thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+ UpsilonValue* elseResult = elseCase->appendNew<UpsilonValue>(
+ proc, Origin(), elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+ Value* phi = done->appendNew<Value>(proc, Phi, Int32, Origin());
+ thenResult->setPhi(phi);
+ elseResult->setPhi(phi);
+ done->appendNewControlValue(proc, Return, Origin(), phi);
+
+ CHECK(compileAndRun<int>(proc) == !!value);
+}
+
+void testBranchNotEqualFoldPtr(intptr_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, NotEqual, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), value),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(compileAndRun<int>(proc) == !!value);
+}
+
+void testBranchEqualFoldPtr(intptr_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), value),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(compileAndRun<int>(proc) == !value);
+}
+
+void testBranchLoadPtr()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ intptr_t cond;
+ cond = 42;
+ CHECK(invoke<int>(*code, &cond) == 1);
+ cond = 0;
+ CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ int32_t cond;
+ cond = 42;
+ CHECK(invoke<int>(*code, &cond) == 1);
+ cond = 0;
+ CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad8S()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load8S, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ int8_t cond;
+ cond = -1;
+ CHECK(invoke<int>(*code, &cond) == 1);
+ cond = 0;
+ CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad8Z()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ uint8_t cond;
+ cond = 1;
+ CHECK(invoke<int>(*code, &cond) == 1);
+ cond = 0;
+ CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad16S()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load16S, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ int16_t cond;
+ cond = -1;
+ CHECK(invoke<int>(*code, &cond) == 1);
+ cond = 0;
+ CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranchLoad16Z()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load16Z, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ uint16_t cond;
+ cond = 1;
+ CHECK(invoke<int>(*code, &cond) == 1);
+ cond = 0;
+ CHECK(invoke<int>(*code, &cond) == 0);
+}
+
+void testBranch8WithLoad8ZIndex()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ int logScale = 1;
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Above, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), logScale)))),
+ root->appendNew<Const32Value>(proc, Origin(), 250)),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ uint32_t cond;
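+ // The compiled code loads a byte from base + (index << logScale). We pass a base 8 bytes (two uint32_t) below
+ // cond and an index of (sizeof(uint32_t) * 2) >> logScale, so the scaled index lands the address back on cond.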
+ cond = 0xffffffffU; // All bytes are 0xff.
+ CHECK(invoke<int>(*code, &cond - 2, (sizeof(uint32_t) * 2) >> logScale) == 1);
+ cond = 0x00000000U; // All bytes are 0.
+ CHECK(invoke<int>(*code, &cond - 2, (sizeof(uint32_t) * 2) >> logScale) == 0);
+}
+
+void testComplex(unsigned numVars, unsigned numConstructs)
+{
+ double before = monotonicallyIncreasingTimeMS();
+
+ Procedure proc;
+ BasicBlock* current = proc.addBlock();
+
+ Const32Value* one = current->appendNew<Const32Value>(proc, Origin(), 1);
+
+ Vector<int32_t> varSlots;
+ for (unsigned i = numVars; i--;)
+ varSlots.append(i);
+
+ Vector<Value*> vars;
+ for (int32_t& varSlot : varSlots) {
+ Value* varSlotPtr = current->appendNew<ConstPtrValue>(proc, Origin(), &varSlot);
+ vars.append(current->appendNew<MemoryValue>(proc, Load, Int32, Origin(), varSlotPtr));
+ }
+
+ for (unsigned i = 0; i < numConstructs; ++i) {
+ if (i & 1) {
+ // Control flow diamond.
+ unsigned predicateVarIndex = ((i >> 1) + 2) % numVars;
+ unsigned thenIncVarIndex = ((i >> 1) + 0) % numVars;
+ unsigned elseIncVarIndex = ((i >> 1) + 1) % numVars;
+
+ BasicBlock* thenBlock = proc.addBlock();
+ BasicBlock* elseBlock = proc.addBlock();
+ BasicBlock* continuation = proc.addBlock();
+
+ current->appendNewControlValue(
+ proc, Branch, Origin(), vars[predicateVarIndex],
+ FrequentedBlock(thenBlock), FrequentedBlock(elseBlock));
+
+ UpsilonValue* thenThenResult = thenBlock->appendNew<UpsilonValue>(
+ proc, Origin(),
+ thenBlock->appendNew<Value>(proc, Add, Origin(), vars[thenIncVarIndex], one));
+ UpsilonValue* thenElseResult = thenBlock->appendNew<UpsilonValue>(
+ proc, Origin(), vars[elseIncVarIndex]);
+ thenBlock->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+ UpsilonValue* elseElseResult = elseBlock->appendNew<UpsilonValue>(
+ proc, Origin(),
+ elseBlock->appendNew<Value>(proc, Add, Origin(), vars[elseIncVarIndex], one));
+ UpsilonValue* elseThenResult = elseBlock->appendNew<UpsilonValue>(
+ proc, Origin(), vars[thenIncVarIndex]);
+ elseBlock->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+ Value* thenPhi = continuation->appendNew<Value>(proc, Phi, Int32, Origin());
+ thenThenResult->setPhi(thenPhi);
+ elseThenResult->setPhi(thenPhi);
+ vars[thenIncVarIndex] = thenPhi;
+
+ Value* elsePhi = continuation->appendNew<Value>(proc, Phi, Int32, Origin());
+ thenElseResult->setPhi(elsePhi);
+ elseElseResult->setPhi(elsePhi);
+ vars[elseIncVarIndex] = elsePhi;
+
+ current = continuation;
+ } else {
+ // Loop.
+
+ BasicBlock* loopEntry = proc.addBlock();
+ BasicBlock* loopReentry = proc.addBlock();
+ BasicBlock* loopBody = proc.addBlock();
+ BasicBlock* loopExit = proc.addBlock();
+ BasicBlock* loopSkip = proc.addBlock();
+ BasicBlock* continuation = proc.addBlock();
+
+ Value* startIndex = vars[((i >> 1) + 1) % numVars];
+ Value* startSum = current->appendNew<Const32Value>(proc, Origin(), 0);
+ current->appendNewControlValue(
+ proc, Branch, Origin(), startIndex,
+ FrequentedBlock(loopEntry), FrequentedBlock(loopSkip));
+
+ UpsilonValue* startIndexForBody = loopEntry->appendNew<UpsilonValue>(
+ proc, Origin(), startIndex);
+ UpsilonValue* startSumForBody = loopEntry->appendNew<UpsilonValue>(
+ proc, Origin(), startSum);
+ loopEntry->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loopBody));
+
+ Value* bodyIndex = loopBody->appendNew<Value>(proc, Phi, Int32, Origin());
+ startIndexForBody->setPhi(bodyIndex);
+ Value* bodySum = loopBody->appendNew<Value>(proc, Phi, Int32, Origin());
+ startSumForBody->setPhi(bodySum);
+ Value* newBodyIndex = loopBody->appendNew<Value>(proc, Sub, Origin(), bodyIndex, one);
+ Value* newBodySum = loopBody->appendNew<Value>(
+ proc, Add, Origin(),
+ bodySum,
+ loopBody->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ loopBody->appendNew<Value>(
+ proc, Add, Origin(),
+ loopBody->appendNew<ConstPtrValue>(proc, Origin(), varSlots.data()),
+ loopBody->appendNew<Value>(
+ proc, Shl, Origin(),
+ loopBody->appendNew<Value>(
+ proc, ZExt32, Origin(),
+ loopBody->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ newBodyIndex,
+ loopBody->appendNew<Const32Value>(
+ proc, Origin(), numVars - 1))),
+ loopBody->appendNew<Const32Value>(proc, Origin(), 2)))));
+ loopBody->appendNewControlValue(
+ proc, Branch, Origin(), newBodyIndex,
+ FrequentedBlock(loopReentry), FrequentedBlock(loopExit));
+
+ loopReentry->appendNew<UpsilonValue>(proc, Origin(), newBodyIndex, bodyIndex);
+ loopReentry->appendNew<UpsilonValue>(proc, Origin(), newBodySum, bodySum);
+ loopReentry->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loopBody));
+
+ UpsilonValue* exitSum = loopExit->appendNew<UpsilonValue>(proc, Origin(), newBodySum);
+ loopExit->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+ UpsilonValue* skipSum = loopSkip->appendNew<UpsilonValue>(proc, Origin(), startSum);
+ loopSkip->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+ Value* finalSum = continuation->appendNew<Value>(proc, Phi, Int32, Origin());
+ exitSum->setPhi(finalSum);
+ skipSum->setPhi(finalSum);
+
+ current = continuation;
+ vars[((i >> 1) + 0) % numVars] = finalSum;
+ }
+ }
+
+ current->appendNewControlValue(proc, Return, Origin(), vars[0]);
+
+ compile(proc);
+
+ double after = monotonicallyIncreasingTimeMS();
+ dataLog(toCString(" That took ", after - before, " ms.\n"));
+}
+
+void testSimplePatchpoint()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isGPR());
+ add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testSimplePatchpointWithoutOuputClobbersGPArgs()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* const1 = root->appendNew<Const64Value>(proc, Origin(), 42);
+ Value* const2 = root->appendNew<Const64Value>(proc, Origin(), 13);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ patchpoint->clobberLate(RegisterSet(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1));
+ patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), params[0].gpr());
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), params[1].gpr());
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), GPRInfo::argumentGPR0);
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), GPRInfo::argumentGPR1);
+ });
+
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), arg1, arg2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testSimplePatchpointWithOuputClobbersGPArgs()
+{
+ // We can't predict where the output will land, but we want to be sure it is not
+ // one of the clobbered registers, which is a bit hard to test directly.
+ //
+ // Instead we force the register allocator's hand by clobbering absolutely
+ // everything except one register. The only valid allocation is then to give that
+ // register to the result and spill everything else.
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* const1 = root->appendNew<Const64Value>(proc, Origin(), 42);
+ Value* const2 = root->appendNew<Const64Value>(proc, Origin(), 13);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+
+ RegisterSet clobberAll = RegisterSet::allGPRs();
+ clobberAll.exclude(RegisterSet::stackRegisters());
+ clobberAll.exclude(RegisterSet::reservedHardwareRegisters());
+ clobberAll.clear(GPRInfo::argumentGPR2);
+ patchpoint->clobberLate(clobberAll);
+
+ patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isGPR());
+ jit.move(params[1].gpr(), params[0].gpr());
+ jit.add64(params[2].gpr(), params[0].gpr());
+
+ clobberAll.forEach([&] (Reg reg) {
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), reg.gpr());
+ });
+ });
+
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), patchpoint,
+ root->appendNew<Value>(proc, Add, Origin(), arg1, arg2));
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 58);
+}
+
+void testSimplePatchpointWithoutOuputClobbersFPArgs()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* const1 = root->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+ Value* const2 = root->appendNew<ConstDoubleValue>(proc, Origin(), 13.1);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ patchpoint->clobberLate(RegisterSet(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1));
+ patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isFPR());
+ CHECK(params[1].isFPR());
+ jit.moveZeroToDouble(params[0].fpr());
+ jit.moveZeroToDouble(params[1].fpr());
+ jit.moveZeroToDouble(FPRInfo::argumentFPR0);
+ jit.moveZeroToDouble(FPRInfo::argumentFPR1);
+ });
+
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), arg1, arg2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<double>(proc, 1.5, 2.5) == 4);
+}
+
+void testSimplePatchpointWithOuputClobbersFPArgs()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* const1 = root->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+ Value* const2 = root->appendNew<ConstDoubleValue>(proc, Origin(), 13.1);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Double, Origin());
+
+ RegisterSet clobberAll = RegisterSet::allFPRs();
+ clobberAll.exclude(RegisterSet::stackRegisters());
+ clobberAll.exclude(RegisterSet::reservedHardwareRegisters());
+ clobberAll.clear(FPRInfo::argumentFPR2);
+ patchpoint->clobberLate(clobberAll);
+
+ patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0].isFPR());
+ CHECK(params[1].isFPR());
+ CHECK(params[2].isFPR());
+ jit.addDouble(params[1].fpr(), params[2].fpr(), params[0].fpr());
+
+ clobberAll.forEach([&] (Reg reg) {
+ jit.moveZeroToDouble(reg.fpr());
+ });
+ });
+
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), patchpoint,
+ root->appendNew<Value>(proc, Add, Origin(), arg1, arg2));
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<double>(proc, 1.5, 2.5) == 59.6);
+}
+
+void testPatchpointWithEarlyClobber()
+{
+ auto test = [] (GPRReg registerToClobber, bool arg1InArgGPR, bool arg2InArgGPR) {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ patchpoint->clobberEarly(RegisterSet(registerToClobber));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ CHECK((params[1].gpr() == GPRInfo::argumentGPR0) == arg1InArgGPR);
+ CHECK((params[2].gpr() == GPRInfo::argumentGPR1) == arg2InArgGPR);
+
+ add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+ });
+
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+ };
+
+ test(GPRInfo::nonArgGPR0, true, true);
+ test(GPRInfo::argumentGPR0, false, true);
+ test(GPRInfo::argumentGPR1, true, false);
+}
+
+void testPatchpointCallArg()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(ConstrainedValue(arg1, ValueRep::stackArgument(0)));
+ patchpoint->append(ConstrainedValue(arg2, ValueRep::stackArgument(8)));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isStack());
+ CHECK(params[2].isStack());
+ jit.load32(
+ CCallHelpers::Address(GPRInfo::callFrameRegister, params[1].offsetFromFP()),
+ params[0].gpr());
+ jit.add32(
+ CCallHelpers::Address(GPRInfo::callFrameRegister, params[2].offsetFromFP()),
+ params[0].gpr());
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointFixedRegister()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(ConstrainedValue(arg1, ValueRep(GPRInfo::regT0)));
+ patchpoint->append(ConstrainedValue(arg2, ValueRep(GPRInfo::regT1)));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0].isGPR());
+ CHECK(params[1] == ValueRep(GPRInfo::regT0));
+ CHECK(params[2] == ValueRep(GPRInfo::regT1));
+ add32(jit, GPRInfo::regT0, GPRInfo::regT1, params[0].gpr());
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointAny(ValueRep rep)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(ConstrainedValue(arg1, rep));
+ patchpoint->append(ConstrainedValue(arg2, rep));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ // We shouldn't have spilled the inputs, so we assert that they're in registers.
+ CHECK(params.size() == 3);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isGPR());
+ add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointGPScratch()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(arg1, ValueRep::SomeRegister);
+ patchpoint->append(arg2, ValueRep::SomeRegister);
+ patchpoint->numGPScratchRegisters = 2;
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ // We shouldn't have spilled the inputs, so we assert that they're in registers.
+ CHECK(params.size() == 3);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isGPR());
+ CHECK(params.gpScratch(0) != InvalidGPRReg);
+ CHECK(params.gpScratch(0) != params[0].gpr());
+ CHECK(params.gpScratch(0) != params[1].gpr());
+ CHECK(params.gpScratch(0) != params[2].gpr());
+ CHECK(params.gpScratch(1) != InvalidGPRReg);
+ CHECK(params.gpScratch(1) != params.gpScratch(0));
+ CHECK(params.gpScratch(1) != params[0].gpr());
+ CHECK(params.gpScratch(1) != params[1].gpr());
+ CHECK(params.gpScratch(1) != params[2].gpr());
+ CHECK(!params.unavailableRegisters().get(params.gpScratch(0)));
+ CHECK(!params.unavailableRegisters().get(params.gpScratch(1)));
+ add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointFPScratch()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(arg1, ValueRep::SomeRegister);
+ patchpoint->append(arg2, ValueRep::SomeRegister);
+ patchpoint->numFPScratchRegisters = 2;
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ // We shouldn't have spilled the inputs, so we assert that they're in registers.
+ CHECK(params.size() == 3);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isGPR());
+ CHECK(params.fpScratch(0) != InvalidFPRReg);
+ CHECK(params.fpScratch(1) != InvalidFPRReg);
+ CHECK(params.fpScratch(1) != params.fpScratch(0));
+ CHECK(!params.unavailableRegisters().get(params.fpScratch(0)));
+ CHECK(!params.unavailableRegisters().get(params.fpScratch(1)));
+ add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointLotsOfLateAnys()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Vector<int> things;
+ for (unsigned i = 200; i--;)
+ things.append(i);
+
+ Vector<Value*> values;
+ for (int& thing : things) {
+ Value* value = root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &thing));
+ values.append(value);
+ }
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ for (Value* value : values)
+ patchpoint->append(ConstrainedValue(value, ValueRep::LateColdAny));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ // With this many LateColdAny uses, some inputs may be in registers and others spilled; handle both cases.
+ CHECK(params.size() == things.size() + 1);
+ CHECK(params[0].isGPR());
+ jit.move(CCallHelpers::TrustedImm32(0), params[0].gpr());
+ for (unsigned i = 1; i < params.size(); ++i) {
+ if (params[i].isGPR()) {
+ CHECK(params[i] != params[0]);
+ jit.add32(params[i].gpr(), params[0].gpr());
+ } else {
+ CHECK(params[i].isStack());
+ jit.add32(CCallHelpers::Address(GPRInfo::callFrameRegister, params[i].offsetFromFP()), params[0].gpr());
+ }
+ }
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc) == (things.size() * (things.size() - 1)) / 2);
+}
+
+void testPatchpointAnyImm(ValueRep rep)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(ConstrainedValue(arg1, rep));
+ patchpoint->append(ConstrainedValue(arg2, rep));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isConstant());
+ CHECK(params[2].value() == 42);
+ jit.add32(
+ CCallHelpers::TrustedImm32(static_cast<int32_t>(params[2].value())),
+ params[1].gpr(), params[0].gpr());
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1) == 43);
+}
+
+void testPatchpointManyImms()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), 42);
+ Value* arg2 = root->appendNew<Const64Value>(proc, Origin(), 43);
+ Value* arg3 = root->appendNew<Const64Value>(proc, Origin(), 43000000000000ll);
+ Value* arg4 = root->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ patchpoint->append(ConstrainedValue(arg1, ValueRep::WarmAny));
+ patchpoint->append(ConstrainedValue(arg2, ValueRep::WarmAny));
+ patchpoint->append(ConstrainedValue(arg3, ValueRep::WarmAny));
+ patchpoint->append(ConstrainedValue(arg4, ValueRep::WarmAny));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+ CHECK(params.size() == 4);
+ CHECK(params[0] == ValueRep::constant(42));
+ CHECK(params[1] == ValueRep::constant(43));
+ CHECK(params[2] == ValueRep::constant(43000000000000ll));
+ CHECK(params[3] == ValueRep::constant(bitwise_cast<int64_t>(42.5)));
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(!compileAndRun<int>(proc));
+}
+
+void testPatchpointWithRegisterResult()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ patchpoint->resultConstraint = ValueRep::reg(GPRInfo::nonArgGPR0);
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0] == ValueRep::reg(GPRInfo::nonArgGPR0));
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isGPR());
+ add32(jit, params[1].gpr(), params[2].gpr(), GPRInfo::nonArgGPR0);
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointWithStackArgumentResult()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ patchpoint->resultConstraint = ValueRep::stackArgument(0);
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0] == ValueRep::stack(-static_cast<intptr_t>(proc.frameSize())));
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isGPR());
+ jit.add32(params[1].gpr(), params[2].gpr(), jit.scratchRegister());
+ jit.store32(jit.scratchRegister(), CCallHelpers::Address(CCallHelpers::stackPointerRegister, 0));
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testPatchpointWithAnyResult()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Double, Origin());
+ patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ patchpoint->resultConstraint = ValueRep::WarmAny;
+ patchpoint->clobberLate(RegisterSet::allFPRs());
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->clobber(RegisterSet(GPRInfo::regT0));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 3);
+ CHECK(params[0].isStack());
+ CHECK(params[1].isGPR());
+ CHECK(params[2].isGPR());
+ add32(jit, params[1].gpr(), params[2].gpr(), GPRInfo::regT0);
+ jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
+ jit.storeDouble(FPRInfo::fpRegT0, CCallHelpers::Address(GPRInfo::callFrameRegister, params[0].offsetFromFP()));
+ });
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ CHECK(compileAndRun<double>(proc, 1, 2) == 3);
+}
+
+void testSimpleCheck()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ CheckValue* check = root->appendNew<CheckValue>(proc, Check, Origin(), arg);
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code, 0) == 0);
+ CHECK(invoke<int>(*code, 1) == 42);
+}
+
+void testCheckFalse()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ CheckValue* check = root->appendNew<CheckValue>(
+ proc, Check, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+ check->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams&) {
+ CHECK(!"This should not have executed");
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code) == 0);
+}
+
+void testCheckTrue()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ CheckValue* check = root->appendNew<CheckValue>(
+ proc, Check, Origin(), root->appendNew<Const32Value>(proc, Origin(), 1));
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.value()->opcode() == Patchpoint);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code) == 42);
+}
+
+void testCheckLessThan()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ CheckValue* check = root->appendNew<CheckValue>(
+ proc, Check, Origin(),
+ root->appendNew<Value>(
+ proc, LessThan, Origin(), arg,
+ root->appendNew<Const32Value>(proc, Origin(), 42)));
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code, 42) == 0);
+ CHECK(invoke<int>(*code, 1000) == 0);
+ CHECK(invoke<int>(*code, 41) == 42);
+ CHECK(invoke<int>(*code, 0) == 42);
+ CHECK(invoke<int>(*code, -1) == 42);
+}
+
+void testCheckMegaCombo()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* index = root->appendNew<Value>(
+ proc, ZExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ Value* ptr = root->appendNew<Value>(
+ proc, Add, Origin(), base,
+ root->appendNew<Value>(
+ proc, Shl, Origin(), index,
+ root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+ CheckValue* check = root->appendNew<CheckValue>(
+ proc, Check, Origin(),
+ root->appendNew<Value>(
+ proc, LessThan, Origin(),
+ root->appendNew<MemoryValue>(proc, Load8S, Origin(), ptr),
+ root->appendNew<Const32Value>(proc, Origin(), 42)));
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+
+ int8_t value;
+ value = 42;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 0);
+ value = 127;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 0);
+ value = 41;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+ value = 0;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+ value = -1;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+}
+
+void testCheckTrickyMegaCombo()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* index = root->appendNew<Value>(
+ proc, ZExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)),
+ root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+ Value* ptr = root->appendNew<Value>(
+ proc, Add, Origin(), base,
+ root->appendNew<Value>(
+ proc, Shl, Origin(), index,
+ root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+ CheckValue* check = root->appendNew<CheckValue>(
+ proc, Check, Origin(),
+ root->appendNew<Value>(
+ proc, LessThan, Origin(),
+ root->appendNew<MemoryValue>(proc, Load8S, Origin(), ptr),
+ root->appendNew<Const32Value>(proc, Origin(), 42)));
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+
+ int8_t value;
+ value = 42;
+ CHECK(invoke<int>(*code, &value - 2, 0) == 0);
+ value = 127;
+ CHECK(invoke<int>(*code, &value - 2, 0) == 0);
+ value = 41;
+ CHECK(invoke<int>(*code, &value - 2, 0) == 42);
+ value = 0;
+ CHECK(invoke<int>(*code, &value - 2, 0) == 42);
+ value = -1;
+ CHECK(invoke<int>(*code, &value - 2, 0) == 42);
+}
+
+void testCheckTwoMegaCombos()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* index = root->appendNew<Value>(
+ proc, ZExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ Value* ptr = root->appendNew<Value>(
+ proc, Add, Origin(), base,
+ root->appendNew<Value>(
+ proc, Shl, Origin(), index,
+ root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+ Value* predicate = root->appendNew<Value>(
+ proc, LessThan, Origin(),
+ root->appendNew<MemoryValue>(proc, Load8S, Origin(), ptr),
+ root->appendNew<Const32Value>(proc, Origin(), 42));
+
+ CheckValue* check = root->appendNew<CheckValue>(proc, Check, Origin(), predicate);
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ CheckValue* check2 = root->appendNew<CheckValue>(proc, Check, Origin(), predicate);
+ check2->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(43), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+
+ int8_t value;
+ value = 42;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 0);
+ value = 127;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 0);
+ value = 41;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+ value = 0;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+ value = -1;
+ CHECK(invoke<int>(*code, &value - 2, 1) == 42);
+}
+
+void testCheckTwoNonRedundantMegaCombos()
+{
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* index = root->appendNew<Value>(
+ proc, ZExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* branchPredicate = root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)),
+ root->appendNew<Const32Value>(proc, Origin(), 0xff));
+
+ Value* ptr = root->appendNew<Value>(
+ proc, Add, Origin(), base,
+ root->appendNew<Value>(
+ proc, Shl, Origin(), index,
+ root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+ Value* checkPredicate = root->appendNew<Value>(
+ proc, LessThan, Origin(),
+ root->appendNew<MemoryValue>(proc, Load8S, Origin(), ptr),
+ root->appendNew<Const32Value>(proc, Origin(), 42));
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(), branchPredicate,
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ CheckValue* check = thenCase->appendNew<CheckValue>(proc, Check, Origin(), checkPredicate);
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(), thenCase->appendNew<Const32Value>(proc, Origin(), 43));
+
+ CheckValue* check2 = elseCase->appendNew<CheckValue>(proc, Check, Origin(), checkPredicate);
+ check2->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(!params.size());
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(44), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(), elseCase->appendNew<Const32Value>(proc, Origin(), 45));
+
+ auto code = compile(proc);
+
+ int8_t value;
+
+ value = 42;
+ CHECK(invoke<int>(*code, &value - 2, 1, true) == 43);
+ value = 127;
+ CHECK(invoke<int>(*code, &value - 2, 1, true) == 43);
+ value = 41;
+ CHECK(invoke<int>(*code, &value - 2, 1, true) == 42);
+ value = 0;
+ CHECK(invoke<int>(*code, &value - 2, 1, true) == 42);
+ value = -1;
+ CHECK(invoke<int>(*code, &value - 2, 1, true) == 42);
+
+ value = 42;
+ CHECK(invoke<int>(*code, &value - 2, 1, false) == 45);
+ value = 127;
+ CHECK(invoke<int>(*code, &value - 2, 1, false) == 45);
+ value = 41;
+ CHECK(invoke<int>(*code, &value - 2, 1, false) == 44);
+ value = 0;
+ CHECK(invoke<int>(*code, &value - 2, 1, false) == 44);
+ value = -1;
+ CHECK(invoke<int>(*code, &value - 2, 1, false) == 44);
+}
+
+void testCheckAddImm()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+ checkAdd->append(arg1);
+ checkAdd->append(arg2);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isConstant());
+ CHECK(params[1].value() == 42);
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+ jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0) == 42.0);
+ CHECK(invoke<double>(*code, 1) == 43.0);
+ CHECK(invoke<double>(*code, 42) == 84.0);
+ CHECK(invoke<double>(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAddImmCommute()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg2, arg1);
+ checkAdd->append(arg1);
+ checkAdd->append(arg2);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isConstant());
+ CHECK(params[1].value() == 42);
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+ jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0) == 42.0);
+ CHECK(invoke<double>(*code, 1) == 43.0);
+ CHECK(invoke<double>(*code, 42) == 84.0);
+ CHECK(invoke<double>(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAddImmSomeRegister()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+ checkAdd->appendSomeRegister(arg1);
+ checkAdd->appendSomeRegister(arg2);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0) == 42.0);
+ CHECK(invoke<double>(*code, 1) == 43.0);
+ CHECK(invoke<double>(*code, 42) == 84.0);
+ CHECK(invoke<double>(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAdd()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+ checkAdd->appendSomeRegister(arg1);
+ checkAdd->appendSomeRegister(arg2);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0, 42) == 42.0);
+ CHECK(invoke<double>(*code, 1, 42) == 43.0);
+ CHECK(invoke<double>(*code, 42, 42) == 84.0);
+ CHECK(invoke<double>(*code, 2147483647, 42) == 2147483689.0);
+}
+
+void testCheckAdd64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+ checkAdd->appendSomeRegister(arg1);
+ checkAdd->appendSomeRegister(arg2);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0ll, 42ll) == 42.0);
+ CHECK(invoke<double>(*code, 1ll, 42ll) == 43.0);
+ CHECK(invoke<double>(*code, 42ll, 42ll) == 84.0);
+ CHECK(invoke<double>(*code, 9223372036854775807ll, 42ll) == static_cast<double>(9223372036854775807ll) + 42.0);
+}
+
+void testCheckAddFold(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams&) {
+ CHECK(!"Should have been folded");
+ });
+ root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code) == a + b);
+}
+
+void testCheckAddFoldFail(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code) == 42);
+}
+
+void testCheckAddArgumentAliasing64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+
+ // Pretend to use all the args.
+ PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+ useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ // Last use of first arg (here, arg1).
+ CheckValue* checkAdd1 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+ checkAdd1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ // Last use of arg3 (arg2 is kept live below).
+ CheckValue* checkAdd2 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg3, arg2);
+ checkAdd2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ // Keep arg2 live.
+ PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ // Only use of checkAdd1 and checkAdd2.
+ CheckValue* checkAdd3 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), checkAdd1, checkAdd2);
+ checkAdd3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ root->appendNewControlValue(proc, Return, Origin(), checkAdd3);
+
+ CHECK(compileAndRun<int64_t>(proc, 1, 2, 3) == 8);
+}
+
+void testCheckAddArgumentAliasing32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* arg3 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+
+ // Pretend to use all the args.
+ PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+ useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ // Last use of first arg (here, arg1).
+ CheckValue* checkAdd1 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+ checkAdd1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ // Last use of second arg (here, arg3).
+ CheckValue* checkAdd2 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg2, arg3);
+ checkAdd2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ // Keep arg2 live.
+ PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ // Only use of checkAdd1 and checkAdd2.
+ CheckValue* checkAdd3 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), checkAdd1, checkAdd2);
+ checkAdd3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ root->appendNewControlValue(proc, Return, Origin(), checkAdd3);
+
+ CHECK(compileAndRun<int32_t>(proc, 1, 2, 3) == 8);
+}
+
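+ // CheckAdd(arg, arg) with arg also appended to the stackmap: when the doubling overflows,
+ // the generator must still see the original argument value, so the argument must not also be
+ // the destination register of the add.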
+void testCheckAddSelfOverflow64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg, arg);
+ checkAdd->append(arg);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.move(params[0].gpr(), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+
+ // Make sure the arg is not the destination of the operation.
+ PatchpointValue* opaqueUse = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ opaqueUse->append(ConstrainedValue(arg, ValueRep::SomeRegister));
+ opaqueUse->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int64_t>(*code, 0ll) == 0);
+ CHECK(invoke<int64_t>(*code, 1ll) == 2);
+ CHECK(invoke<int64_t>(*code, std::numeric_limits<int64_t>::max()) == std::numeric_limits<int64_t>::max());
+}
+
+void testCheckAddSelfOverflow32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg, arg);
+ checkAdd->append(arg);
+ checkAdd->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.move(params[0].gpr(), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+
+ // Make sure the arg is not the destination of the operation.
+ PatchpointValue* opaqueUse = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ opaqueUse->append(ConstrainedValue(arg, ValueRep::SomeRegister));
+ opaqueUse->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int32_t>(*code, 0ll) == 0);
+ CHECK(invoke<int32_t>(*code, 1ll) == 2);
+ CHECK(invoke<int32_t>(*code, std::numeric_limits<int32_t>::max()) == std::numeric_limits<int32_t>::max());
+}
+
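+ // CheckSub with a constant subtrahend: the stackmap is expected to report the 42 as a
+ // constant rather than materializing it in a register.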
+void testCheckSubImm()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 42);
+ CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+ checkSub->append(arg1);
+ checkSub->append(arg2);
+ checkSub->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isConstant());
+ CHECK(params[1].value() == 42);
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+ jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkSub));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0) == -42.0);
+ CHECK(invoke<double>(*code, 1) == -41.0);
+ CHECK(invoke<double>(*code, 42) == 0.0);
+ CHECK(invoke<double>(*code, -2147483647) == -2147483689.0);
+}
+
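+ // INT32_MIN is an awkward subtrahend immediate (it has no int32 negation), so the lowering
+ // may either keep it as a constant or move it into a register; the generator accepts both reps.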
+void testCheckSubBadImm()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ int32_t badImm = std::numeric_limits<int>::min();
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), badImm);
+ CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+ checkSub->append(arg1);
+ checkSub->append(arg2);
+ checkSub->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+
+ if (params[1].isConstant()) {
+ CHECK(params[1].value() == badImm);
+ jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(badImm), FPRInfo::fpRegT1);
+ } else {
+ CHECK(params[1].isGPR());
+ jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ }
+ jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkSub));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0) == -static_cast<double>(badImm));
+ CHECK(invoke<double>(*code, -1) == -static_cast<double>(badImm) - 1);
+ CHECK(invoke<double>(*code, 1) == -static_cast<double>(badImm) + 1);
+ CHECK(invoke<double>(*code, 42) == -static_cast<double>(badImm) + 42);
+}
+
+void testCheckSub()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+ checkSub->append(arg1);
+ checkSub->append(arg2);
+ checkSub->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkSub));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0, 42) == -42.0);
+ CHECK(invoke<double>(*code, 1, 42) == -41.0);
+ CHECK(invoke<double>(*code, 42, 42) == 0.0);
+ CHECK(invoke<double>(*code, -2147483647, 42) == -2147483689.0);
+}
+
+NEVER_INLINE double doubleSub(double a, double b)
+{
+ return a - b;
+}
+
+void testCheckSub64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+ checkSub->append(arg1);
+ checkSub->append(arg2);
+ checkSub->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkSub));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0ll, 42ll) == -42.0);
+ CHECK(invoke<double>(*code, 1ll, 42ll) == -41.0);
+ CHECK(invoke<double>(*code, 42ll, 42ll) == 0.0);
+ CHECK(invoke<double>(*code, -9223372036854775807ll, 42ll) == doubleSub(static_cast<double>(-9223372036854775807ll), 42.0));
+}
+
+void testCheckSubFold(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+ CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+ checkSub->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams&) {
+ CHECK(!"Should have been folded");
+ });
+ root->appendNewControlValue(proc, Return, Origin(), checkSub);
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code) == a - b);
+}
+
+void testCheckSubFoldFail(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+ CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+ checkSub->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(proc, Return, Origin(), checkSub);
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code) == 42);
+}
+
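+ // CheckSub(0, x) acts as a checked negation. Only x is appended to the stackmap, so the
+ // generator sees a single parameter; it runs only for INT32_MIN, whose negation overflows,
+ // and returns the negated value as a double.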
+void testCheckNeg()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), 0);
+ Value* arg2 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ CheckValue* checkNeg = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+ checkNeg->append(arg2);
+ checkNeg->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 1);
+ CHECK(params[0].isGPR());
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT1);
+ jit.negateDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkNeg));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0) == 0.0);
+ CHECK(invoke<double>(*code, 1) == -1.0);
+ CHECK(invoke<double>(*code, 42) == -42.0);
+ CHECK(invoke<double>(*code, -2147483647 - 1) == 2147483648.0);
+}
+
+void testCheckNeg64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const64Value>(proc, Origin(), 0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ CheckValue* checkNeg = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+ checkNeg->append(arg2);
+ checkNeg->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 1);
+ CHECK(params[0].isGPR());
+ jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT1);
+ jit.negateDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkNeg));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0ll) == 0.0);
+ CHECK(invoke<double>(*code, 1ll) == -1.0);
+ CHECK(invoke<double>(*code, 42ll) == -42.0);
+ CHECK(invoke<double>(*code, -9223372036854775807ll - 1) == 9223372036854775808.0);
+}
+
+void testCheckMul()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul->append(arg1);
+ checkMul->append(arg2);
+ checkMul->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0, 42) == 0.0);
+ CHECK(invoke<double>(*code, 1, 42) == 42.0);
+ CHECK(invoke<double>(*code, 42, 42) == 42.0 * 42.0);
+ CHECK(invoke<double>(*code, 2147483647, 42) == 2147483647.0 * 42.0);
+}
+
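+ // Same as testCheckMul, except the operands are loaded through pointers to locals so their
+ // values can be changed between invocations of the compiled code.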
+void testCheckMulMemory()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ int left;
+ int right;
+
+ Value* arg1 = root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &left));
+ Value* arg2 = root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &right));
+ CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul->append(arg1);
+ checkMul->append(arg2);
+ checkMul->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+ auto code = compile(proc);
+
+ left = 0;
+ right = 42;
+ CHECK(invoke<double>(*code) == 0.0);
+
+ left = 1;
+ right = 42;
+ CHECK(invoke<double>(*code) == 42.0);
+
+ left = 42;
+ right = 42;
+ CHECK(invoke<double>(*code) == 42.0 * 42.0);
+
+ left = 2147483647;
+ right = 42;
+ CHECK(invoke<double>(*code) == 2147483647.0 * 42.0);
+}
+
+void testCheckMul2()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), 2);
+ CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul->append(arg1);
+ checkMul->append(arg2);
+ checkMul->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isConstant());
+ CHECK(params[1].value() == 2);
+ jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(2), FPRInfo::fpRegT1);
+ jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0) == 0.0);
+ CHECK(invoke<double>(*code, 1) == 2.0);
+ CHECK(invoke<double>(*code, 42) == 42.0 * 2.0);
+ CHECK(invoke<double>(*code, 2147483647) == 2147483647.0 * 2.0);
+}
+
+void testCheckMul64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul->append(arg1);
+ checkMul->append(arg2);
+ checkMul->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0, 42) == 0.0);
+ CHECK(invoke<double>(*code, 1, 42) == 42.0);
+ CHECK(invoke<double>(*code, 42, 42) == 42.0 * 42.0);
+ CHECK(invoke<double>(*code, 9223372036854775807ll, 42) == static_cast<double>(9223372036854775807ll) * 42.0);
+}
+
+void testCheckMulFold(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+ CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams&) {
+ CHECK(!"Should have been folded");
+ });
+ root->appendNewControlValue(proc, Return, Origin(), checkMul);
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code) == a * b);
+}
+
+void testCheckMulFoldFail(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+ Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+ CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(proc, Return, Origin(), checkMul);
+
+ auto code = compile(proc);
+
+ CHECK(invoke<int>(*code) == 42);
+}
+
+void testCheckMulArgumentAliasing64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+
+ // Pretend to use all the args.
+ PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+ useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ // Last use of first arg (here, arg1).
+ CheckValue* checkMul1 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ // Last use of arg3 (arg2 is kept live below).
+ CheckValue* checkMul2 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg3, arg2);
+ checkMul2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ // Keep arg2 live.
+ PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ // Only use of checkMul1 and checkMul2.
+ CheckValue* checkMul3 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), checkMul1, checkMul2);
+ checkMul3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ root->appendNewControlValue(proc, Return, Origin(), checkMul3);
+
+ CHECK(compileAndRun<int64_t>(proc, 2, 3, 4) == 72);
+}
+
+void testCheckMulArgumentAliasing32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* arg3 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+
+ // Pretend to use all the args.
+ PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+ useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+ useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ // Last use of first arg (here, arg1).
+ CheckValue* checkMul1 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ // Last use of second arg (here, arg3).
+ CheckValue* checkMul2 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg2, arg3);
+ checkMul2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ // Keep arg2 live.
+ PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ // Only use of checkMul1 and checkMul2.
+ CheckValue* checkMul3 = root->appendNew<CheckValue>(proc, CheckMul, Origin(), checkMul1, checkMul2);
+ checkMul3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+ root->appendNewControlValue(proc, Return, Origin(), checkMul3);
+
+ CHECK(compileAndRun<int32_t>(proc, 2, 3, 4) == 72);
+}
+
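+ // The operands are shifted right by one before the CheckMul. In the last case,
+ // 5000000000 * 5000000000 overflows int64, so the result comes from the generator's
+ // double path.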
+void testCheckMul64SShr()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const32Value>(proc, Origin(), 1));
+ Value* arg2 = root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 1));
+ CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+ checkMul->append(arg1);
+ checkMul->append(arg2);
+ checkMul->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 2);
+ CHECK(params[0].isGPR());
+ CHECK(params[1].isGPR());
+ jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+ jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+ jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, IToD, Origin(), checkMul));
+
+ auto code = compile(proc);
+
+ CHECK(invoke<double>(*code, 0ll, 42ll) == 0.0);
+ CHECK(invoke<double>(*code, 1ll, 42ll) == 0.0);
+ CHECK(invoke<double>(*code, 42ll, 42ll) == (42.0 / 2.0) * (42.0 / 2.0));
+ CHECK(invoke<double>(*code, 10000000000ll, 10000000000ll) == 25000000000000000000.0);
+}
+
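+ // Compiles the comparison twice: once materialized into a 0/1 value and once as a branch
+ // condition (the patchpoint in the taken block keeps the branch from being if-converted),
+ // and checks both against the expected result.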
+template<typename LeftFunctor, typename RightFunctor, typename InputType>
+void genericTestCompare(
+ B3::Opcode opcode, const LeftFunctor& leftFunctor, const RightFunctor& rightFunctor,
+ InputType left, InputType right, int result)
+{
+ // Using a compare.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* leftValue = leftFunctor(root, proc);
+ Value* rightValue = rightFunctor(root, proc);
+ Value* comparisonResult = root->appendNew<Value>(proc, opcode, Origin(), leftValue, rightValue);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, NotEqual, Origin(),
+ comparisonResult,
+ root->appendIntConstant(proc, Origin(), comparisonResult->type(), 0)));
+
+ CHECK(compileAndRun<int>(proc, left, right) == result);
+ }
+
+ // Using a branch.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* leftValue = leftFunctor(root, proc);
+ Value* rightValue = rightFunctor(root, proc);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), leftValue, rightValue),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ // We use a patchpoint on the then case to ensure that this doesn't get if-converted.
+ PatchpointValue* patchpoint = thenCase->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params.size() == 1);
+ CHECK(params[0].isGPR());
+ jit.move(CCallHelpers::TrustedImm32(1), params[0].gpr());
+ });
+ thenCase->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+ CHECK(compileAndRun<int>(proc, left, right) == result);
+ }
+}
+
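+ // Reference implementation of the comparison opcodes in plain C++, used to compute the
+ // expected results for the comparison tests below.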
+template<typename InputType>
+InputType modelCompare(B3::Opcode opcode, InputType left, InputType right)
+{
+ switch (opcode) {
+ case Equal:
+ return left == right;
+ case NotEqual:
+ return left != right;
+ case LessThan:
+ return left < right;
+ case GreaterThan:
+ return left > right;
+ case LessEqual:
+ return left <= right;
+ case GreaterEqual:
+ return left >= right;
+ case Above:
+ return static_cast<typename std::make_unsigned<InputType>::type>(left) >
+ static_cast<typename std::make_unsigned<InputType>::type>(right);
+ case Below:
+ return static_cast<typename std::make_unsigned<InputType>::type>(left) <
+ static_cast<typename std::make_unsigned<InputType>::type>(right);
+ case AboveEqual:
+ return static_cast<typename std::make_unsigned<InputType>::type>(left) >=
+ static_cast<typename std::make_unsigned<InputType>::type>(right);
+ case BelowEqual:
+ return static_cast<typename std::make_unsigned<InputType>::type>(left) <=
+ static_cast<typename std::make_unsigned<InputType>::type>(right);
+ case BitAnd:
+ return !!(left & right);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
+template<typename T>
+void testCompareLoad(B3::Opcode opcode, B3::Opcode loadOpcode, int left, int right)
+{
+ int result = modelCompare(opcode, modelLoad<T>(left), right);
+
+ // Test addr-to-tmp
+ int slot = left;
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<MemoryValue>(
+ proc, loadOpcode, Int32, Origin(),
+ block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Value>(
+ proc, Trunc, Origin(),
+ block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ },
+ left, right, result);
+
+ // Test addr-to-imm
+ slot = left;
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<MemoryValue>(
+ proc, loadOpcode, Int32, Origin(),
+ block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const32Value>(proc, Origin(), right);
+ },
+ left, right, result);
+
+ result = modelCompare(opcode, left, modelLoad<T>(right));
+
+ // Test tmp-to-addr
+ slot = right;
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Value>(
+ proc, Trunc, Origin(),
+ block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<MemoryValue>(
+ proc, loadOpcode, Int32, Origin(),
+ block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+ },
+ left, right, result);
+
+ // Test imm-to-addr
+ slot = right;
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const32Value>(proc, Origin(), left);
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<MemoryValue>(
+ proc, loadOpcode, Int32, Origin(),
+ block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+ },
+ left, right, result);
+
+ // Test addr-to-addr, with the same addr.
+ slot = left;
+ Value* value;
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ value = block->appendNew<MemoryValue>(
+ proc, loadOpcode, Int32, Origin(),
+ block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+ return value;
+ },
+ [&] (BasicBlock*, Procedure&) {
+ return value;
+ },
+ left, left, modelCompare(opcode, modelLoad<T>(left), modelLoad<T>(left)));
+}
+
+void testCompareImpl(B3::Opcode opcode, int64_t left, int64_t right)
+{
+ int64_t result = modelCompare(opcode, left, right);
+ int32_t int32Result = modelCompare(opcode, static_cast<int32_t>(left), static_cast<int32_t>(right));
+
+ // Test tmp-to-tmp.
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ },
+ left, right, result);
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Value>(
+ proc, Trunc, Origin(),
+ block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Value>(
+ proc, Trunc, Origin(),
+ block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ },
+ left, right, int32Result);
+
+ // Test imm-to-tmp.
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const64Value>(proc, Origin(), left);
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ },
+ left, right, result);
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const32Value>(proc, Origin(), left);
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Value>(
+ proc, Trunc, Origin(),
+ block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ },
+ left, right, int32Result);
+
+ // Test tmp-to-imm.
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const64Value>(proc, Origin(), right);
+ },
+ left, right, result);
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Value>(
+ proc, Trunc, Origin(),
+ block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const32Value>(proc, Origin(), right);
+ },
+ left, right, int32Result);
+
+ // Test imm-to-imm.
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const64Value>(proc, Origin(), left);
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const64Value>(proc, Origin(), right);
+ },
+ left, right, result);
+ genericTestCompare(
+ opcode,
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const32Value>(proc, Origin(), left);
+ },
+ [&] (BasicBlock* block, Procedure& proc) {
+ return block->appendNew<Const32Value>(proc, Origin(), right);
+ },
+ left, right, int32Result);
+
+ testCompareLoad<int32_t>(opcode, Load, left, right);
+ testCompareLoad<int8_t>(opcode, Load8S, left, right);
+ testCompareLoad<uint8_t>(opcode, Load8Z, left, right);
+ testCompareLoad<int16_t>(opcode, Load16S, left, right);
+ testCompareLoad<uint16_t>(opcode, Load16Z, left, right);
+}
+
+void testCompare(B3::Opcode opcode, int64_t left, int64_t right)
+{
+ testCompareImpl(opcode, left, right);
+ testCompareImpl(opcode, left, right + 1);
+ testCompareImpl(opcode, left, right - 1);
+}
+
+void testEqualDouble(double left, double right, bool result)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+ CHECK(compileAndRun<bool>(proc, left, right) == result);
+}
+
+int simpleFunction(int a, int b)
+{
+ return a + b;
+}
+
+void testCallSimple(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<CCallValue>(
+ proc, Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
+void testCallRare(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* common = proc.addBlock();
+ BasicBlock* rare = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ FrequentedBlock(rare, FrequencyClass::Rare),
+ FrequentedBlock(common));
+
+ common->appendNewControlValue(
+ proc, Return, Origin(), common->appendNew<Const32Value>(proc, Origin(), 0));
+
+ rare->appendNewControlValue(
+ proc, Return, Origin(),
+ rare->appendNew<CCallValue>(
+ proc, Int32, Origin(),
+ rare->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+ rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+ CHECK(compileAndRun<int>(proc, true, a, b) == a + b);
+}
+
+void testCallRareLive(int a, int b, int c)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* common = proc.addBlock();
+ BasicBlock* rare = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ FrequentedBlock(rare, FrequencyClass::Rare),
+ FrequentedBlock(common));
+
+ common->appendNewControlValue(
+ proc, Return, Origin(), common->appendNew<Const32Value>(proc, Origin(), 0));
+
+ rare->appendNewControlValue(
+ proc, Return, Origin(),
+ rare->appendNew<Value>(
+ proc, Add, Origin(),
+ rare->appendNew<CCallValue>(
+ proc, Int32, Origin(),
+ rare->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+ rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)),
+ rare->appendNew<Value>(
+ proc, Trunc, Origin(),
+ rare->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3))));
+
+ CHECK(compileAndRun<int>(proc, true, a, b, c) == a + b + c);
+}
+
+void testCallSimplePure(int a, int b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<CCallValue>(
+ proc, Int32, Origin(), Effects::none(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
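+ // Weights each of the 26 arguments by a distinct power of two, so getting the argument
+ // order wrong changes the result.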
+int functionWithHellaArguments(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m, int n, int o, int p, int q, int r, int s, int t, int u, int v, int w, int x, int y, int z)
+{
+ return (a << 0) + (b << 1) + (c << 2) + (d << 3) + (e << 4) + (f << 5) + (g << 6) + (h << 7) + (i << 8) + (j << 9) + (k << 10) + (l << 11) + (m << 12) + (n << 13) + (o << 14) + (p << 15) + (q << 16) + (r << 17) + (s << 18) + (t << 19) + (u << 20) + (v << 21) + (w << 22) + (x << 23) + (y << 24) + (z << 25);
+}
+
+void testCallFunctionWithHellaArguments()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Vector<Value*> args;
+ for (unsigned i = 0; i < 26; ++i)
+ args.append(root->appendNew<Const32Value>(proc, Origin(), i + 1));
+
+ CCallValue* call = root->appendNew<CCallValue>(
+ proc, Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaArguments)));
+ call->children().appendVector(args);
+
+ root->appendNewControlValue(proc, Return, Origin(), call);
+
+ CHECK(compileAndRun<int>(proc) == functionWithHellaArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
+void testReturnDouble(double value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<ConstDoubleValue>(proc, Origin(), value));
+
+ CHECK(isIdentical(compileAndRun<double>(proc), value));
+}
+
+void testReturnFloat(float value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<ConstFloatValue>(proc, Origin(), value));
+
+ CHECK(isIdentical(compileAndRun<float>(proc), value));
+}
+
+double simpleFunctionDouble(double a, double b)
+{
+ return a + b;
+}
+
+void testCallSimpleDouble(double a, double b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<CCallValue>(
+ proc, Double, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunctionDouble)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+ CHECK(compileAndRun<double>(proc, a, b) == a + b);
+}
+
+float simpleFunctionFloat(float a, float b)
+{
+ return a + b;
+}
+
+void testCallSimpleFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<CCallValue>(
+ proc, Float, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunctionFloat)),
+ floatValue1,
+ floatValue2));
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), a + b));
+}
+
+double functionWithHellaDoubleArguments(double a, double b, double c, double d, double e, double f, double g, double h, double i, double j, double k, double l, double m, double n, double o, double p, double q, double r, double s, double t, double u, double v, double w, double x, double y, double z)
+{
+ return a * pow(2, 0) + b * pow(2, 1) + c * pow(2, 2) + d * pow(2, 3) + e * pow(2, 4) + f * pow(2, 5) + g * pow(2, 6) + h * pow(2, 7) + i * pow(2, 8) + j * pow(2, 9) + k * pow(2, 10) + l * pow(2, 11) + m * pow(2, 12) + n * pow(2, 13) + o * pow(2, 14) + p * pow(2, 15) + q * pow(2, 16) + r * pow(2, 17) + s * pow(2, 18) + t * pow(2, 19) + u * pow(2, 20) + v * pow(2, 21) + w * pow(2, 22) + x * pow(2, 23) + y * pow(2, 24) + z * pow(2, 25);
+}
+
+void testCallFunctionWithHellaDoubleArguments()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Vector<Value*> args;
+ for (unsigned i = 0; i < 26; ++i)
+ args.append(root->appendNew<ConstDoubleValue>(proc, Origin(), i + 1));
+
+ CCallValue* call = root->appendNew<CCallValue>(
+ proc, Double, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaDoubleArguments)));
+ call->children().appendVector(args);
+
+ root->appendNewControlValue(proc, Return, Origin(), call);
+
+ CHECK(compileAndRun<double>(proc) == functionWithHellaDoubleArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
+float functionWithHellaFloatArguments(float a, float b, float c, float d, float e, float f, float g, float h, float i, float j, float k, float l, float m, float n, float o, float p, float q, float r, float s, float t, float u, float v, float w, float x, float y, float z)
+{
+ return a * pow(2, 0) + b * pow(2, 1) + c * pow(2, 2) + d * pow(2, 3) + e * pow(2, 4) + f * pow(2, 5) + g * pow(2, 6) + h * pow(2, 7) + i * pow(2, 8) + j * pow(2, 9) + k * pow(2, 10) + l * pow(2, 11) + m * pow(2, 12) + n * pow(2, 13) + o * pow(2, 14) + p * pow(2, 15) + q * pow(2, 16) + r * pow(2, 17) + s * pow(2, 18) + t * pow(2, 19) + u * pow(2, 20) + v * pow(2, 21) + w * pow(2, 22) + x * pow(2, 23) + y * pow(2, 24) + z * pow(2, 25);
+}
+
+void testCallFunctionWithHellaFloatArguments()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Vector<Value*> args;
+ for (unsigned i = 0; i < 26; ++i)
+ args.append(root->appendNew<ConstFloatValue>(proc, Origin(), i + 1));
+
+ CCallValue* call = root->appendNew<CCallValue>(
+ proc, Float, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaFloatArguments)));
+ call->children().appendVector(args);
+
+ root->appendNewControlValue(proc, Return, Origin(), call);
+
+ CHECK(compileAndRun<float>(proc) == functionWithHellaFloatArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
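+ // chill(Div) is the division variant with defined results for the inputs that would
+ // otherwise trap. Each case is compiled twice, once with register operands and once with
+ // constant operands, and compared against the caller-supplied expected result.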
+void testChillDiv(int num, int den, int res)
+{
+ // Test non-constant.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, chill(Div), Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+ CHECK(compileAndRun<int>(proc, num, den) == res);
+ }
+
+ // Test constant.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, chill(Div), Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), num),
+ root->appendNew<Const32Value>(proc, Origin(), den)));
+
+ CHECK(compileAndRun<int>(proc) == res);
+ }
+}
+
+void testChillDivTwice(int num1, int den1, int num2, int den2, int res)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, chill(Div), Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))),
+ root->appendNew<Value>(
+ proc, chill(Div), Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)))));
+
+ CHECK(compileAndRun<int>(proc, num1, den1, num2, den2) == res);
+}
+
+void testChillDiv64(int64_t num, int64_t den, int64_t res)
+{
+ if (!is64Bit())
+ return;
+
+ // Test non-constant.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, chill(Div), Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ CHECK(compileAndRun<int64_t>(proc, num, den) == res);
+ }
+
+ // Test constant.
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, chill(Div), Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), num),
+ root->appendNew<Const64Value>(proc, Origin(), den)));
+
+ CHECK(compileAndRun<int64_t>(proc) == res);
+ }
+}
+
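+ // The plain Mod tests below bail out on a zero denominator and on INT_MIN % -1, the inputs
+ // a non-chill Mod is not required to handle.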
+void testModArg(int64_t value)
+{
+ if (!value)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(!compileAndRun<int64_t>(proc, value));
+}
+
+void testModArgs(int64_t numerator, int64_t denominator)
+{
+ if (!denominator)
+ return;
+ if (numerator == std::numeric_limits<int64_t>::min() && denominator == -1)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModImms(int64_t numerator, int64_t denominator)
+{
+ if (!denominator)
+ return;
+ if (numerator == std::numeric_limits<int64_t>::min() && denominator == -1)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument1 = root->appendNew<Const64Value>(proc, Origin(), numerator);
+ Value* argument2 = root->appendNew<Const64Value>(proc, Origin(), denominator);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModArg32(int32_t value)
+{
+ if (!value)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(!compileAndRun<int32_t>(proc, value));
+}
+
+void testModArgs32(int32_t numerator, int32_t denominator)
+{
+ if (!denominator)
+ return;
+ if (numerator == std::numeric_limits<int32_t>::min() && denominator == -1)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModImms32(int32_t numerator, int32_t denominator)
+{
+ if (!denominator)
+ return;
+ if (numerator == std::numeric_limits<int32_t>::min() && denominator == -1)
+ return;
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument1 = root->appendNew<Const32Value>(proc, Origin(), numerator);
+ Value* argument2 = root->appendNew<Const32Value>(proc, Origin(), denominator);
+ Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
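+ // The chill(Mod) variants do not skip any inputs; results are checked against the chillMod()
+ // helper rather than the C++ % operator.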
+void testChillModArg(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(!compileAndRun<int64_t>(proc, value));
+}
+
+void testChillModArgs(int64_t numerator, int64_t denominator)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModImms(int64_t numerator, int64_t denominator)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument1 = root->appendNew<Const64Value>(proc, Origin(), numerator);
+ Value* argument2 = root->appendNew<Const64Value>(proc, Origin(), denominator);
+ Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModArg32(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument, argument);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(!compileAndRun<int32_t>(proc, value));
+}
+
+void testChillModArgs32(int32_t numerator, int32_t denominator)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModImms32(int32_t numerator, int32_t denominator)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* argument1 = root->appendNew<Const32Value>(proc, Origin(), numerator);
+ Value* argument2 = root->appendNew<Const32Value>(proc, Origin(), denominator);
+ Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
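+ // Builds a switch with `degree` cases spaced `gap` apart. Each case returns one of the two
+ // extra arguments (alternating by case index), and any value that misses a case falls
+ // through and returns 0.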
+void testSwitch(unsigned degree, unsigned gap = 1)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ BasicBlock* terminate = proc.addBlock();
+ terminate->appendNewControlValue(
+ proc, Return, Origin(),
+ terminate->appendNew<Const32Value>(proc, Origin(), 0));
+
+ SwitchValue* switchValue = root->appendNew<SwitchValue>(
+ proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ switchValue->setFallThrough(FrequentedBlock(terminate));
+
+ for (unsigned i = 0; i < degree; ++i) {
+ BasicBlock* newBlock = proc.addBlock();
+ newBlock->appendNewControlValue(
+ proc, Return, Origin(),
+ newBlock->appendNew<ArgumentRegValue>(
+ proc, Origin(), (i & 1) ? GPRInfo::argumentGPR2 : GPRInfo::argumentGPR1));
+ switchValue->appendCase(SwitchCase(gap * i, FrequentedBlock(newBlock)));
+ }
+
+ auto code = compile(proc);
+
+ for (unsigned i = 0; i < degree; ++i) {
+ CHECK(invoke<int32_t>(*code, i * gap, 42, 11) == ((i & 1) ? 11 : 42));
+ if (gap > 1) {
+ CHECK(!invoke<int32_t>(*code, i * gap + 1, 42, 11));
+ CHECK(!invoke<int32_t>(*code, i * gap - 1, 42, 11));
+ }
+ }
+
+ CHECK(!invoke<int32_t>(*code, -1, 42, 11));
+ CHECK(!invoke<int32_t>(*code, degree * gap, 42, 11));
+ CHECK(!invoke<int32_t>(*code, degree * gap + 1, 42, 11));
+}
+
+void testSwitchChillDiv(unsigned degree, unsigned gap = 1)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* right = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+
+ BasicBlock* terminate = proc.addBlock();
+ terminate->appendNewControlValue(
+ proc, Return, Origin(),
+ terminate->appendNew<Const32Value>(proc, Origin(), 0));
+
+ SwitchValue* switchValue = root->appendNew<SwitchValue>(
+ proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ switchValue->setFallThrough(FrequentedBlock(terminate));
+
+ for (unsigned i = 0; i < degree; ++i) {
+ BasicBlock* newBlock = proc.addBlock();
+
+ newBlock->appendNewControlValue(
+ proc, Return, Origin(),
+ newBlock->appendNew<Value>(
+ proc, chill(Div), Origin(), (i & 1) ? right : left, (i & 1) ? left : right));
+
+ switchValue->appendCase(SwitchCase(gap * i, FrequentedBlock(newBlock)));
+ }
+
+ auto code = compile(proc);
+
+ for (unsigned i = 0; i < degree; ++i) {
+ dataLog("i = ", i, "\n");
+ int32_t result = invoke<int32_t>(*code, i * gap, 42, 11);
+ dataLog("result = ", result, "\n");
+ CHECK(result == ((i & 1) ? 11/42 : 42/11));
+ if (gap > 1) {
+ CHECK(!invoke<int32_t>(*code, i * gap + 1, 42, 11));
+ CHECK(!invoke<int32_t>(*code, i * gap - 1, 42, 11));
+ }
+ }
+
+ CHECK(!invoke<int32_t>(*code, -1, 42, 11));
+ CHECK(!invoke<int32_t>(*code, degree * gap, 42, 11));
+ CHECK(!invoke<int32_t>(*code, degree * gap + 1, 42, 11));
+}
+
+void testSwitchTargettingSameBlock()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ BasicBlock* terminate = proc.addBlock();
+ terminate->appendNewControlValue(
+ proc, Return, Origin(),
+ terminate->appendNew<Const32Value>(proc, Origin(), 5));
+
+ SwitchValue* switchValue = root->appendNew<SwitchValue>(
+ proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ switchValue->setFallThrough(FrequentedBlock(terminate));
+
+ BasicBlock* otherTarget = proc.addBlock();
+ otherTarget->appendNewControlValue(
+ proc, Return, Origin(),
+ otherTarget->appendNew<Const32Value>(proc, Origin(), 42));
+ switchValue->appendCase(SwitchCase(3, FrequentedBlock(otherTarget)));
+ switchValue->appendCase(SwitchCase(13, FrequentedBlock(otherTarget)));
+
+ auto code = compile(proc);
+
+ for (unsigned i = 0; i < 20; ++i) {
+ int32_t expected = (i == 3 || i == 13) ? 42 : 5;
+ CHECK(invoke<int32_t>(*code, i) == expected);
+ }
+}
+
+void testSwitchTargettingSameBlockFoldPathConstant()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ BasicBlock* terminate = proc.addBlock();
+ terminate->appendNewControlValue(
+ proc, Return, Origin(),
+ terminate->appendNew<Const32Value>(proc, Origin(), 42));
+
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ SwitchValue* switchValue = root->appendNew<SwitchValue>(proc, Origin(), argument);
+ switchValue->setFallThrough(FrequentedBlock(terminate));
+
+ BasicBlock* otherTarget = proc.addBlock();
+ otherTarget->appendNewControlValue(
+ proc, Return, Origin(), argument);
+ switchValue->appendCase(SwitchCase(3, FrequentedBlock(otherTarget)));
+ switchValue->appendCase(SwitchCase(13, FrequentedBlock(otherTarget)));
+
+ auto code = compile(proc);
+
+ for (unsigned i = 0; i < 20; ++i) {
+ int32_t expected = (i == 3 || i == 13) ? i : 42;
+ CHECK(invoke<int32_t>(*code, i) == expected);
+ }
+}
+
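+// The conversion tests below pair each opcode with a *Fold variant: the Fold version uses
+// constant operands so the conversion can be folded at compile time, while the plain
+// version reads from an argument register; both are expected to match the equivalent C cast.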
+void testTruncFold(int64_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<Const64Value>(proc, Origin(), value)));
+
+ CHECK(compileAndRun<int>(proc) == static_cast<int>(value));
+}
+
+void testZExt32(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<uint64_t>(proc, value) == static_cast<uint64_t>(static_cast<uint32_t>(value)));
+}
+
+void testZExt32Fold(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZExt32, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), value)));
+
+ CHECK(compileAndRun<uint64_t>(proc, value) == static_cast<uint64_t>(static_cast<uint32_t>(value)));
+}
+
+void testSExt32(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value));
+}
+
+void testSExt32Fold(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt32, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), value)));
+
+ CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value));
+}
+
+void testTruncZExt32(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<Value>(
+ proc, ZExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == value);
+}
+
+void testTruncSExt32(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<Value>(
+ proc, SExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == value);
+}
+
+void testSExt8(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt8, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8Fold(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt8, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), value)));
+
+ CHECK(compileAndRun<int32_t>(proc) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8SExt8(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt8, Origin(),
+ root->appendNew<Value>(
+ proc, SExt8, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8SExt16(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt8, Origin(),
+ root->appendNew<Value>(
+ proc, SExt16, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8BitAnd(int32_t value, int32_t mask)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt8, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value & mask)));
+}
+
+void testBitAndSExt8(int32_t value, int32_t mask)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, SExt8, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ root->appendNew<Const32Value>(proc, Origin(), mask)));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == (static_cast<int32_t>(static_cast<int8_t>(value)) & mask));
+}
+
+void testSExt16(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt16, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16Fold(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt16, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), value)));
+
+ CHECK(compileAndRun<int32_t>(proc) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16SExt16(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt16, Origin(),
+ root->appendNew<Value>(
+ proc, SExt16, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16SExt8(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt16, Origin(),
+ root->appendNew<Value>(
+ proc, SExt8, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt16BitAnd(int32_t value, int32_t mask)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt16, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value & mask)));
+}
+
+void testBitAndSExt16(int32_t value, int32_t mask)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, SExt16, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ root->appendNew<Const32Value>(proc, Origin(), mask)));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == (static_cast<int32_t>(static_cast<int16_t>(value)) & mask));
+}
+
+void testSExt32BitAnd(int32_t value, int32_t mask)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SExt32, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+ CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value & mask));
+}
+
+void testBitAndSExt32(int32_t value, int64_t mask)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, SExt32, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+ root->appendNew<Const64Value>(proc, Origin(), mask)));
+
+ CHECK(compileAndRun<int64_t>(proc, value) == (static_cast<int64_t>(value) & mask));
+}
+
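+// Select takes a condition plus thenCase and elseCase children; a non-zero condition picks
+// the thenCase. The tests below drive the condition from a comparison or directly from an
+// argument, and cover integer, double, and float selected values.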
+void testBasicSelect()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+ auto code = compile(proc);
+ CHECK(invoke<intptr_t>(*code, 42, 1, 2) == 1);
+ CHECK(invoke<intptr_t>(*code, 42, 642462, 32533) == 642462);
+ CHECK(invoke<intptr_t>(*code, 43, 1, 2) == 2);
+ CHECK(invoke<intptr_t>(*code, 43, 642462, 32533) == 32533);
+}
+
+void testSelectTest()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+ auto code = compile(proc);
+ CHECK(invoke<intptr_t>(*code, 42, 1, 2) == 1);
+ CHECK(invoke<intptr_t>(*code, 42, 642462, 32533) == 642462);
+ CHECK(invoke<intptr_t>(*code, 0, 1, 2) == 2);
+ CHECK(invoke<intptr_t>(*code, 0, 642462, 32533) == 32533);
+}
+
+void testSelectCompareDouble()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, LessThan, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ auto code = compile(proc);
+ CHECK(invoke<intptr_t>(*code, -1.0, 1.0, 1, 2) == 1);
+ CHECK(invoke<intptr_t>(*code, 42.5, 42.51, 642462, 32533) == 642462);
+ CHECK(invoke<intptr_t>(*code, PNaN, 0.0, 1, 2) == 2);
+ CHECK(invoke<intptr_t>(*code, 42.51, 42.5, 642462, 32533) == 32533);
+ CHECK(invoke<intptr_t>(*code, 42.52, 42.52, 524978245, 352) == 352);
+}
+
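+// Float comparison tests pass each float as its raw int32 bit pattern in a GPR and rebuild
+// it with Trunc followed by BitwiseCast before the comparison feeds the Select.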
+template<B3::Opcode opcode>
+void testSelectCompareFloat(float a, float b, bool (*operation)(float, float))
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, opcode, Origin(),
+ floatValue1,
+ floatValue2),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), 42, -5), operation(a, b) ? 42 : -5));
+}
+
+void testSelectCompareFloat(float a, float b)
+{
+ testSelectCompareFloat<Equal>(a, b, [](float a, float b) -> bool { return a == b; });
+ testSelectCompareFloat<NotEqual>(a, b, [](float a, float b) -> bool { return a != b; });
+ testSelectCompareFloat<LessThan>(a, b, [](float a, float b) -> bool { return a < b; });
+ testSelectCompareFloat<GreaterThan>(a, b, [](float a, float b) -> bool { return a > b; });
+ testSelectCompareFloat<LessEqual>(a, b, [](float a, float b) -> bool { return a <= b; });
+ testSelectCompareFloat<GreaterEqual>(a, b, [](float a, float b) -> bool { return a >= b; });
+ testSelectCompareFloat<EqualOrUnordered>(a, b, [](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+template<B3::Opcode opcode>
+void testSelectCompareFloatToDouble(float a, float b, bool (*operation)(float, float))
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* doubleValue1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+ Value* doubleValue2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, opcode, Origin(),
+ doubleValue1,
+ doubleValue2),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+ CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), 42, -5), operation(a, b) ? 42 : -5));
+}
+
+void testSelectCompareFloatToDouble(float a, float b)
+{
+ testSelectCompareFloatToDouble<Equal>(a, b, [](float a, float b) -> bool { return a == b; });
+ testSelectCompareFloatToDouble<NotEqual>(a, b, [](float a, float b) -> bool { return a != b; });
+ testSelectCompareFloatToDouble<LessThan>(a, b, [](float a, float b) -> bool { return a < b; });
+ testSelectCompareFloatToDouble<GreaterThan>(a, b, [](float a, float b) -> bool { return a > b; });
+ testSelectCompareFloatToDouble<LessEqual>(a, b, [](float a, float b) -> bool { return a <= b; });
+ testSelectCompareFloatToDouble<GreaterEqual>(a, b, [](float a, float b) -> bool { return a >= b; });
+ testSelectCompareFloatToDouble<EqualOrUnordered>(a, b, [](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+void testSelectDouble()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+ auto code = compile(proc);
+ CHECK(invoke<double>(*code, 42, 1.5, 2.6) == 1.5);
+ CHECK(invoke<double>(*code, 42, 642462.7, 32533.8) == 642462.7);
+ CHECK(invoke<double>(*code, 43, 1.9, 2.0) == 2.0);
+ CHECK(invoke<double>(*code, 43, 642462.1, 32533.2) == 32533.2);
+}
+
+void testSelectDoubleTest()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+ auto code = compile(proc);
+ CHECK(invoke<double>(*code, 42, 1.5, 2.6) == 1.5);
+ CHECK(invoke<double>(*code, 42, 642462.7, 32533.8) == 642462.7);
+ CHECK(invoke<double>(*code, 0, 1.9, 2.0) == 2.0);
+ CHECK(invoke<double>(*code, 0, 642462.1, 32533.2) == 32533.2);
+}
+
+void testSelectDoubleCompareDouble()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, LessThan, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3)));
+
+ auto code = compile(proc);
+ CHECK(invoke<double>(*code, -1.0, 1.0, 1.1, 2.2) == 1.1);
+ CHECK(invoke<double>(*code, 42.5, 42.51, 642462.3, 32533.4) == 642462.3);
+ CHECK(invoke<double>(*code, PNaN, 0.0, 1.5, 2.6) == 2.6);
+ CHECK(invoke<double>(*code, 42.51, 42.5, 642462.7, 32533.8) == 32533.8);
+ CHECK(invoke<double>(*code, 42.52, 42.52, 524978245.9, 352.0) == 352.0);
+}
+
+void testSelectDoubleCompareFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, LessThan, Origin(),
+ floatValue1,
+ floatValue2),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+ CHECK(isIdentical(compileAndRun<double>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), 42.1, -M_PI), a < b ? 42.1 : -M_PI));
+}
+
+void testSelectFloatCompareFloat(float a, float b)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* argument3int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+ Value* argument4int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3));
+ Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+ Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+ Value* floatValue3 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument3int32);
+ Value* floatValue4 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument4int32);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, LessThan, Origin(),
+ floatValue1,
+ floatValue2),
+ floatValue3,
+ floatValue4));
+
+ CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), bitwise_cast<int32_t>(1.1f), bitwise_cast<int32_t>(-42.f)), a < b ? 1.1f : -42.f));
+}
+
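+// Aliasing variants: each block below rebuilds the same Select pattern while keeping the
+// thenCase and/or elseCase alive past the Select (via an empty Void patchpoint that pins
+// them to registers), or reuses a compare operand as the elseCase, to stress operand
+// aliasing during instruction selection.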
+template<B3::Opcode opcode>
+void testSelectDoubleCompareDouble(bool (*operation)(double, double))
+{
+ { // Compare arguments and selected arguments are all different.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+ Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, opcode, Origin(),
+ arg0,
+ arg1),
+ arg2,
+ arg3));
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<double>()) {
+ for (auto& right : floatingPointOperands<double>()) {
+ double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+ CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, -66.5), expected));
+ }
+ }
+ }
+ { // Compare arguments and selected arguments are all different. "thenCase" is live after operation.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+ Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3);
+
+ Value* result = root->appendNew<Value>(proc, Select, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+ arg2,
+ arg3);
+
+ PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<double>()) {
+ for (auto& right : floatingPointOperands<double>()) {
+ double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+ CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, -66.5), expected));
+ }
+ }
+ }
+ { // Compare arguments and selected arguments are all different. "elseCase" is live after operation.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+ Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3);
+
+ Value* result = root->appendNew<Value>(proc, Select, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+ arg2,
+ arg3);
+
+ PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+ keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<double>()) {
+ for (auto& right : floatingPointOperands<double>()) {
+ double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+ CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, -66.5), expected));
+ }
+ }
+ }
+ { // Compare arguments and selected arguments are all different. Both cases are live after operation.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+ Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR3);
+
+ Value* result = root->appendNew<Value>(proc, Select, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+ arg2,
+ arg3);
+
+ PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+ keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<double>()) {
+ for (auto& right : floatingPointOperands<double>()) {
+ double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+ CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, -66.5), expected));
+ }
+ }
+ }
+ { // The left argument is the same as the "elseCase" argument.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, opcode, Origin(),
+ arg0,
+ arg1),
+ arg2,
+ arg0));
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<double>()) {
+ for (auto& right : floatingPointOperands<double>()) {
+ double expected = operation(left.value, right.value) ? 42.5 : left.value;
+ CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, left.value), expected));
+ }
+ }
+ }
+ { // The left argument is the same as the "elseCase" argument. "thenCase" is live after operation.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR2);
+
+ Value* result = root->appendNew<Value>(proc, Select, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+ arg2,
+ arg0);
+
+ PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<double>()) {
+ for (auto& right : floatingPointOperands<double>()) {
+ double expected = operation(left.value, right.value) ? 42.5 : left.value;
+ CHECK(isIdentical(invoke<double>(*code, left.value, right.value, 42.5, left.value), expected));
+ }
+ }
+ }
+}
+
+void testSelectDoubleCompareDoubleWithAliasing()
+{
+ testSelectDoubleCompareDouble<Equal>([](double a, double b) -> bool { return a == b; });
+ testSelectDoubleCompareDouble<NotEqual>([](double a, double b) -> bool { return a != b; });
+ testSelectDoubleCompareDouble<LessThan>([](double a, double b) -> bool { return a < b; });
+ testSelectDoubleCompareDouble<GreaterThan>([](double a, double b) -> bool { return a > b; });
+ testSelectDoubleCompareDouble<LessEqual>([](double a, double b) -> bool { return a <= b; });
+ testSelectDoubleCompareDouble<GreaterEqual>([](double a, double b) -> bool { return a >= b; });
+ testSelectDoubleCompareDouble<EqualOrUnordered>([](double a, double b) -> bool { return a != a || b != b || a == b; });
+}
+
+template<B3::Opcode opcode>
+void testSelectFloatCompareFloat(bool (*operation)(float, float))
+{
+ { // Compare arguments and selected arguments are all different.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+ Value* arg3 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, opcode, Origin(),
+ arg0,
+ arg1),
+ arg2,
+ arg3));
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<float>()) {
+ for (auto& right : floatingPointOperands<float>()) {
+ float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+ CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(-66.5f)), expected));
+ }
+ }
+ }
+ { // Compare arguments and selected arguments are all different. "thenCase" is live after operation.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+ Value* arg3 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+
+ Value* result = root->appendNew<Value>(proc, Select, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+ arg2,
+ arg3);
+
+ PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<float>()) {
+ for (auto& right : floatingPointOperands<float>()) {
+ float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+ CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(-66.5f)), expected));
+ }
+ }
+ }
+ { // Compare arguments and selected arguments are all different. "elseCase" is live after operation.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+ Value* arg3 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+
+ Value* result = root->appendNew<Value>(proc, Select, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+ arg2,
+ arg3);
+
+ PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+ keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<float>()) {
+ for (auto& right : floatingPointOperands<float>()) {
+ float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+ CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(-66.5f)), expected));
+ }
+ }
+ }
+ { // Compare arguments and selected arguments are all different. Both cases are live after operation.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+ Value* arg3 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+
+ Value* result = root->appendNew<Value>(proc, Select, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+ arg2,
+ arg3);
+
+ PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+ keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<float>()) {
+ for (auto& right : floatingPointOperands<float>()) {
+ float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+ CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(-66.5f)), expected));
+ }
+ }
+ }
+ { // The left argument is the same as the "elseCase" argument.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, opcode, Origin(),
+ arg0,
+ arg1),
+ arg2,
+ arg0));
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<float>()) {
+ for (auto& right : floatingPointOperands<float>()) {
+ float expected = operation(left.value, right.value) ? 42.5 : left.value;
+ CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(left.value)), expected));
+ }
+ }
+ }
+ { // The left argument is the same as the "elseCase" argument. "thenCase" is live after operation.
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg0 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ Value* arg1 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+ Value* arg2 = root->appendNew<Value>(proc, BitwiseCast, Origin(),
+ root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+ Value* result = root->appendNew<Value>(proc, Select, Origin(),
+ root->appendNew<Value>(proc, opcode, Origin(), arg0, arg1),
+ arg2,
+ arg0);
+
+ PatchpointValue* keepValuesLive = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+ keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+ root->appendNewControlValue(proc, Return, Origin(), result);
+ auto code = compile(proc);
+
+ for (auto& left : floatingPointOperands<float>()) {
+ for (auto& right : floatingPointOperands<float>()) {
+ float expected = operation(left.value, right.value) ? 42.5 : left.value;
+ CHECK(isIdentical(invoke<float>(*code, bitwise_cast<int32_t>(left.value), bitwise_cast<int32_t>(right.value), bitwise_cast<int32_t>(42.5f), bitwise_cast<int32_t>(left.value)), expected));
+ }
+ }
+ }
+}
+
+void testSelectFloatCompareFloatWithAliasing()
+{
+ testSelectFloatCompareFloat<Equal>([](float a, float b) -> bool { return a == b; });
+ testSelectFloatCompareFloat<NotEqual>([](float a, float b) -> bool { return a != b; });
+ testSelectFloatCompareFloat<LessThan>([](float a, float b) -> bool { return a < b; });
+ testSelectFloatCompareFloat<GreaterThan>([](float a, float b) -> bool { return a > b; });
+ testSelectFloatCompareFloat<LessEqual>([](float a, float b) -> bool { return a <= b; });
+ testSelectFloatCompareFloat<GreaterEqual>([](float a, float b) -> bool { return a >= b; });
+ testSelectFloatCompareFloat<EqualOrUnordered>([](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+void testSelectFold(intptr_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), value),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ auto code = compile(proc);
+ CHECK(invoke<intptr_t>(*code, 1, 2) == (value == 42 ? 1 : 2));
+ CHECK(invoke<intptr_t>(*code, 642462, 32533) == (value == 42 ? 642462 : 32533));
+}
+
+void testSelectInvert()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, Equal, Origin(),
+ root->appendNew<Value>(
+ proc, NotEqual, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+ root->appendNew<Const32Value>(proc, Origin(), 0)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+ auto code = compile(proc);
+ CHECK(invoke<intptr_t>(*code, 42, 1, 2) == 1);
+ CHECK(invoke<intptr_t>(*code, 42, 642462, 32533) == 642462);
+ CHECK(invoke<intptr_t>(*code, 43, 1, 2) == 2);
+ CHECK(invoke<intptr_t>(*code, 43, 642462, 32533) == 32533);
+}
+
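+// Check fires its generator when its child value is non-zero. Below, (arg & 0xff) selects
+// -42 when the argument is truthy, so the Add with 42 produces 0 and the check passes;
+// otherwise it selects 35, the sum is 77, and the generated stub returns 666.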
+void testCheckSelect()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ CheckValue* check = root->appendNew<CheckValue>(
+ proc, Check, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(
+ proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0xff)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), -42),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 35)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 42)));
+ unsigned generationCount = 0;
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ generationCount++;
+ jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ CHECK(generationCount == 1);
+ CHECK(invoke<int>(*code, true) == 0);
+ CHECK(invoke<int>(*code, false) == 666);
+}
+
+void testCheckSelectCheckSelect()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ CheckValue* check = root->appendNew<CheckValue>(
+ proc, Check, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(
+ proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0xff)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), -42),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 35)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 42)));
+
+ unsigned generationCount = 0;
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ generationCount++;
+ jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+
+ CheckValue* check2 = root->appendNew<CheckValue>(
+ proc, Check, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(
+ proc, Origin(), GPRInfo::argumentGPR1)),
+ root->appendNew<Const32Value>(proc, Origin(), 0xff)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), -43),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 36)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 43)));
+
+ unsigned generationCount2 = 0;
+ check2->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ generationCount2++;
+ jit.move(CCallHelpers::TrustedImm32(667), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 0));
+
+ auto code = compile(proc);
+ CHECK(generationCount == 1);
+ CHECK(generationCount2 == 1);
+ CHECK(invoke<int>(*code, true, true) == 0);
+ CHECK(invoke<int>(*code, false, true) == 666);
+ CHECK(invoke<int>(*code, true, false) == 667);
+}
+
+void testCheckSelectAndCSE()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ auto* selectValue = root->appendNew<Value>(
+ proc, Select, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(
+ proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), 0xff)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), -42),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 35));
+
+ auto* constant = root->appendNew<ConstPtrValue>(proc, Origin(), 42);
+ auto* addValue = root->appendNew<Value>(proc, Add, Origin(), selectValue, constant);
+
+ CheckValue* check = root->appendNew<CheckValue>(proc, Check, Origin(), addValue);
+ unsigned generationCount = 0;
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ generationCount++;
+ jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+
+ auto* addValue2 = root->appendNew<Value>(proc, Add, Origin(), selectValue, constant);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Add, Origin(), addValue, addValue2));
+
+ auto code = compile(proc);
+ CHECK(generationCount == 1);
+ CHECK(invoke<int>(*code, true) == 0);
+ CHECK(invoke<int>(*code, false) == 666);
+}
+
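+// Reference implementation for the powDoubleInt32() tests: exponentiation by repeated
+// squaring for exponents in [0, 1000], falling back to pow() outside that range.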
+double b3Pow(double x, int y)
+{
+ if (y < 0 || y > 1000)
+ return pow(x, y);
+ double result = 1;
+ while (y) {
+ if (y & 1)
+ result *= x;
+ x *= x;
+ y >>= 1;
+ }
+ return result;
+}
+
+void testPowDoubleByIntegerLoop(double xOperand, int32_t yOperand)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* x = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* y = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ auto result = powDoubleInt32(proc, root, Origin(), x, y);
+ BasicBlock* continuation = result.first;
+ continuation->appendNewControlValue(proc, Return, Origin(), result.second);
+
+ CHECK(isIdentical(compileAndRun<double>(proc, xOperand, yOperand), b3Pow(xOperand, yOperand)));
+}
+
+void testTruncOrHigh()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), 0x100000000))));
+
+ int64_t value = 0x123456781234;
+ CHECK(compileAndRun<int>(proc, value) == 0x56781234);
+}
+
+void testTruncOrLow()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), 0x1000000))));
+
+ int64_t value = 0x123456781234;
+ CHECK(compileAndRun<int>(proc, value) == 0x57781234);
+}
+
+void testBitAndOrHigh()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), 0x8)),
+ root->appendNew<Const64Value>(proc, Origin(), 0x777777777777)));
+
+ int64_t value = 0x123456781234;
+ CHECK(compileAndRun<int64_t>(proc, value) == 0x123456701234ll);
+}
+
+void testBitAndOrLow()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<Value>(
+ proc, BitOr, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const64Value>(proc, Origin(), 0x1)),
+ root->appendNew<Const64Value>(proc, Origin(), 0x777777777777)));
+
+ int64_t value = 0x123456781234;
+ CHECK(compileAndRun<int64_t>(proc, value) == 0x123456701235ll);
+}
+
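+// The 64-bit branch tests make the taken path observable by returning a Load8Z of a local
+// bool: the then-block loads trueResult and the else-block loads elseResult.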
+void testBranch64Equal(int64_t left, int64_t right)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ bool trueResult = true;
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ thenCase->appendNew<ConstPtrValue>(proc, Origin(), &trueResult)));
+
+ bool elseResult = false;
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ elseCase->appendNew<ConstPtrValue>(proc, Origin(), &elseResult)));
+
+ CHECK(compileAndRun<bool>(proc, left, right) == (left == right));
+}
+
+void testBranch64EqualImm(int64_t left, int64_t right)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ConstPtrValue>(proc, Origin(), right);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ bool trueResult = true;
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ thenCase->appendNew<ConstPtrValue>(proc, Origin(), &trueResult)));
+
+ bool elseResult = false;
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ elseCase->appendNew<ConstPtrValue>(proc, Origin(), &elseResult)));
+
+ CHECK(compileAndRun<bool>(proc, left) == (left == right));
+}
+
+void testBranch64EqualMem(int64_t left, int64_t right)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* arg1 = root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ bool trueResult = true;
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ thenCase->appendNew<ConstPtrValue>(proc, Origin(), &trueResult)));
+
+ bool elseResult = false;
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ elseCase->appendNew<ConstPtrValue>(proc, Origin(), &elseResult)));
+
+ CHECK(compileAndRun<bool>(proc, &left, right) == (left == right));
+}
+
+void testBranch64EqualMemImm(int64_t left, int64_t right)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+
+ Value* arg1 = root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg2 = root->appendNew<ConstPtrValue>(proc, Origin(), right);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(proc, Equal, Origin(), arg1, arg2),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ bool trueResult = true;
+ thenCase->appendNewControlValue(
+ proc, Return, Origin(),
+ thenCase->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ thenCase->appendNew<ConstPtrValue>(proc, Origin(), &trueResult)));
+
+ bool elseResult = false;
+ elseCase->appendNewControlValue(
+ proc, Return, Origin(),
+ elseCase->appendNew<MemoryValue>(
+ proc, Load8Z, Origin(),
+ elseCase->appendNew<ConstPtrValue>(proc, Origin(), &elseResult)));
+
+ CHECK(compileAndRun<bool>(proc, &left) == (left == right));
+}
+
+void testStore8Load8Z(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ int8_t byte;
+ Value* ptr = root->appendNew<ConstPtrValue>(proc, Origin(), &byte);
+
+ root->appendNew<MemoryValue>(
+ proc, Store8, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ ptr);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(proc, Load8Z, Origin(), ptr));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<uint8_t>(value));
+}
+
+void testStore16Load16Z(int32_t value)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ int16_t storage;
+ Value* ptr = root->appendNew<ConstPtrValue>(proc, Origin(), &storage);
+
+ root->appendNew<MemoryValue>(
+ proc, Store16, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ ptr);
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(proc, Load16Z, Origin(), ptr));
+
+ CHECK(compileAndRun<int32_t>(proc, value) == static_cast<uint16_t>(value));
+}
+
+void testSShrShl32(int32_t value, int32_t sshrAmount, int32_t shlAmount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<Const32Value>(proc, Origin(), shlAmount)),
+ root->appendNew<Const32Value>(proc, Origin(), sshrAmount)));
+
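+ // B3 masks shift amounts to the operand width (the low 5 bits here), so the expected value masks the amounts the same way.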
+ CHECK(
+ compileAndRun<int32_t>(proc, value)
+ == ((value << (shlAmount & 31)) >> (sshrAmount & 31)));
+}
+
+void testSShrShl64(int64_t value, int32_t sshrAmount, int32_t shlAmount)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Const32Value>(proc, Origin(), shlAmount)),
+ root->appendNew<Const32Value>(proc, Origin(), sshrAmount)));
+
+ CHECK(
+ compileAndRun<int64_t>(proc, value)
+ == ((value << (shlAmount & 63)) >> (sshrAmount & 63)));
+}
+
+template<typename T>
+void testRotR(T valueInt, int32_t shift)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ if (sizeof(T) == 4)
+ value = root->appendNew<Value>(proc, Trunc, Origin(), value);
+
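+ // The rotation amount arrives in a 64-bit argument register, so truncate it to Int32; the rotate amount operand is an Int32.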
+ Value* amount = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ root->appendNewControlValue(proc, Return, Origin(),
+ root->appendNew<Value>(proc, RotR, Origin(), value, amount));
+
+ CHECK_EQ(compileAndRun<T>(proc, valueInt, shift), rotateRight(valueInt, shift));
+}
+
+template<typename T>
+void testRotL(T valueInt, int32_t shift)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ if (sizeof(T) == 4)
+ value = root->appendNew<Value>(proc, Trunc, Origin(), value);
+
+ Value* amount = root->appendNew<Value>(proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ root->appendNewControlValue(proc, Return, Origin(),
+ root->appendNew<Value>(proc, RotL, Origin(), value, amount));
+
+ CHECK_EQ(compileAndRun<T>(proc, valueInt, shift), rotateLeft(valueInt, shift));
+}
+
+template<typename T>
+void testRotRWithImmShift(T valueInt, int32_t shift)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ if (sizeof(T) == 4)
+ value = root->appendNew<Value>(proc, Trunc, Origin(), value);
+
+ Value* amount = root->appendIntConstant(proc, Origin(), Int32, shift);
+ root->appendNewControlValue(proc, Return, Origin(),
+ root->appendNew<Value>(proc, RotR, Origin(), value, amount));
+
+ CHECK_EQ(compileAndRun<T>(proc, valueInt, shift), rotateRight(valueInt, shift));
+}
+
+template<typename T>
+void testRotLWithImmShift(T valueInt, int32_t shift)
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ if (sizeof(T) == 4)
+ value = root->appendNew<Value>(proc, Trunc, Origin(), value);
+
+ Value* amount = root->appendIntConstant(proc, Origin(), Int32, shift);
+ root->appendNewControlValue(proc, Return, Origin(),
+ root->appendNew<Value>(proc, RotL, Origin(), value, amount));
+
+ CHECK_EQ(compileAndRun<T>(proc, valueInt, shift), rotateLeft(valueInt, shift));
+}
+
+template<typename T>
+void testComputeDivisionMagic(T value, T magicMultiplier, unsigned shift)
+{
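+ // computeDivisionMagic(value) yields a (magicMultiplier, shift) pair so that division by a compile-time
+ // constant can be replaced by a multiply-and-shift sequence; check both fields against the expected constants.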
+ DivisionMagic<T> magic = computeDivisionMagic(value);
+ CHECK(magic.magicMultiplier == magicMultiplier);
+ CHECK(magic.shift == shift);
+}
+
+void testTrivialInfiniteLoop()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* loop = proc.addBlock();
+ root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+ loop->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
+ compile(proc);
+}
+
+void testFoldPathEqual()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenBlock = proc.addBlock();
+ BasicBlock* elseBlock = proc.addBlock();
+
+ Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ root->appendNewControlValue(
+ proc, Branch, Origin(), arg, FrequentedBlock(thenBlock), FrequentedBlock(elseBlock));
+
+ thenBlock->appendNewControlValue(
+ proc, Return, Origin(),
+ thenBlock->appendNew<Value>(
+ proc, Equal, Origin(), arg, thenBlock->appendNew<ConstPtrValue>(proc, Origin(), 0)));
+
+ elseBlock->appendNewControlValue(
+ proc, Return, Origin(),
+ elseBlock->appendNew<Value>(
+ proc, Equal, Origin(), arg, elseBlock->appendNew<ConstPtrValue>(proc, Origin(), 0)));
+
+ auto code = compile(proc);
+ CHECK(invoke<intptr_t>(*code, 0) == 1);
+ CHECK(invoke<intptr_t>(*code, 1) == 0);
+ CHECK(invoke<intptr_t>(*code, 42) == 0);
+}
+
+void testLShiftSelf32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, Shl, Origin(), arg, arg));
+
+ auto code = compile(proc);
+
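+ // The value is used as its own shift amount; the amount is masked to the low 5 bits, so check(32) behaves like a shift by 0.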
+ auto check = [&] (int32_t value) {
+ CHECK(invoke<int32_t>(*code, value) == value << (value & 31));
+ };
+
+ check(0);
+ check(1);
+ check(31);
+ check(32);
+}
+
+void testRShiftSelf32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, SShr, Origin(), arg, arg));
+
+ auto code = compile(proc);
+
+ auto check = [&] (int32_t value) {
+ CHECK(invoke<int32_t>(*code, value) == value >> (value & 31));
+ };
+
+ check(0);
+ check(1);
+ check(31);
+ check(32);
+}
+
+void testURShiftSelf32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, ZShr, Origin(), arg, arg));
+
+ auto code = compile(proc);
+
+ auto check = [&] (uint32_t value) {
+ CHECK(invoke<uint32_t>(*code, value) == value >> (value & 31));
+ };
+
+ check(0);
+ check(1);
+ check(31);
+ check(32);
+}
+
+void testLShiftSelf64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(), arg, root->appendNew<Value>(proc, Trunc, Origin(), arg)));
+
+ auto code = compile(proc);
+
+ auto check = [&] (int64_t value) {
+ CHECK(invoke<int64_t>(*code, value) == value << (value & 63));
+ };
+
+ check(0);
+ check(1);
+ check(31);
+ check(32);
+ check(63);
+ check(64);
+}
+
+void testRShiftSelf64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, SShr, Origin(), arg, root->appendNew<Value>(proc, Trunc, Origin(), arg)));
+
+ auto code = compile(proc);
+
+ auto check = [&] (int64_t value) {
+ CHECK(invoke<int64_t>(*code, value) == value >> (value & 63));
+ };
+
+ check(0);
+ check(1);
+ check(31);
+ check(32);
+ check(63);
+ check(64);
+}
+
+void testURShiftSelf64()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, ZShr, Origin(), arg, root->appendNew<Value>(proc, Trunc, Origin(), arg)));
+
+ auto code = compile(proc);
+
+ auto check = [&] (uint64_t value) {
+ CHECK(invoke<uint64_t>(*code, value) == value >> (value & 63));
+ };
+
+ check(0);
+ check(1);
+ check(31);
+ check(32);
+ check(63);
+ check(64);
+}
+
+void testPatchpointDoubleRegs()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Double, Origin());
+ patchpoint->append(arg, ValueRep(FPRInfo::fpRegT0));
+ patchpoint->resultConstraint = ValueRep(FPRInfo::fpRegT0);
+
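+ // With both the input and the result pinned to fpRegT0, the generator needs no code: the argument value flows through the patchpoint unchanged.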
+ unsigned numCalls = 0;
+ patchpoint->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams&) {
+ numCalls++;
+ });
+
+ root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+ auto code = compile(proc);
+ CHECK(numCalls == 1);
+ CHECK(invoke<double>(*code, 42.5) == 42.5);
+}
+
+void testSpillDefSmallerThanUse()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ // Move32.
+ Value* arg32 = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* arg64 = root->appendNew<Value>(proc, ZExt32, Origin(), arg32);
+
+ // Make sure arg64 is on the stack.
+ PatchpointValue* forceSpill = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+ RegisterSet clobberSet = RegisterSet::allGPRs();
+ clobberSet.exclude(RegisterSet::stackRegisters());
+ clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+ clobberSet.clear(GPRInfo::returnValueGPR); // Leave the return value register unclobbered so the result can alias it below.
+ forceSpill->clobberLate(clobberSet);
+ forceSpill->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.xor64(params[0].gpr(), params[0].gpr());
+ });
+
+ // On x86, Sub can take an address for any operand. If it uses the spilled value on the stack, the top bits must be zero.
+ Value* result = root->appendNew<Value>(proc, Sub, Origin(), forceSpill, arg64);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ CHECK(invoke<int64_t>(*code, 0xffffffff00000000) == 0);
+}
+
+void testSpillUseLargerThanDef()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* thenCase = proc.addBlock();
+ BasicBlock* elseCase = proc.addBlock();
+ BasicBlock* tail = proc.addBlock();
+
+ RegisterSet clobberSet = RegisterSet::allGPRs();
+ clobberSet.exclude(RegisterSet::stackRegisters());
+ clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+ Value* condition = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ root->appendNewControlValue(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ condition),
+ FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+ Value* truncated = thenCase->appendNew<Value>(proc, ZExt32, Origin(),
+ thenCase->appendNew<Value>(proc, Trunc, Origin(), argument));
+ UpsilonValue* thenResult = thenCase->appendNew<UpsilonValue>(proc, Origin(), truncated);
+ thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+ UpsilonValue* elseResult = elseCase->appendNew<UpsilonValue>(proc, Origin(), argument);
+ elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
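+ // Pad the tail with many clobbering patchpoints so it is too large to tail-duplicate, keeping a single merge point for the Phi below.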
+ for (unsigned i = 0; i < 100; ++i) {
+ PatchpointValue* preventTailDuplication = tail->appendNew<PatchpointValue>(proc, Void, Origin());
+ preventTailDuplication->clobberLate(clobberSet);
+ preventTailDuplication->setGenerator([] (CCallHelpers&, const StackmapGenerationParams&) { });
+ }
+
+ PatchpointValue* forceSpill = tail->appendNew<PatchpointValue>(proc, Void, Origin());
+ forceSpill->clobberLate(clobberSet);
+ forceSpill->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ clobberSet.forEach([&] (Reg reg) {
+ jit.move(CCallHelpers::TrustedImm64(0xffffffffffffffff), reg.gpr());
+ });
+ });
+
+ Value* phi = tail->appendNew<Value>(proc, Phi, Int64, Origin());
+ thenResult->setPhi(phi);
+ elseResult->setPhi(phi);
+ tail->appendNewControlValue(proc, Return, Origin(), phi);
+
+ auto code = compile(proc);
+ CHECK(invoke<uint64_t>(*code, 1, 0xffffffff00000000) == 0);
+ CHECK(invoke<uint64_t>(*code, 0, 0xffffffff00000000) == 0xffffffff00000000);
+
+ // Run it a second time, since the previous run's value is still on the stack.
+ CHECK(invoke<uint64_t>(*code, 1, 0xffffffff00000000) == 0);
+}
+
+void testLateRegister()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ // This works by making all but one register an input to the first patchpoint as a LateRegister.
+ // The remaining register is a regular register input. We assert that our result lands in the
+ // regular register input; the register allocator has no other way to arrange things, because a
+ // LateRegister use interferes with the result.
+ // Then the second patchpoint takes the result of the first as an argument and asks for it in a
+ // register that was a LateRegister. This incentivizes the register allocator to use that
+ // LateRegister as the result of the first patchpoint, which it cannot do. So it must issue a move
+ // after the first patchpoint from the first's result into the second's input.
+
+ RegisterSet regs = RegisterSet::allGPRs();
+ regs.exclude(RegisterSet::stackRegisters());
+ regs.exclude(RegisterSet::reservedHardwareRegisters());
+ Vector<Value*> lateUseArgs;
+ unsigned result = 0;
+ for (GPRReg reg = CCallHelpers::firstRegister(); reg <= CCallHelpers::lastRegister(); reg = CCallHelpers::nextRegister(reg)) {
+ if (!regs.get(reg))
+ continue;
+ result++;
+ if (reg == GPRInfo::regT0)
+ continue;
+ Value* value = root->appendNew<Const64Value>(proc, Origin(), 1);
+ lateUseArgs.append(value);
+ }
+ Value* regularUse = root->appendNew<Const64Value>(proc, Origin(), 1);
+ PatchpointValue* firstPatchpoint = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+ {
+ unsigned i = 0;
+ for (GPRReg reg = CCallHelpers::firstRegister(); reg <= CCallHelpers::lastRegister(); reg = CCallHelpers::nextRegister(reg)) {
+ if (!regs.get(reg))
+ continue;
+ if (reg == GPRInfo::regT0)
+ continue;
+ Value* value = lateUseArgs[i++];
+ firstPatchpoint->append(value, ValueRep::lateReg(reg));
+ }
+ firstPatchpoint->append(regularUse, ValueRep::reg(GPRInfo::regT0));
+ }
+
+ firstPatchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params[0].gpr() == GPRInfo::regT0);
+ // Note that regT0 should also start off as 1, so we're implicitly starting our add with 1, which is also an argument.
+ unsigned skipped = 0;
+ for (unsigned i = 1; i < params.size(); i++) {
+ if (params[i].gpr() == params[0].gpr()) {
+ skipped = i;
+ continue;
+ }
+ jit.add64(params[i].gpr(), params[0].gpr());
+ }
+ CHECK(!!skipped);
+ });
+
+ PatchpointValue* secondPatchpoint = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+ secondPatchpoint->append(firstPatchpoint, ValueRep::reg(GPRInfo::regT1));
+ secondPatchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CHECK(params[1].gpr() == GPRInfo::regT1);
+ jit.nop();
+ jit.nop();
+ jit.move(params[1].gpr(), params[0].gpr());
+ jit.nop();
+ jit.nop();
+ });
+ root->appendNewControlValue(proc, Return, Origin(), secondPatchpoint);
+
+ auto code = compile(proc);
+ CHECK(invoke<uint64_t>(*code) == result);
+}
+
+void interpreterPrint(Vector<intptr_t>* stream, intptr_t value)
+{
+ stream->append(value);
+}
+
+void testInterpreter()
+{
+ // This implements a silly interpreter to test building custom switch statements using
+ // Patchpoint.
+
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* dispatch = proc.addBlock();
+ BasicBlock* addToDataPointer = proc.addBlock();
+ BasicBlock* addToCodePointer = proc.addBlock();
+ BasicBlock* addToCodePointerTaken = proc.addBlock();
+ BasicBlock* addToCodePointerNotTaken = proc.addBlock();
+ BasicBlock* addToData = proc.addBlock();
+ BasicBlock* print = proc.addBlock();
+ BasicBlock* stop = proc.addBlock();
+
+ Variable* dataPointer = proc.addVariable(pointerType());
+ Variable* codePointer = proc.addVariable(pointerType());
+
+ root->appendNew<VariableValue>(
+ proc, Set, Origin(), dataPointer,
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNew<VariableValue>(
+ proc, Set, Origin(), codePointer,
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ Value* context = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+ // NOTE: It's totally valid for this patchpoint to be tail-duplicated.
+ Value* codePointerValue =
+ dispatch->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+ Value* opcode = dispatch->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(), codePointerValue);
+ PatchpointValue* polyJump = dispatch->appendNew<PatchpointValue>(proc, Void, Origin());
+ polyJump->effects = Effects();
+ polyJump->effects.terminal = true;
+ polyJump->appendSomeRegister(opcode);
+ polyJump->clobber(RegisterSet::macroScratchRegisters());
+ polyJump->numGPScratchRegisters++;
+ dispatch->appendSuccessor(FrequentedBlock(addToDataPointer));
+ dispatch->appendSuccessor(FrequentedBlock(addToCodePointer));
+ dispatch->appendSuccessor(FrequentedBlock(addToData));
+ dispatch->appendSuccessor(FrequentedBlock(print));
+ dispatch->appendSuccessor(FrequentedBlock(stop));
+
+ // Our "opcodes".
+ static const intptr_t AddDP = 0;
+ static const intptr_t AddCP = 1;
+ static const intptr_t Add = 2;
+ static const intptr_t Print = 3;
+ static const intptr_t Stop = 4;
+
+ polyJump->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+
+ MacroAssemblerCodePtr* jumpTable = bitwise_cast<MacroAssemblerCodePtr*>(
+ params.proc().addDataSection(sizeof(MacroAssemblerCodePtr) * labels.size()));
+
+ jit.move(CCallHelpers::TrustedImmPtr(jumpTable), params.gpScratch(0));
+ jit.jump(CCallHelpers::BaseIndex(params.gpScratch(0), params[0].gpr(), CCallHelpers::timesPtr()));
+
+ jit.addLinkTask(
+ [&, jumpTable, labels] (LinkBuffer& linkBuffer) {
+ for (unsigned i = labels.size(); i--;)
+ jumpTable[i] = linkBuffer.locationOf(*labels[i]);
+ });
+ });
+
+ // AddDP <operand>: adds <operand> to DP.
+ codePointerValue =
+ addToDataPointer->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+ addToDataPointer->appendNew<VariableValue>(
+ proc, Set, Origin(), dataPointer,
+ addToDataPointer->appendNew<Value>(
+ proc, B3::Add, Origin(),
+ addToDataPointer->appendNew<VariableValue>(proc, B3::Get, Origin(), dataPointer),
+ addToDataPointer->appendNew<Value>(
+ proc, Mul, Origin(),
+ addToDataPointer->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t)),
+ addToDataPointer->appendIntConstant(
+ proc, Origin(), pointerType(), sizeof(intptr_t)))));
+ addToDataPointer->appendNew<VariableValue>(
+ proc, Set, Origin(), codePointer,
+ addToDataPointer->appendNew<Value>(
+ proc, B3::Add, Origin(), codePointerValue,
+ addToDataPointer->appendIntConstant(
+ proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+ addToDataPointer->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+ // AddCP <operand>: adds <operand> to CP if the current value at DP is non-zero, otherwise
+ // falls through normally.
+ codePointerValue =
+ addToCodePointer->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+ Value* dataPointerValue =
+ addToCodePointer->appendNew<VariableValue>(proc, B3::Get, Origin(), dataPointer);
+ addToCodePointer->appendNewControlValue(
+ proc, Branch, Origin(),
+ addToCodePointer->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(), dataPointerValue),
+ FrequentedBlock(addToCodePointerTaken), FrequentedBlock(addToCodePointerNotTaken));
+ addToCodePointerTaken->appendNew<VariableValue>(
+ proc, Set, Origin(), codePointer,
+ addToCodePointerTaken->appendNew<Value>(
+ proc, B3::Add, Origin(), codePointerValue,
+ addToCodePointerTaken->appendNew<Value>(
+ proc, Mul, Origin(),
+ addToCodePointerTaken->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t)),
+ addToCodePointerTaken->appendIntConstant(
+ proc, Origin(), pointerType(), sizeof(intptr_t)))));
+ addToCodePointerTaken->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+ addToCodePointerNotTaken->appendNew<VariableValue>(
+ proc, Set, Origin(), codePointer,
+ addToCodePointerNotTaken->appendNew<Value>(
+ proc, B3::Add, Origin(), codePointerValue,
+ addToCodePointerNotTaken->appendIntConstant(
+ proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+ addToCodePointerNotTaken->appendNewControlValue(
+ proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+ // Add <operand>: adds <operand> to the slot pointed to by DP.
+ codePointerValue = addToData->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+ dataPointerValue = addToData->appendNew<VariableValue>(proc, B3::Get, Origin(), dataPointer);
+ addToData->appendNew<MemoryValue>(
+ proc, Store, Origin(),
+ addToData->appendNew<Value>(
+ proc, B3::Add, Origin(),
+ addToData->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(), dataPointerValue),
+ addToData->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t))),
+ dataPointerValue);
+ addToData->appendNew<VariableValue>(
+ proc, Set, Origin(), codePointer,
+ addToData->appendNew<Value>(
+ proc, B3::Add, Origin(), codePointerValue,
+ addToData->appendIntConstant(proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+ addToData->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+ // Print: "prints" the value pointed to by DP. What this actually means is that the value is
+ // appended to the stream vector by the interpreterPrint function.
+ codePointerValue = print->appendNew<VariableValue>(proc, B3::Get, Origin(), codePointer);
+ dataPointerValue = print->appendNew<VariableValue>(proc, B3::Get, Origin(), dataPointer);
+ print->appendNew<CCallValue>(
+ proc, Void, Origin(),
+ print->appendNew<ConstPtrValue>(
+ proc, Origin(), bitwise_cast<void*>(interpreterPrint)),
+ context,
+ print->appendNew<MemoryValue>(proc, Load, pointerType(), Origin(), dataPointerValue));
+ print->appendNew<VariableValue>(
+ proc, Set, Origin(), codePointer,
+ print->appendNew<Value>(
+ proc, B3::Add, Origin(), codePointerValue,
+ print->appendIntConstant(proc, Origin(), pointerType(), sizeof(intptr_t))));
+ print->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+ // Stop: returns.
+ stop->appendNewControlValue(
+ proc, Return, Origin(),
+ stop->appendIntConstant(proc, Origin(), pointerType(), 0));
+
+ auto interpreter = compile(proc);
+
+ Vector<intptr_t> data;
+ Vector<intptr_t> code;
+ Vector<intptr_t> stream;
+
+ data.append(1);
+ data.append(0);
+
+ if (shouldBeVerbose())
+ dataLog("data = ", listDump(data), "\n");
+
+ // We'll write a program that prints the numbers 1..100.
+ // We expect DP to point at #0.
+ code.append(AddCP);
+ code.append(6); // go to loop body
+
+ // Loop re-entry:
+ // We expect DP to point at #1 and for #1 to be offset by -100.
+ code.append(Add);
+ code.append(100);
+
+ code.append(AddDP);
+ code.append(-1);
+
+ // Loop header:
+ // We expect DP to point at #0.
+ code.append(AddDP);
+ code.append(1);
+
+ code.append(Add);
+ code.append(1);
+
+ code.append(Print);
+
+ code.append(Add);
+ code.append(-100);
+
+ // We want to stop if it's zero and continue if it's non-zero. AddCP takes the branch if it's
+ // non-zero.
+ code.append(AddCP);
+ code.append(-11); // go to loop re-entry.
+
+ code.append(Stop);
+
+ if (shouldBeVerbose())
+ dataLog("code = ", listDump(code), "\n");
+
+ CHECK(!invoke<intptr_t>(*interpreter, data.data(), code.data(), &stream));
+
+ CHECK(stream.size() == 100);
+ for (unsigned i = 0; i < 100; ++i)
+ CHECK(stream[i] == i + 1);
+
+ if (shouldBeVerbose())
+ dataLog("stream = ", listDump(stream), "\n");
+}
+
+void testReduceStrengthCheckBottomUseInAnotherBlock()
+{
+ Procedure proc;
+
+ BasicBlock* one = proc.addBlock();
+ BasicBlock* two = proc.addBlock();
+
+ CheckValue* check = one->appendNew<CheckValue>(
+ proc, Check, Origin(), one->appendNew<Const32Value>(proc, Origin(), 1));
+ check->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+ Value* arg = one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ one->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(two));
+
+ check = two->appendNew<CheckValue>(
+ proc, CheckAdd, Origin(), arg,
+ two->appendNew<ConstPtrValue>(proc, Origin(), 1));
+ check->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams&) {
+ CHECK(!"Should not execute");
+ });
+ two->appendNewControlValue(proc, Return, Origin(), check);
+
+ proc.resetReachability();
+ reduceStrength(proc);
+}
+
+void testResetReachabilityDanglingReference()
+{
+ Procedure proc;
+
+ BasicBlock* one = proc.addBlock();
+ BasicBlock* two = proc.addBlock();
+
+ UpsilonValue* upsilon = one->appendNew<UpsilonValue>(
+ proc, Origin(), one->appendNew<Const32Value>(proc, Origin(), 42));
+ one->appendNewControlValue(proc, Oops, Origin());
+
+ Value* phi = two->appendNew<Value>(proc, Phi, Int32, Origin());
+ upsilon->setPhi(phi);
+ two->appendNewControlValue(proc, Oops, Origin());
+
+ proc.resetReachability();
+ validate(proc);
+}
+
+void testEntrySwitchSimple()
+{
+ Procedure proc;
+ proc.setNumEntrypoints(3);
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* one = proc.addBlock();
+ BasicBlock* two = proc.addBlock();
+ BasicBlock* three = proc.addBlock();
+
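+ // EntrySwitch transfers control to the successor whose index matches the entrypoint through which the procedure was entered.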
+ root->appendNew<Value>(proc, EntrySwitch, Origin());
+ root->appendSuccessor(FrequentedBlock(one));
+ root->appendSuccessor(FrequentedBlock(two));
+ root->appendSuccessor(FrequentedBlock(three));
+
+ one->appendNew<Value>(
+ proc, Return, Origin(),
+ one->appendNew<Value>(
+ proc, Add, Origin(),
+ one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ two->appendNew<Value>(
+ proc, Return, Origin(),
+ two->appendNew<Value>(
+ proc, Sub, Origin(),
+ two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ three->appendNew<Value>(
+ proc, Return, Origin(),
+ three->appendNew<Value>(
+ proc, Mul, Origin(),
+ three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ prepareForGeneration(proc);
+
+ CCallHelpers jit(vm);
+ generate(proc, jit);
+ LinkBuffer linkBuffer(*vm, jit, nullptr);
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+ CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+
+ CHECK(invoke<int>(labelOne, 1, 2) == 3);
+ CHECK(invoke<int>(labelTwo, 1, 2) == -1);
+ CHECK(invoke<int>(labelThree, 1, 2) == 2);
+ CHECK(invoke<int>(labelOne, -1, 2) == 1);
+ CHECK(invoke<int>(labelTwo, -1, 2) == -3);
+ CHECK(invoke<int>(labelThree, -1, 2) == -2);
+}
+
+void testEntrySwitchNoEntrySwitch()
+{
+ Procedure proc;
+ proc.setNumEntrypoints(3);
+
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNew<Value>(
+ proc, Return, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+ prepareForGeneration(proc);
+
+ CCallHelpers jit(vm);
+ generate(proc, jit);
+ LinkBuffer linkBuffer(*vm, jit, nullptr);
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+ CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+
+ CHECK_EQ(invoke<int>(labelOne, 1, 2), 3);
+ CHECK_EQ(invoke<int>(labelTwo, 1, 2), 3);
+ CHECK_EQ(invoke<int>(labelThree, 1, 2), 3);
+ CHECK_EQ(invoke<int>(labelOne, -1, 2), 1);
+ CHECK_EQ(invoke<int>(labelTwo, -1, 2), 1);
+ CHECK_EQ(invoke<int>(labelThree, -1, 2), 1);
+}
+
+void testEntrySwitchWithCommonPaths()
+{
+ Procedure proc;
+ proc.setNumEntrypoints(3);
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* one = proc.addBlock();
+ BasicBlock* two = proc.addBlock();
+ BasicBlock* three = proc.addBlock();
+ BasicBlock* end = proc.addBlock();
+
+ root->appendNew<Value>(proc, EntrySwitch, Origin());
+ root->appendSuccessor(FrequentedBlock(one));
+ root->appendSuccessor(FrequentedBlock(two));
+ root->appendSuccessor(FrequentedBlock(three));
+
+ UpsilonValue* upsilonOne = one->appendNew<UpsilonValue>(
+ proc, Origin(),
+ one->appendNew<Value>(
+ proc, Add, Origin(),
+ one->appendNew<Value>(
+ proc, Trunc, Origin(),
+ one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ one->appendNew<Value>(
+ proc, Trunc, Origin(),
+ one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+ one->appendNew<Value>(proc, Jump, Origin());
+ one->setSuccessors(FrequentedBlock(end));
+
+ UpsilonValue* upsilonTwo = two->appendNew<UpsilonValue>(
+ proc, Origin(),
+ two->appendNew<Value>(
+ proc, Sub, Origin(),
+ two->appendNew<Value>(
+ proc, Trunc, Origin(),
+ two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ two->appendNew<Value>(
+ proc, Trunc, Origin(),
+ two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+ two->appendNew<Value>(proc, Jump, Origin());
+ two->setSuccessors(FrequentedBlock(end));
+
+ UpsilonValue* upsilonThree = three->appendNew<UpsilonValue>(
+ proc, Origin(),
+ three->appendNew<Value>(
+ proc, Mul, Origin(),
+ three->appendNew<Value>(
+ proc, Trunc, Origin(),
+ three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ three->appendNew<Value>(
+ proc, Trunc, Origin(),
+ three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+ three->appendNew<Value>(proc, Jump, Origin());
+ three->setSuccessors(FrequentedBlock(end));
+
+ Value* phi = end->appendNew<Value>(proc, Phi, Int32, Origin());
+ upsilonOne->setPhi(phi);
+ upsilonTwo->setPhi(phi);
+ upsilonThree->setPhi(phi);
+
+ end->appendNew<Value>(
+ proc, Return, Origin(),
+ end->appendNew<Value>(
+ proc, chill(Mod), Origin(),
+ phi, end->appendNew<Value>(
+ proc, Trunc, Origin(),
+ end->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2))));
+
+ prepareForGeneration(proc);
+
+ CCallHelpers jit(vm);
+ generate(proc, jit);
+ LinkBuffer linkBuffer(*vm, jit, nullptr);
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+ CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+
+ CHECK_EQ(invoke<int>(labelOne, 1, 2, 10), 3);
+ CHECK_EQ(invoke<int>(labelTwo, 1, 2, 10), -1);
+ CHECK_EQ(invoke<int>(labelThree, 1, 2, 10), 2);
+ CHECK_EQ(invoke<int>(labelOne, -1, 2, 10), 1);
+ CHECK_EQ(invoke<int>(labelTwo, -1, 2, 10), -3);
+ CHECK_EQ(invoke<int>(labelThree, -1, 2, 10), -2);
+ CHECK_EQ(invoke<int>(labelOne, 1, 2, 2), 1);
+ CHECK_EQ(invoke<int>(labelTwo, 1, 2, 2), -1);
+ CHECK_EQ(invoke<int>(labelThree, 1, 2, 2), 0);
+ CHECK_EQ(invoke<int>(labelOne, -1, 2, 2), 1);
+ CHECK_EQ(invoke<int>(labelTwo, -1, 2, 2), -1);
+ CHECK_EQ(invoke<int>(labelThree, -1, 2, 2), 0);
+ CHECK_EQ(invoke<int>(labelOne, 1, 2, 0), 0);
+ CHECK_EQ(invoke<int>(labelTwo, 1, 2, 0), 0);
+ CHECK_EQ(invoke<int>(labelThree, 1, 2, 0), 0);
+ CHECK_EQ(invoke<int>(labelOne, -1, 2, 0), 0);
+ CHECK_EQ(invoke<int>(labelTwo, -1, 2, 0), 0);
+ CHECK_EQ(invoke<int>(labelThree, -1, 2, 0), 0);
+}
+
+void testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint()
+{
+ Procedure proc;
+ proc.setNumEntrypoints(3);
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* negate = proc.addBlock();
+ BasicBlock* dispatch = proc.addBlock();
+ BasicBlock* one = proc.addBlock();
+ BasicBlock* two = proc.addBlock();
+ BasicBlock* three = proc.addBlock();
+ BasicBlock* end = proc.addBlock();
+
+ UpsilonValue* upsilonBase = root->appendNew<UpsilonValue>(
+ proc, Origin(), root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ root->appendNew<Value>(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0xff)));
+ root->setSuccessors(FrequentedBlock(negate), FrequentedBlock(dispatch));
+
+ UpsilonValue* upsilonNegate = negate->appendNew<UpsilonValue>(
+ proc, Origin(),
+ negate->appendNew<Value>(
+ proc, Neg, Origin(),
+ negate->appendNew<Value>(
+ proc, Trunc, Origin(),
+ negate->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+ negate->appendNew<Value>(proc, Jump, Origin());
+ negate->setSuccessors(FrequentedBlock(dispatch));
+
+ Value* arg0 = dispatch->appendNew<Value>(proc, Phi, Int32, Origin());
+ upsilonBase->setPhi(arg0);
+ upsilonNegate->setPhi(arg0);
+ dispatch->appendNew<Value>(proc, EntrySwitch, Origin());
+ dispatch->appendSuccessor(FrequentedBlock(one));
+ dispatch->appendSuccessor(FrequentedBlock(two));
+ dispatch->appendSuccessor(FrequentedBlock(three));
+
+ UpsilonValue* upsilonOne = one->appendNew<UpsilonValue>(
+ proc, Origin(),
+ one->appendNew<Value>(
+ proc, Add, Origin(),
+ arg0, one->appendNew<Value>(
+ proc, Trunc, Origin(),
+ one->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+ one->appendNew<Value>(proc, Jump, Origin());
+ one->setSuccessors(FrequentedBlock(end));
+
+ UpsilonValue* upsilonTwo = two->appendNew<UpsilonValue>(
+ proc, Origin(),
+ two->appendNew<Value>(
+ proc, Sub, Origin(),
+ arg0, two->appendNew<Value>(
+ proc, Trunc, Origin(),
+ two->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+ two->appendNew<Value>(proc, Jump, Origin());
+ two->setSuccessors(FrequentedBlock(end));
+
+ UpsilonValue* upsilonThree = three->appendNew<UpsilonValue>(
+ proc, Origin(),
+ three->appendNew<Value>(
+ proc, Mul, Origin(),
+ arg0, three->appendNew<Value>(
+ proc, Trunc, Origin(),
+ three->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+ three->appendNew<Value>(proc, Jump, Origin());
+ three->setSuccessors(FrequentedBlock(end));
+
+ Value* phi = end->appendNew<Value>(proc, Phi, Int32, Origin());
+ upsilonOne->setPhi(phi);
+ upsilonTwo->setPhi(phi);
+ upsilonThree->setPhi(phi);
+
+ end->appendNew<Value>(
+ proc, Return, Origin(),
+ end->appendNew<Value>(
+ proc, chill(Mod), Origin(),
+ phi, end->appendNew<Value>(
+ proc, Trunc, Origin(),
+ end->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2))));
+
+ prepareForGeneration(proc);
+
+ CCallHelpers jit(vm);
+ generate(proc, jit);
+ LinkBuffer linkBuffer(*vm, jit, nullptr);
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+ CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+
+ CHECK_EQ(invoke<int>(labelOne, 1, 2, 10, false), 3);
+ CHECK_EQ(invoke<int>(labelTwo, 1, 2, 10, false), -1);
+ CHECK_EQ(invoke<int>(labelThree, 1, 2, 10, false), 2);
+ CHECK_EQ(invoke<int>(labelOne, -1, 2, 10, false), 1);
+ CHECK_EQ(invoke<int>(labelTwo, -1, 2, 10, false), -3);
+ CHECK_EQ(invoke<int>(labelThree, -1, 2, 10, false), -2);
+ CHECK_EQ(invoke<int>(labelOne, 1, 2, 10, true), 1);
+ CHECK_EQ(invoke<int>(labelTwo, 1, 2, 10, true), -3);
+ CHECK_EQ(invoke<int>(labelThree, 1, 2, 10, true), -2);
+ CHECK_EQ(invoke<int>(labelOne, -1, 2, 10, true), 3);
+ CHECK_EQ(invoke<int>(labelTwo, -1, 2, 10, true), -1);
+ CHECK_EQ(invoke<int>(labelThree, -1, 2, 10, true), 2);
+ CHECK_EQ(invoke<int>(labelOne, 1, 2, 2, false), 1);
+ CHECK_EQ(invoke<int>(labelTwo, 1, 2, 2, false), -1);
+ CHECK_EQ(invoke<int>(labelThree, 1, 2, 2, false), 0);
+ CHECK_EQ(invoke<int>(labelOne, -1, 2, 2, false), 1);
+ CHECK_EQ(invoke<int>(labelTwo, -1, 2, 2, false), -1);
+ CHECK_EQ(invoke<int>(labelThree, -1, 2, 2, false), 0);
+ CHECK_EQ(invoke<int>(labelOne, 1, 2, 0, false), 0);
+ CHECK_EQ(invoke<int>(labelTwo, 1, 2, 0, false), 0);
+ CHECK_EQ(invoke<int>(labelThree, 1, 2, 0, false), 0);
+ CHECK_EQ(invoke<int>(labelOne, -1, 2, 0, false), 0);
+ CHECK_EQ(invoke<int>(labelTwo, -1, 2, 0, false), 0);
+ CHECK_EQ(invoke<int>(labelThree, -1, 2, 0, false), 0);
+}
+
+void testEntrySwitchLoop()
+{
+ // This is a completely absurd use of EntrySwitch, where it impacts the loop condition. This
+ // should cause duplication of nearly the entire Procedure. At the time of writing, we ended up
+ // duplicating all of it, which is fine. It's important to test this case, to make sure that
+ // the duplication algorithm can handle interesting control flow.
+
+ Procedure proc;
+ proc.setNumEntrypoints(2);
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* loopHeader = proc.addBlock();
+ BasicBlock* loopFooter = proc.addBlock();
+ BasicBlock* end = proc.addBlock();
+
+ UpsilonValue* initialValue = root->appendNew<UpsilonValue>(
+ proc, Origin(), root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+ root->appendNew<Value>(proc, Jump, Origin());
+ root->setSuccessors(loopHeader);
+
+ Value* valueInLoop = loopHeader->appendNew<Value>(proc, Phi, Int32, Origin());
+ initialValue->setPhi(valueInLoop);
+ Value* newValue = loopHeader->appendNew<Value>(
+ proc, Add, Origin(), valueInLoop,
+ loopHeader->appendNew<Const32Value>(proc, Origin(), 1));
+ loopHeader->appendNew<Value>(proc, EntrySwitch, Origin());
+ loopHeader->appendSuccessor(end);
+ loopHeader->appendSuccessor(loopFooter);
+
+ loopFooter->appendNew<UpsilonValue>(proc, Origin(), newValue, valueInLoop);
+ loopFooter->appendNew<Value>(
+ proc, Branch, Origin(),
+ loopFooter->appendNew<Value>(
+ proc, LessThan, Origin(), newValue,
+ loopFooter->appendNew<Const32Value>(proc, Origin(), 100)));
+ loopFooter->setSuccessors(loopHeader, end);
+
+ end->appendNew<Value>(proc, Return, Origin(), newValue);
+
+ prepareForGeneration(proc);
+
+ CCallHelpers jit(vm);
+ generate(proc, jit);
+ LinkBuffer linkBuffer(*vm, jit, nullptr);
+ CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+ CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+
+ MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+
+ CHECK(invoke<int>(labelOne, 0) == 1);
+ CHECK(invoke<int>(labelOne, 42) == 43);
+ CHECK(invoke<int>(labelOne, 1000) == 1001);
+
+ CHECK(invoke<int>(labelTwo, 0) == 100);
+ CHECK(invoke<int>(labelTwo, 42) == 100);
+ CHECK(invoke<int>(labelTwo, 1000) == 1001);
+}
+
+void testSomeEarlyRegister()
+{
+ auto run = [&] (bool succeed) {
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+ bool ranFirstPatchpoint = false;
+ patchpoint->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+ CHECK(params[0].gpr() == GPRInfo::returnValueGPR);
+ ranFirstPatchpoint = true;
+ });
+
+ Value* arg = patchpoint;
+
+ patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->appendSomeRegister(arg);
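+ // SomeEarlyRegister asks for a result register that interferes with every input, so when it is requested the result and input registers must differ.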
+ if (succeed)
+ patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+ bool ranSecondPatchpoint = false;
+ patchpoint->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+ if (succeed)
+ CHECK(params[0].gpr() != params[1].gpr());
+ else
+ CHECK(params[0].gpr() == params[1].gpr());
+ ranSecondPatchpoint = true;
+ });
+
+ root->appendNew<Value>(proc, Return, Origin(), patchpoint);
+
+ compile(proc);
+ CHECK(ranFirstPatchpoint);
+ CHECK(ranSecondPatchpoint);
+ };
+
+ run(true);
+ run(false);
+}
+
+void testBranchBitAndImmFusion(
+ B3::Opcode valueModifier, Type valueType, int64_t constant,
+ Air::Opcode expectedOpcode, Air::Arg::Kind firstKind)
+{
+ // Currently this test should pass on all CPUs. But some CPUs may not support this fused
+ // instruction. It's OK to skip this test on those CPUs.
+
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* one = proc.addBlock();
+ BasicBlock* two = proc.addBlock();
+
+ Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+
+ if (valueModifier != Identity) {
+ if (MemoryValue::accepts(valueModifier))
+ left = root->appendNew<MemoryValue>(proc, valueModifier, valueType, Origin(), left);
+ else
+ left = root->appendNew<Value>(proc, valueModifier, valueType, Origin(), left);
+ }
+
+ root->appendNew<Value>(
+ proc, Branch, Origin(),
+ root->appendNew<Value>(
+ proc, BitAnd, Origin(), left,
+ root->appendIntConstant(proc, Origin(), valueType, constant)));
+ root->setSuccessors(FrequentedBlock(one), FrequentedBlock(two));
+
+ one->appendNew<Value>(proc, Oops, Origin());
+ two->appendNew<Value>(proc, Oops, Origin());
+
+ lowerToAirForTesting(proc);
+
+ // The first basic block must end in a branch-test of the expected opcode, with arguments (resCond, tmp-or-address, bitImm).
+ Air::Inst terminal = proc.code()[0]->last();
+ CHECK_EQ(terminal.kind.opcode, expectedOpcode);
+ CHECK_EQ(terminal.args[0].kind(), Air::Arg::ResCond);
+ CHECK_EQ(terminal.args[1].kind(), firstKind);
+ CHECK(terminal.args[2].kind() == Air::Arg::BitImm || terminal.args[2].kind() == Air::Arg::BitImm64);
+}
+
+void testTerminalPatchpointThatNeedsToBeSpilled()
+{
+ // This is a unit test for how FTL's heap allocation fast paths behave.
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* success = proc.addBlock();
+ BasicBlock* slowPath = proc.addBlock();
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->effects.terminal = true;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+ root->appendSuccessor(success);
+ root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.move(CCallHelpers::TrustedImm32(42), params[0].gpr());
+
+ CCallHelpers::Jump jumpToSuccess;
+ if (!params.fallsThroughToSuccessor(0))
+ jumpToSuccess = jit.jump();
+
+ Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ if (jumpToSuccess.isSet())
+ jumpToSuccess.linkTo(*labels[0], &jit);
+ });
+ });
+
+ Vector<Value*> args;
+ {
+ RegisterSet fillAllGPRsSet = RegisterSet::allGPRs();
+ fillAllGPRsSet.exclude(RegisterSet::stackRegisters());
+ fillAllGPRsSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+ for (unsigned i = 0; i < fillAllGPRsSet.numberOfSetRegisters(); i++)
+ args.append(success->appendNew<Const32Value>(proc, Origin(), i));
+ }
+
+ {
+ // Now force all values into every available register.
+ PatchpointValue* p = success->appendNew<PatchpointValue>(proc, Void, Origin());
+ for (Value* v : args)
+ p->append(v, ValueRep::SomeRegister);
+ p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+ }
+
+ {
+ // Now require the original patchpoint to be materialized into a register.
+ PatchpointValue* p = success->appendNew<PatchpointValue>(proc, Void, Origin());
+ p->append(patchpoint, ValueRep::SomeRegister);
+ p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+ }
+
+ success->appendNew<Value>(proc, Return, Origin(), success->appendNew<Const32Value>(proc, Origin(), 10));
+
+ slowPath->appendNew<Value>(proc, Return, Origin(), slowPath->appendNew<Const32Value>(proc, Origin(), 20));
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<int>(*code), 10);
+}
+
+void testTerminalPatchpointThatNeedsToBeSpilled2()
+{
+ // This is a unit test for how FTL's heap allocation fast paths behave.
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* one = proc.addBlock();
+ BasicBlock* success = proc.addBlock();
+ BasicBlock* slowPath = proc.addBlock();
+
+ Value* arg = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+
+ root->appendNew<Value>(
+ proc, Branch, Origin(), arg);
+ root->appendSuccessor(one);
+ root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+
+ PatchpointValue* patchpoint = one->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->effects.terminal = true;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->append(arg, ValueRep::SomeRegister);
+
+ one->appendSuccessor(success);
+ one->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.move(CCallHelpers::TrustedImm32(666), params[0].gpr());
+ auto goToFastPath = jit.branch32(CCallHelpers::Equal, params[1].gpr(), CCallHelpers::TrustedImm32(42));
+ auto jumpToSlow = jit.jump();
+
+ // Make sure the asserts here pass.
+ params.fallsThroughToSuccessor(0);
+ params.fallsThroughToSuccessor(1);
+
+ Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ goToFastPath.linkTo(*labels[0], &jit);
+ jumpToSlow.linkTo(*labels[1], &jit);
+ });
+ });
+
+ Vector<Value*> args;
+ {
+ RegisterSet fillAllGPRsSet = RegisterSet::allGPRs();
+ fillAllGPRsSet.exclude(RegisterSet::stackRegisters());
+ fillAllGPRsSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+ for (unsigned i = 0; i < fillAllGPRsSet.numberOfSetRegisters(); i++)
+ args.append(success->appendNew<Const32Value>(proc, Origin(), i));
+ }
+
+ {
+ // Now force all values into every available register.
+ PatchpointValue* p = success->appendNew<PatchpointValue>(proc, Void, Origin());
+ for (Value* v : args)
+ p->append(v, ValueRep::SomeRegister);
+ p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+ }
+
+ {
+ // Now require the original patchpoint to be materialized into a register.
+ PatchpointValue* p = success->appendNew<PatchpointValue>(proc, Void, Origin());
+ p->append(patchpoint, ValueRep::SomeRegister);
+ p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+ }
+
+ success->appendNew<Value>(proc, Return, Origin(), patchpoint);
+
+ slowPath->appendNew<Value>(proc, Return, Origin(), arg);
+
+ auto original1 = Options::maxB3TailDupBlockSize();
+ auto original2 = Options::maxB3TailDupBlockSuccessors();
+
+ // Tail duplication will break the critical edge we're trying to test because it
+ // will clone the slowPath block for both edges to it!
+ Options::maxB3TailDupBlockSize() = 0;
+ Options::maxB3TailDupBlockSuccessors() = 0;
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<int>(*code, 1), 1);
+ CHECK_EQ(invoke<int>(*code, 0), 0);
+ CHECK_EQ(invoke<int>(*code, 42), 666);
+
+ Options::maxB3TailDupBlockSize() = original1;
+ Options::maxB3TailDupBlockSuccessors() = original2;
+}
+
+void testPatchpointTerminalReturnValue(bool successIsRare)
+{
+ // This is a unit test for how FTL's heap allocation fast paths behave.
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* success = proc.addBlock();
+ BasicBlock* slowPath = proc.addBlock();
+ BasicBlock* continuation = proc.addBlock();
+
+ Value* arg = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int32, Origin());
+ patchpoint->effects.terminal = true;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+ if (successIsRare) {
+ root->appendSuccessor(FrequentedBlock(success, FrequencyClass::Rare));
+ root->appendSuccessor(slowPath);
+ } else {
+ root->appendSuccessor(success);
+ root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+ }
+
+ patchpoint->appendSomeRegister(arg);
+
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ CCallHelpers::Jump jumpToSlow =
+ jit.branch32(CCallHelpers::Above, params[1].gpr(), CCallHelpers::TrustedImm32(42));
+
+ jit.add32(CCallHelpers::TrustedImm32(31), params[1].gpr(), params[0].gpr());
+
+ CCallHelpers::Jump jumpToSuccess;
+ if (!params.fallsThroughToSuccessor(0))
+ jumpToSuccess = jit.jump();
+
+ Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ jumpToSlow.linkTo(*labels[1], &jit);
+ if (jumpToSuccess.isSet())
+ jumpToSuccess.linkTo(*labels[0], &jit);
+ });
+ });
+
+ UpsilonValue* successUpsilon = success->appendNew<UpsilonValue>(proc, Origin(), patchpoint);
+ success->appendNew<Value>(proc, Jump, Origin());
+ success->setSuccessors(continuation);
+
+ UpsilonValue* slowPathUpsilon = slowPath->appendNew<UpsilonValue>(
+ proc, Origin(), slowPath->appendNew<Const32Value>(proc, Origin(), 666));
+ slowPath->appendNew<Value>(proc, Jump, Origin());
+ slowPath->setSuccessors(continuation);
+
+ Value* phi = continuation->appendNew<Value>(proc, Phi, Int32, Origin());
+ successUpsilon->setPhi(phi);
+ slowPathUpsilon->setPhi(phi);
+ continuation->appendNew<Value>(proc, Return, Origin(), phi);
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<int>(*code, 0), 31);
+ CHECK_EQ(invoke<int>(*code, 1), 32);
+ CHECK_EQ(invoke<int>(*code, 41), 72);
+ CHECK_EQ(invoke<int>(*code, 42), 73);
+ CHECK_EQ(invoke<int>(*code, 43), 666);
+ CHECK_EQ(invoke<int>(*code, -1), 666);
+}
+
+void testMemoryFence()
+{
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNew<FenceValue>(proc, Origin());
+ root->appendNew<Value>(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<int>(*code), 42);
+ if (isX86())
+ checkUsesInstruction(*code, "lock or $0x0, (%rsp)");
+ if (isARM64())
+ checkUsesInstruction(*code, "dmb ish");
+ checkDoesNotUseInstruction(*code, "mfence");
+ checkDoesNotUseInstruction(*code, "dmb ishst");
+}
+
+void testStoreFence()
+{
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+
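+ // A store-only fence should need no fence instruction on x86 (TSO already orders stores) and should lower to dmb ishst on ARM64.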
+ root->appendNew<FenceValue>(proc, Origin(), HeapRange::top(), HeapRange());
+ root->appendNew<Value>(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<int>(*code), 42);
+ checkDoesNotUseInstruction(*code, "lock");
+ checkDoesNotUseInstruction(*code, "mfence");
+ if (isARM64())
+ checkUsesInstruction(*code, "dmb ishst");
+}
+
+void testLoadFence()
+{
+ Procedure proc;
+
+ BasicBlock* root = proc.addBlock();
+
+ root->appendNew<FenceValue>(proc, Origin(), HeapRange(), HeapRange::top());
+ root->appendNew<Value>(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<int>(*code), 42);
+ checkDoesNotUseInstruction(*code, "lock");
+ checkDoesNotUseInstruction(*code, "mfence");
+ if (isARM64())
+ checkUsesInstruction(*code, "dmb ish");
+ checkDoesNotUseInstruction(*code, "dmb ishst");
+}
+
+void testTrappingLoad()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int x = 42;
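+ // trapping(Load) marks the load as able to exit sideways, so it must not be optimized away and exactly one Air instruction should carry the traps flag.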
+ MemoryValue* value = root->appendNew<MemoryValue>(
+ proc, trapping(Load), Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &x));
+ Effects expectedEffects;
+ expectedEffects.exitsSideways = true;
+ expectedEffects.controlDependent = true;
+ expectedEffects.reads = HeapRange::top();
+ CHECK_EQ(value->range(), HeapRange::top());
+ CHECK_EQ(value->effects(), expectedEffects);
+ value->setRange(HeapRange(0));
+ CHECK_EQ(value->range(), HeapRange(0));
+ CHECK_EQ(value->effects(), expectedEffects); // We still read top!
+ root->appendNew<Value>(proc, Return, Origin(), value);
+ CHECK_EQ(compileAndRun<int>(proc), 42);
+ unsigned trapsCount = 0;
+ for (Air::BasicBlock* block : proc.code()) {
+ for (Air::Inst& inst : *block) {
+ if (inst.kind.traps)
+ trapsCount++;
+ }
+ }
+ CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingStore()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int x = 42;
+ MemoryValue* value = root->appendNew<MemoryValue>(
+ proc, trapping(Store), Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 111),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &x));
+ Effects expectedEffects;
+ expectedEffects.exitsSideways = true;
+ expectedEffects.controlDependent = true;
+ expectedEffects.reads = HeapRange::top();
+ expectedEffects.writes = HeapRange::top();
+ CHECK_EQ(value->range(), HeapRange::top());
+ CHECK_EQ(value->effects(), expectedEffects);
+ value->setRange(HeapRange(0));
+ CHECK_EQ(value->range(), HeapRange(0));
+ expectedEffects.writes = HeapRange(0);
+ CHECK_EQ(value->effects(), expectedEffects); // We still read top!
+ root->appendNew<Value>(proc, Return, Origin());
+ compileAndRun<int>(proc);
+ CHECK_EQ(x, 111);
+ unsigned trapsCount = 0;
+ for (Air::BasicBlock* block : proc.code()) {
+ for (Air::Inst& inst : *block) {
+ if (inst.kind.traps)
+ trapsCount++;
+ }
+ }
+ CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingLoadAddStore()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int x = 42;
+ ConstPtrValue* ptr = root->appendNew<ConstPtrValue>(proc, Origin(), &x);
+ root->appendNew<MemoryValue>(
+ proc, trapping(Store), Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<MemoryValue>(proc, trapping(Load), Int32, Origin(), ptr),
+ root->appendNew<Const32Value>(proc, Origin(), 3)),
+ ptr);
+ root->appendNew<Value>(proc, Return, Origin());
+ compileAndRun<int>(proc);
+ CHECK_EQ(x, 45);
+ bool traps = false;
+ for (Air::BasicBlock* block : proc.code()) {
+ for (Air::Inst& inst : *block) {
+ if (inst.kind.traps)
+ traps = true;
+ }
+ }
+ CHECK(traps);
+}
+
+void testTrappingLoadDCE()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int x = 42;
+ root->appendNew<MemoryValue>(
+ proc, trapping(Load), Int32, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), &x));
+ root->appendNew<Value>(proc, Return, Origin());
+ compileAndRun<int>(proc);
+ unsigned trapsCount = 0;
+ for (Air::BasicBlock* block : proc.code()) {
+ for (Air::Inst& inst : *block) {
+ if (inst.kind.traps)
+ trapsCount++;
+ }
+ }
+ CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingStoreElimination()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ int x = 42;
+ Value* ptr = root->appendNew<ConstPtrValue>(proc, Origin(), &x);
+ root->appendNew<MemoryValue>(
+ proc, trapping(Store), Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 43),
+ ptr);
+ root->appendNew<MemoryValue>(
+ proc, trapping(Store), Origin(),
+ root->appendNew<Const32Value>(proc, Origin(), 44),
+ ptr);
+ root->appendNew<Value>(proc, Return, Origin());
+ compileAndRun<int>(proc);
+ unsigned storeCount = 0;
+ for (Value* value : proc.values()) {
+ if (MemoryValue::isStore(value->opcode()))
+ storeCount++;
+ }
+ CHECK_EQ(storeCount, 2u);
+}
+
+void testMoveConstants()
+{
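+ // After moveConstants, exactly one used 64-bit constant should remain; the other large values are expected to be derived from it (e.g. by adding an offset).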
+ auto check = [] (Procedure& proc) {
+ proc.resetReachability();
+
+ if (shouldBeVerbose()) {
+ dataLog("IR before:\n");
+ dataLog(proc);
+ }
+
+ moveConstants(proc);
+
+ if (shouldBeVerbose()) {
+ dataLog("IR after:\n");
+ dataLog(proc);
+ }
+
+ UseCounts useCounts(proc);
+ unsigned count = 0;
+ for (Value* value : proc.values()) {
+ if (useCounts.numUses(value) && value->hasInt64())
+ count++;
+ }
+
+ if (count == 1)
+ return;
+
+ crashLock.lock();
+ dataLog("Fail in testMoveConstants: got more than one Const64:\n");
+ dataLog(proc);
+ CRASH();
+ };
+
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* a = root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0x123412341234));
+ Value* b = root->appendNew<MemoryValue>(
+ proc, Load, pointerType(), Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0x123412341334));
+ root->appendNew<CCallValue>(proc, Void, Origin(), a, b);
+ root->appendNew<Value>(proc, Return, Origin());
+ check(proc);
+ }
+
+ {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* x = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* a = root->appendNew<Value>(
+ proc, Add, Origin(), x, root->appendNew<ConstPtrValue>(proc, Origin(), 0x123412341234));
+ Value* b = root->appendNew<Value>(
+ proc, Add, Origin(), x, root->appendNew<ConstPtrValue>(proc, Origin(), -0x123412341234));
+ root->appendNew<CCallValue>(proc, Void, Origin(), a, b);
+ root->appendNew<Value>(proc, Return, Origin());
+ check(proc);
+ }
+}
+
+void testPCOriginMapDoesntInsertNops()
+{
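+ // The label captured by the first patchpoint should coincide with the second patchpoint's label; anything emitted between them (such as nops for the PC-to-origin map) would make the check below fail.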
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ CCallHelpers::Label watchpointLabel;
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ watchpointLabel = jit.watchpointLabel();
+ });
+
+ patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ CCallHelpers::Label labelIgnoringWatchpoints = jit.labelIgnoringWatchpoints();
+
+ CHECK(watchpointLabel == labelIgnoringWatchpoints);
+ });
+
+ root->appendNew<Value>(proc, Return, Origin());
+
+ compile(proc);
+}
+
+void testPinRegisters()
+{
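+ // When the callee-save registers are pinned, nothing outside the patchpoint that explicitly requests regCS0 may use them; when they are not pinned, something should.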
+ auto go = [&] (bool pin) {
+ Procedure proc;
+ RegisterSet csrs;
+ csrs.merge(RegisterSet::calleeSaveRegisters());
+ csrs.exclude(RegisterSet::stackRegisters());
+ if (pin) {
+ csrs.forEach(
+ [&] (Reg reg) {
+ proc.pinRegister(reg);
+ });
+ }
+ BasicBlock* root = proc.addBlock();
+ Value* a = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* b = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* c = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+ Value* d = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::regCS0);
+ root->appendNew<CCallValue>(
+ proc, Void, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), static_cast<intptr_t>(0x1234)));
+ root->appendNew<CCallValue>(
+ proc, Void, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), static_cast<intptr_t>(0x1235)),
+ a, b, c);
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ patchpoint->appendSomeRegister(d);
+ patchpoint->setGenerator(
+ [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+ CHECK_EQ(params[0].gpr(), GPRInfo::regCS0);
+ });
+ root->appendNew<Value>(proc, Return, Origin());
+ auto code = compile(proc);
+ bool usesCSRs = false;
+ for (Air::BasicBlock* block : proc.code()) {
+ for (Air::Inst& inst : *block) {
+ if (inst.kind.opcode == Air::Patch && inst.origin == patchpoint)
+ continue;
+ inst.forEachTmpFast(
+ [&] (Air::Tmp tmp) {
+ if (tmp.isReg())
+ usesCSRs |= csrs.get(tmp.reg());
+ });
+ }
+ }
+ for (const RegisterAtOffset& regAtOffset : proc.calleeSaveRegisters())
+ usesCSRs |= csrs.get(regAtOffset.reg());
+ CHECK_EQ(usesCSRs, !pin);
+ };
+
+ go(true);
+ go(false);
+}
+
+void testX86LeaAddAddShlLeft()
+{
+ // Add(Add(Shl(@x, $c), @y), $d)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 2)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 100));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ checkUsesInstruction(*code, "lea 0x64(%rdi,%rsi,4), %rax");
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), (1 + (2 << 2)) + 100);
+}
+
+void testX86LeaAddAddShlRight()
+{
+ // Add(Add(@x, Shl(@y, $c)), $d)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 2))),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 100));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ checkUsesInstruction(*code, "lea 0x64(%rdi,%rsi,4), %rax");
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), (1 + (2 << 2)) + 100);
+}
+
+void testX86LeaAddAdd()
+{
+ // Add(Add(@x, @y), $c)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 100));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ checkDisassembly(
+ *code,
+ [&] (const char* disassembly) -> bool {
+ return strstr(disassembly, "lea 0x64(%rdi,%rsi), %rax")
+ || strstr(disassembly, "lea 0x64(%rsi,%rdi), %rax");
+ },
+ "Expected to find something like lea 0x64(%rdi,%rsi), %rax but didn't!");
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), (1 + 2) + 100);
+}
+
+void testX86LeaAddShlRight()
+{
+ // Add(Shl(@x, $c), @y)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 2)));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ checkUsesInstruction(*code, "lea (%rdi,%rsi,4), %rax");
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 2));
+}
+
+void testX86LeaAddShlLeftScale1()
+{
+ // Add(Shl(@x, $c), @y)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 0)));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ checkDisassembly(
+ *code,
+ [&] (const char* disassembly) -> bool {
+ return strstr(disassembly, "lea (%rdi,%rsi), %rax")
+ || strstr(disassembly, "lea (%rsi,%rdi), %rax");
+ },
+ "Expected to find something like lea (%rdi,%rsi), %rax but didn't!");
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + 2);
+}
+
+void testX86LeaAddShlLeftScale2()
+{
+ // Add(Shl(@x, $c), @y)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 1)));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ checkUsesInstruction(*code, "lea (%rdi,%rsi,2), %rax");
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 1));
+}
+
+void testX86LeaAddShlLeftScale4()
+{
+ // Add(Shl(@x, $c), @y)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 2)),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ checkUsesInstruction(*code, "lea (%rdi,%rsi,4), %rax");
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 2));
+}
+
+void testX86LeaAddShlLeftScale8()
+{
+ // Add(Shl(@x, $c), @y)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 3)));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ checkUsesInstruction(*code, "lea (%rdi,%rsi,8), %rax");
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 3));
+}
+
+void testAddShl32()
+{
+ // Add(Shl(@x, $c), @y)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 32)));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (static_cast<intptr_t>(2) << static_cast<intptr_t>(32)));
+}
+
+void testAddShl64()
+{
+ // Add(Shl(@x, $c), @y)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 64)));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + 2);
+}
+
+void testAddShl65()
+{
+ // Add(Shl(@x, $c), @y)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* result = root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 65)));
+ root->appendNew<Value>(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<intptr_t>(*code, 1, 2), 1 + (2 << 1));
+}
+
+void testReduceStrengthReassociation(bool flip)
+{
+ // Add(Add(@x, $c), @y) -> Add(Add(@x, @y), $c)
+ // and
+ // Add(@y, Add(@x, $c)) -> Add(Add(@x, @y), $c)
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+ Value* innerAdd = root->appendNew<Value>(
+ proc, Add, Origin(), arg1,
+ root->appendNew<ConstPtrValue>(proc, Origin(), 42));
+
+ Value* outerAdd;
+ if (flip)
+ outerAdd = root->appendNew<Value>(proc, Add, Origin(), arg2, innerAdd);
+ else
+ outerAdd = root->appendNew<Value>(proc, Add, Origin(), innerAdd, arg2);
+
+ root->appendNew<Value>(proc, Return, Origin(), outerAdd);
+
+ proc.resetReachability();
+
+ if (shouldBeVerbose()) {
+ dataLog("IR before reduceStrength:\n");
+ dataLog(proc);
+ }
+
+ reduceStrength(proc);
+
+ if (shouldBeVerbose()) {
+ dataLog("IR after reduceStrength:\n");
+ dataLog(proc);
+ }
+
+ CHECK_EQ(root->last()->opcode(), Return);
+ CHECK_EQ(root->last()->child(0)->opcode(), Add);
+ CHECK(root->last()->child(0)->child(1)->isIntPtr(42));
+ CHECK_EQ(root->last()->child(0)->child(0)->opcode(), Add);
+ CHECK(
+ (root->last()->child(0)->child(0)->child(0) == arg1 && root->last()->child(0)->child(0)->child(1) == arg2) ||
+ (root->last()->child(0)->child(0)->child(0) == arg2 && root->last()->child(0)->child(0)->child(1) == arg1));
+}
+
+void testLoadBaseIndexShift2()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNew<Value>(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 2)))));
+ auto code = compile(proc);
+ if (isX86())
+ checkUsesInstruction(*code, "(%rdi,%rsi,4)");
+ int32_t value = 12341234;
+ char* ptr = bitwise_cast<char*>(&value);
+ for (unsigned i = 0; i < 10; ++i)
+ CHECK_EQ(invoke<int32_t>(*code, ptr - (static_cast<intptr_t>(1) << static_cast<intptr_t>(2)) * i, i), 12341234);
+}
+
+void testLoadBaseIndexShift32()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNew<Value>(
+ proc, Return, Origin(),
+ root->appendNew<MemoryValue>(
+ proc, Load, Int32, Origin(),
+ root->appendNew<Value>(
+ proc, Add, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+ root->appendNew<Value>(
+ proc, Shl, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+ root->appendNew<Const32Value>(proc, Origin(), 32)))));
+ auto code = compile(proc);
+ int32_t value = 12341234;
+ char* ptr = bitwise_cast<char*>(&value);
+ for (unsigned i = 0; i < 10; ++i)
+ CHECK_EQ(invoke<int32_t>(*code, ptr - (static_cast<intptr_t>(1) << static_cast<intptr_t>(32)) * i, i), 12341234);
+}
+
+void testOptimizeMaterialization()
+{
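+ // The two constants differ by 35, so the second should be materialized as the first plus an immediate 35 rather than as another 64-bit constant.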
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ root->appendNew<CCallValue>(
+ proc, Void, Origin(),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0x123423453456llu),
+ root->appendNew<ConstPtrValue>(proc, Origin(), 0x123423453456llu + 35));
+ root->appendNew<Value>(proc, Return, Origin());
+
+ auto code = compile(proc);
+ bool found = false;
+ for (Air::BasicBlock* block : proc.code()) {
+ for (Air::Inst& inst : *block) {
+ if (inst.kind.opcode != Air::Add64)
+ continue;
+ if (inst.args[0] != Air::Arg::imm(35))
+ continue;
+ found = true;
+ }
+ }
+ CHECK(found);
+}
+
+void testWasmBoundsCheck(unsigned offset)
+{
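+ // The pinned register holds the memory bound; calls whose pointer plus offset reaches that bound take the bounds-check slow path, which returns 42 instead of 0x42.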
+ Procedure proc;
+ GPRReg pinned = GPRInfo::argumentGPR1;
+ proc.pinRegister(pinned);
+
+ proc.setWasmBoundsCheckGenerator([=] (CCallHelpers& jit, GPRReg pinnedGPR, unsigned actualOffset) {
+ CHECK_EQ(pinnedGPR, pinned);
+ CHECK_EQ(actualOffset, offset);
+
+ // This should always work because a function this simple should never have callee
+ // saves.
+ jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+ jit.ret();
+ });
+
+ BasicBlock* root = proc.addBlock();
+ Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ if (pointerType() != Int32)
+ left = root->appendNew<Value>(proc, Trunc, Origin(), left);
+ root->appendNew<WasmBoundsCheckValue>(proc, Origin(), left, pinned, offset);
+ Value* result = root->appendNew<Const32Value>(proc, Origin(), 0x42);
+ root->appendNewControlValue(proc, Return, Origin(), result);
+
+ auto code = compile(proc);
+ CHECK_EQ(invoke<int32_t>(*code, 1, 2 + offset), 0x42);
+ CHECK_EQ(invoke<int32_t>(*code, 3, 2 + offset), 42);
+ CHECK_EQ(invoke<int32_t>(*code, 2, 2 + offset), 42);
+}
+
+void testWasmAddress()
+{
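+ // Builds a counted loop that stores valueToStore through WasmAddress(index * sizeof(unsigned)) relative to the base in the pinned register, for every index below loopCount.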
+ Procedure proc;
+ GPRReg pinnedGPR = GPRInfo::argumentGPR2;
+ proc.pinRegister(pinnedGPR);
+
+ unsigned loopCount = 100;
+ Vector<unsigned> values(loopCount);
+ unsigned numToStore = 42;
+
+ BasicBlock* root = proc.addBlock();
+ BasicBlock* header = proc.addBlock();
+ BasicBlock* body = proc.addBlock();
+ BasicBlock* continuation = proc.addBlock();
+
+ // Root
+ Value* loopCountValue = root->appendNew<Value>(proc, Trunc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* valueToStore = root->appendNew<Value>(proc, Trunc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+ UpsilonValue* beginUpsilon = root->appendNew<UpsilonValue>(proc, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+ root->appendNewControlValue(proc, Jump, Origin(), header);
+
+ // Header
+ Value* indexPhi = header->appendNew<Value>(proc, Phi, Int32, Origin());
+ header->appendNewControlValue(proc, Branch, Origin(),
+ header->appendNew<Value>(proc, Below, Origin(), indexPhi, loopCountValue),
+ body, continuation);
+
+ // Body
+ Value* pointer = body->appendNew<Value>(proc, Mul, Origin(), indexPhi,
+ body->appendNew<Const32Value>(proc, Origin(), sizeof(unsigned)));
+ pointer = body->appendNew<Value>(proc, ZExt32, Origin(), pointer);
+ body->appendNew<MemoryValue>(proc, Store, Origin(), valueToStore,
+ body->appendNew<WasmAddressValue>(proc, Origin(), pointer, pinnedGPR));
+ UpsilonValue* incUpsilon = body->appendNew<UpsilonValue>(proc, Origin(),
+ body->appendNew<Value>(proc, Add, Origin(), indexPhi,
+ body->appendNew<Const32Value>(proc, Origin(), 1)));
+ body->appendNewControlValue(proc, Jump, Origin(), header);
+
+ // Continuation
+ continuation->appendNewControlValue(proc, Return, Origin());
+
+ beginUpsilon->setPhi(indexPhi);
+ incUpsilon->setPhi(indexPhi);
+
+ auto code = compile(proc);
+ invoke<void>(*code, loopCount, numToStore, values.data());
+ for (unsigned value : values)
+ CHECK_EQ(numToStore, value);
+}
+
+// Make sure the compiler does not try to optimize anything out.
+NEVER_INLINE double zero()
+{
+ return 0.;
+}
+
+double negativeZero()
+{
+ return -zero();
+}
+
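+ // Each RUN* macro queues the named test as a shared task when the test's name matches the filter.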
+#define RUN(test) do { \
+ if (!shouldRun(#test)) \
+ break; \
+ tasks.append( \
+ createSharedTask<void()>( \
+ [&] () { \
+ dataLog(#test "...\n"); \
+ test; \
+ dataLog(#test ": OK!\n"); \
+ })); \
+ } while (false);
+
+#define RUN_UNARY(test, values) \
+ for (auto a : values) { \
+ CString testStr = toCString(#test, "(", a.name, ")"); \
+ if (!shouldRun(testStr.data())) \
+ continue; \
+ tasks.append(createSharedTask<void()>( \
+ [=] () { \
+ dataLog(toCString(testStr, "...\n")); \
+ test(a.value); \
+ dataLog(toCString(testStr, ": OK!\n")); \
+ })); \
+ }
+
+#define RUN_BINARY(test, valuesA, valuesB) \
+ for (auto a : valuesA) { \
+ for (auto b : valuesB) { \
+ CString testStr = toCString(#test, "(", a.name, ", ", b.name, ")"); \
+ if (!shouldRun(testStr.data())) \
+ continue; \
+ tasks.append(createSharedTask<void()>( \
+ [=] () { \
+ dataLog(toCString(testStr, "...\n")); \
+ test(a.value, b.value); \
+ dataLog(toCString(testStr, ": OK!\n")); \
+ })); \
+ } \
+ }
+
+void run(const char* filter)
+{
+ JSC::initializeThreading();
+ vm = &VM::create(LargeHeap).leakRef();
+
+ Deque<RefPtr<SharedTask<void()>>> tasks;
+
+ auto shouldRun = [&] (const char* testName) -> bool {
+ return !filter || !!strcasestr(testName, filter);
+ };
+
+ // We run this test first because it fiddles with some
+ // JSC options.
+ testTerminalPatchpointThatNeedsToBeSpilled2();
+
+ RUN(test42());
+ RUN(testLoad42());
+ RUN(testLoadOffsetImm9Max());
+ RUN(testLoadOffsetImm9MaxPlusOne());
+ RUN(testLoadOffsetImm9MaxPlusTwo());
+ RUN(testLoadOffsetImm9Min());
+ RUN(testLoadOffsetImm9MinMinusOne());
+ RUN(testLoadOffsetScaledUnsignedImm12Max());
+ RUN(testLoadOffsetScaledUnsignedOverImm12Max());
+ RUN(testArg(43));
+ RUN(testReturnConst64(5));
+ RUN(testReturnConst64(-42));
+ RUN(testReturnVoid());
+
+ RUN(testAddArg(111));
+ RUN(testAddArgs(1, 1));
+ RUN(testAddArgs(1, 2));
+ RUN(testAddArgImm(1, 2));
+ RUN(testAddArgImm(0, 2));
+ RUN(testAddArgImm(1, 0));
+ RUN(testAddImmArg(1, 2));
+ RUN(testAddImmArg(0, 2));
+ RUN(testAddImmArg(1, 0));
+ RUN_BINARY(testAddArgMem, int64Operands(), int64Operands());
+ RUN_BINARY(testAddMemArg, int64Operands(), int64Operands());
+ RUN_BINARY(testAddImmMem, int64Operands(), int64Operands());
+ RUN_UNARY(testAddArg32, int32Operands());
+ RUN(testAddArgs32(1, 1));
+ RUN(testAddArgs32(1, 2));
+ RUN_BINARY(testAddArgMem32, int32Operands(), int32Operands());
+ RUN_BINARY(testAddMemArg32, int32Operands(), int32Operands());
+ RUN_BINARY(testAddImmMem32, int32Operands(), int32Operands());
+ RUN(testAddArgZeroImmZDef());
+ RUN(testAddLoadTwice());
+
+ RUN(testAddArgDouble(M_PI));
+ RUN(testAddArgsDouble(M_PI, 1));
+ RUN(testAddArgsDouble(M_PI, -M_PI));
+ RUN(testAddArgImmDouble(M_PI, 1));
+ RUN(testAddArgImmDouble(M_PI, 0));
+ RUN(testAddArgImmDouble(M_PI, negativeZero()));
+ RUN(testAddArgImmDouble(0, 0));
+ RUN(testAddArgImmDouble(0, negativeZero()));
+ RUN(testAddArgImmDouble(negativeZero(), 0));
+ RUN(testAddArgImmDouble(negativeZero(), negativeZero()));
+ RUN(testAddImmArgDouble(M_PI, 1));
+ RUN(testAddImmArgDouble(M_PI, 0));
+ RUN(testAddImmArgDouble(M_PI, negativeZero()));
+ RUN(testAddImmArgDouble(0, 0));
+ RUN(testAddImmArgDouble(0, negativeZero()));
+ RUN(testAddImmArgDouble(negativeZero(), 0));
+ RUN(testAddImmArgDouble(negativeZero(), negativeZero()));
+ RUN(testAddImmsDouble(M_PI, 1));
+ RUN(testAddImmsDouble(M_PI, 0));
+ RUN(testAddImmsDouble(M_PI, negativeZero()));
+ RUN(testAddImmsDouble(0, 0));
+ RUN(testAddImmsDouble(0, negativeZero()));
+ RUN(testAddImmsDouble(negativeZero(), negativeZero()));
+ RUN_UNARY(testAddArgFloat, floatingPointOperands<float>());
+ RUN_BINARY(testAddArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testAddFPRArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testAddArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testAddImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testAddImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_UNARY(testAddArgFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+ RUN_BINARY(testAddArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testAddArgsFloatWithEffectfulDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+ RUN(testMulArg(5));
+ RUN(testMulAddArg(5));
+ RUN(testMulAddArg(85));
+ RUN(testMulArgStore(5));
+ RUN(testMulArgStore(85));
+ RUN(testMulArgs(1, 1));
+ RUN(testMulArgs(1, 2));
+ RUN(testMulArgs(3, 3));
+ RUN(testMulArgImm(1, 2));
+ RUN(testMulArgImm(1, 4));
+ RUN(testMulArgImm(1, 8));
+ RUN(testMulArgImm(1, 16));
+ RUN(testMulArgImm(1, 0x80000000llu));
+ RUN(testMulArgImm(1, 0x800000000000llu));
+ RUN(testMulArgImm(7, 2));
+ RUN(testMulArgImm(7, 4));
+ RUN(testMulArgImm(7, 8));
+ RUN(testMulArgImm(7, 16));
+ RUN(testMulArgImm(7, 0x80000000llu));
+ RUN(testMulArgImm(7, 0x800000000000llu));
+ RUN(testMulArgImm(-42, 2));
+ RUN(testMulArgImm(-42, 4));
+ RUN(testMulArgImm(-42, 8));
+ RUN(testMulArgImm(-42, 16));
+ RUN(testMulArgImm(-42, 0x80000000llu));
+ RUN(testMulArgImm(-42, 0x800000000000llu));
+ RUN(testMulArgImm(0, 2));
+ RUN(testMulArgImm(1, 0));
+ RUN(testMulArgImm(3, 3));
+ RUN(testMulArgImm(3, -1));
+ RUN(testMulArgImm(-3, -1));
+ RUN(testMulArgImm(0, -1));
+ RUN(testMulImmArg(1, 2));
+ RUN(testMulImmArg(0, 2));
+ RUN(testMulImmArg(1, 0));
+ RUN(testMulImmArg(3, 3));
+ RUN(testMulArgs32(1, 1));
+ RUN(testMulArgs32(1, 2));
+ RUN(testMulLoadTwice());
+ RUN(testMulAddArgsLeft());
+ RUN(testMulAddArgsRight());
+ RUN(testMulAddArgsLeft32());
+ RUN(testMulAddArgsRight32());
+ RUN(testMulSubArgsLeft());
+ RUN(testMulSubArgsRight());
+ RUN(testMulSubArgsLeft32());
+ RUN(testMulSubArgsRight32());
+ RUN(testMulNegArgs());
+ RUN(testMulNegArgs32());
+
+ RUN_UNARY(testMulArgDouble, floatingPointOperands<double>());
+ RUN_BINARY(testMulArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testMulArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testMulImmArgDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testMulImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_UNARY(testMulArgFloat, floatingPointOperands<float>());
+ RUN_BINARY(testMulArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testMulArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testMulImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testMulImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_UNARY(testMulArgFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+ RUN_BINARY(testMulArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testMulArgsFloatWithEffectfulDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+ RUN(testDivArgDouble(M_PI));
+ RUN(testDivArgsDouble(M_PI, 1));
+ RUN(testDivArgsDouble(M_PI, -M_PI));
+ RUN(testDivArgImmDouble(M_PI, 1));
+ RUN(testDivArgImmDouble(M_PI, 0));
+ RUN(testDivArgImmDouble(M_PI, negativeZero()));
+ RUN(testDivArgImmDouble(0, 0));
+ RUN(testDivArgImmDouble(0, negativeZero()));
+ RUN(testDivArgImmDouble(negativeZero(), 0));
+ RUN(testDivArgImmDouble(negativeZero(), negativeZero()));
+ RUN(testDivImmArgDouble(M_PI, 1));
+ RUN(testDivImmArgDouble(M_PI, 0));
+ RUN(testDivImmArgDouble(M_PI, negativeZero()));
+ RUN(testDivImmArgDouble(0, 0));
+ RUN(testDivImmArgDouble(0, negativeZero()));
+ RUN(testDivImmArgDouble(negativeZero(), 0));
+ RUN(testDivImmArgDouble(negativeZero(), negativeZero()));
+ RUN(testDivImmsDouble(M_PI, 1));
+ RUN(testDivImmsDouble(M_PI, 0));
+ RUN(testDivImmsDouble(M_PI, negativeZero()));
+ RUN(testDivImmsDouble(0, 0));
+ RUN(testDivImmsDouble(0, negativeZero()));
+ RUN(testDivImmsDouble(negativeZero(), negativeZero()));
+ RUN_UNARY(testDivArgFloat, floatingPointOperands<float>());
+ RUN_BINARY(testDivArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testDivArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testDivImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testDivImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_UNARY(testDivArgFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+ RUN_BINARY(testDivArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testDivArgsFloatWithEffectfulDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+ RUN_BINARY(testUDivArgsInt32, int32Operands(), int32Operands());
+ RUN_BINARY(testUDivArgsInt64, int64Operands(), int64Operands());
+
+ RUN_UNARY(testModArgDouble, floatingPointOperands<double>());
+ RUN_BINARY(testModArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testModArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testModImmArgDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testModImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_UNARY(testModArgFloat, floatingPointOperands<float>());
+ RUN_BINARY(testModArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testModArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testModImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testModImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+ RUN_BINARY(testUModArgsInt32, int32Operands(), int32Operands());
+ RUN_BINARY(testUModArgsInt64, int64Operands(), int64Operands());
+
+ RUN(testSubArg(24));
+ RUN(testSubArgs(1, 1));
+ RUN(testSubArgs(1, 2));
+ RUN(testSubArgs(13, -42));
+ RUN(testSubArgs(-13, 42));
+ RUN(testSubArgImm(1, 1));
+ RUN(testSubArgImm(1, 2));
+ RUN(testSubArgImm(13, -42));
+ RUN(testSubArgImm(-13, 42));
+ RUN(testSubArgImm(42, 0));
+ RUN(testSubImmArg(1, 1));
+ RUN(testSubImmArg(1, 2));
+ RUN(testSubImmArg(13, -42));
+ RUN(testSubImmArg(-13, 42));
+ RUN_BINARY(testSubArgMem, int64Operands(), int64Operands());
+ RUN_BINARY(testSubMemArg, int64Operands(), int64Operands());
+ RUN_BINARY(testSubImmMem, int32Operands(), int32Operands());
+ RUN_BINARY(testSubMemImm, int32Operands(), int32Operands());
+ RUN_UNARY(testNegValueSubOne, int32Operands());
+
+ RUN(testSubArgs32(1, 1));
+ RUN(testSubArgs32(1, 2));
+ RUN(testSubArgs32(13, -42));
+ RUN(testSubArgs32(-13, 42));
+ RUN(testSubArgImm32(1, 1));
+ RUN(testSubArgImm32(1, 2));
+ RUN(testSubArgImm32(13, -42));
+ RUN(testSubArgImm32(-13, 42));
+ RUN(testSubImmArg32(1, 1));
+ RUN(testSubImmArg32(1, 2));
+ RUN(testSubImmArg32(13, -42));
+ RUN(testSubImmArg32(-13, 42));
+ RUN_BINARY(testSubArgMem32, int32Operands(), int32Operands());
+ RUN_BINARY(testSubMemArg32, int32Operands(), int32Operands());
+ RUN_BINARY(testSubImmMem32, int32Operands(), int32Operands());
+ RUN_BINARY(testSubMemImm32, int32Operands(), int32Operands());
+ RUN_UNARY(testNegValueSubOne32, int64Operands());
+
+ RUN_UNARY(testSubArgDouble, floatingPointOperands<double>());
+ RUN_BINARY(testSubArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testSubArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testSubImmArgDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testSubImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_UNARY(testSubArgFloat, floatingPointOperands<float>());
+ RUN_BINARY(testSubArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testSubArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testSubImmArgFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testSubImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_UNARY(testSubArgFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+ RUN_BINARY(testSubArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testSubArgsFloatWithEffectfulDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+ RUN_UNARY(testNegDouble, floatingPointOperands<double>());
+ RUN_UNARY(testNegFloat, floatingPointOperands<float>());
+ RUN_UNARY(testNegFloatWithUselessDoubleConversion, floatingPointOperands<float>());
+
+ RUN(testBitAndArgs(43, 43));
+ RUN(testBitAndArgs(43, 0));
+ RUN(testBitAndArgs(10, 3));
+ RUN(testBitAndArgs(42, 0xffffffffffffffff));
+ RUN(testBitAndSameArg(43));
+ RUN(testBitAndSameArg(0));
+ RUN(testBitAndSameArg(3));
+ RUN(testBitAndSameArg(0xffffffffffffffff));
+ RUN(testBitAndImms(43, 43));
+ RUN(testBitAndImms(43, 0));
+ RUN(testBitAndImms(10, 3));
+ RUN(testBitAndImms(42, 0xffffffffffffffff));
+ RUN(testBitAndArgImm(43, 43));
+ RUN(testBitAndArgImm(43, 0));
+ RUN(testBitAndArgImm(10, 3));
+ RUN(testBitAndArgImm(42, 0xffffffffffffffff));
+ RUN(testBitAndArgImm(42, 0xff));
+ RUN(testBitAndArgImm(300, 0xff));
+ RUN(testBitAndArgImm(-300, 0xff));
+ RUN(testBitAndArgImm(42, 0xffff));
+ RUN(testBitAndArgImm(40000, 0xffff));
+ RUN(testBitAndArgImm(-40000, 0xffff));
+ RUN(testBitAndImmArg(43, 43));
+ RUN(testBitAndImmArg(43, 0));
+ RUN(testBitAndImmArg(10, 3));
+ RUN(testBitAndImmArg(42, 0xffffffffffffffff));
+ RUN(testBitAndBitAndArgImmImm(2, 7, 3));
+ RUN(testBitAndBitAndArgImmImm(1, 6, 6));
+ RUN(testBitAndBitAndArgImmImm(0xffff, 24, 7));
+ RUN(testBitAndImmBitAndArgImm(7, 2, 3));
+ RUN(testBitAndImmBitAndArgImm(6, 1, 6));
+ RUN(testBitAndImmBitAndArgImm(24, 0xffff, 7));
+ RUN(testBitAndArgs32(43, 43));
+ RUN(testBitAndArgs32(43, 0));
+ RUN(testBitAndArgs32(10, 3));
+ RUN(testBitAndArgs32(42, 0xffffffff));
+ RUN(testBitAndSameArg32(43));
+ RUN(testBitAndSameArg32(0));
+ RUN(testBitAndSameArg32(3));
+ RUN(testBitAndSameArg32(0xffffffff));
+ RUN(testBitAndImms32(43, 43));
+ RUN(testBitAndImms32(43, 0));
+ RUN(testBitAndImms32(10, 3));
+ RUN(testBitAndImms32(42, 0xffffffff));
+ RUN(testBitAndArgImm32(43, 43));
+ RUN(testBitAndArgImm32(43, 0));
+ RUN(testBitAndArgImm32(10, 3));
+ RUN(testBitAndArgImm32(42, 0xffffffff));
+ RUN(testBitAndImmArg32(43, 43));
+ RUN(testBitAndImmArg32(43, 0));
+ RUN(testBitAndImmArg32(10, 3));
+ RUN(testBitAndImmArg32(42, 0xffffffff));
+ RUN(testBitAndImmArg32(42, 0xff));
+ RUN(testBitAndImmArg32(300, 0xff));
+ RUN(testBitAndImmArg32(-300, 0xff));
+ RUN(testBitAndImmArg32(42, 0xffff));
+ RUN(testBitAndImmArg32(40000, 0xffff));
+ RUN(testBitAndImmArg32(-40000, 0xffff));
+ RUN(testBitAndBitAndArgImmImm32(2, 7, 3));
+ RUN(testBitAndBitAndArgImmImm32(1, 6, 6));
+ RUN(testBitAndBitAndArgImmImm32(0xffff, 24, 7));
+ RUN(testBitAndImmBitAndArgImm32(7, 2, 3));
+ RUN(testBitAndImmBitAndArgImm32(6, 1, 6));
+ RUN(testBitAndImmBitAndArgImm32(24, 0xffff, 7));
+ RUN_BINARY(testBitAndWithMaskReturnsBooleans, int64Operands(), int64Operands());
+ RUN_UNARY(testBitAndArgDouble, floatingPointOperands<double>());
+ RUN_BINARY(testBitAndArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testBitAndArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testBitAndImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_UNARY(testBitAndArgFloat, floatingPointOperands<float>());
+ RUN_BINARY(testBitAndArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBitAndArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBitAndImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBitAndArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+ RUN(testBitOrArgs(43, 43));
+ RUN(testBitOrArgs(43, 0));
+ RUN(testBitOrArgs(10, 3));
+ RUN(testBitOrArgs(42, 0xffffffffffffffff));
+ RUN(testBitOrSameArg(43));
+ RUN(testBitOrSameArg(0));
+ RUN(testBitOrSameArg(3));
+ RUN(testBitOrSameArg(0xffffffffffffffff));
+ RUN(testBitOrImms(43, 43));
+ RUN(testBitOrImms(43, 0));
+ RUN(testBitOrImms(10, 3));
+ RUN(testBitOrImms(42, 0xffffffffffffffff));
+ RUN(testBitOrArgImm(43, 43));
+ RUN(testBitOrArgImm(43, 0));
+ RUN(testBitOrArgImm(10, 3));
+ RUN(testBitOrArgImm(42, 0xffffffffffffffff));
+ RUN(testBitOrImmArg(43, 43));
+ RUN(testBitOrImmArg(43, 0));
+ RUN(testBitOrImmArg(10, 3));
+ RUN(testBitOrImmArg(42, 0xffffffffffffffff));
+ RUN(testBitOrBitOrArgImmImm(2, 7, 3));
+ RUN(testBitOrBitOrArgImmImm(1, 6, 6));
+ RUN(testBitOrBitOrArgImmImm(0xffff, 24, 7));
+ RUN(testBitOrImmBitOrArgImm(7, 2, 3));
+ RUN(testBitOrImmBitOrArgImm(6, 1, 6));
+ RUN(testBitOrImmBitOrArgImm(24, 0xffff, 7));
+ RUN(testBitOrArgs32(43, 43));
+ RUN(testBitOrArgs32(43, 0));
+ RUN(testBitOrArgs32(10, 3));
+ RUN(testBitOrArgs32(42, 0xffffffff));
+ RUN(testBitOrSameArg32(43));
+ RUN(testBitOrSameArg32(0));
+ RUN(testBitOrSameArg32(3));
+ RUN(testBitOrSameArg32(0xffffffff));
+ RUN(testBitOrImms32(43, 43));
+ RUN(testBitOrImms32(43, 0));
+ RUN(testBitOrImms32(10, 3));
+ RUN(testBitOrImms32(42, 0xffffffff));
+ RUN(testBitOrArgImm32(43, 43));
+ RUN(testBitOrArgImm32(43, 0));
+ RUN(testBitOrArgImm32(10, 3));
+ RUN(testBitOrArgImm32(42, 0xffffffff));
+ RUN(testBitOrImmArg32(43, 43));
+ RUN(testBitOrImmArg32(43, 0));
+ RUN(testBitOrImmArg32(10, 3));
+ RUN(testBitOrImmArg32(42, 0xffffffff));
+ RUN(testBitOrBitOrArgImmImm32(2, 7, 3));
+ RUN(testBitOrBitOrArgImmImm32(1, 6, 6));
+ RUN(testBitOrBitOrArgImmImm32(0xffff, 24, 7));
+ RUN(testBitOrImmBitOrArgImm32(7, 2, 3));
+ RUN(testBitOrImmBitOrArgImm32(6, 1, 6));
+ RUN(testBitOrImmBitOrArgImm32(24, 0xffff, 7));
+ RUN_UNARY(testBitOrArgDouble, floatingPointOperands<double>());
+ RUN_BINARY(testBitOrArgsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testBitOrArgImmDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testBitOrImmsDouble, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_UNARY(testBitOrArgFloat, floatingPointOperands<float>());
+ RUN_BINARY(testBitOrArgsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBitOrArgImmFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBitOrImmsFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBitOrArgsFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+
+ RUN_BINARY(testBitXorArgs, int64Operands(), int64Operands());
+ RUN_UNARY(testBitXorSameArg, int64Operands());
+ RUN_BINARY(testBitXorImms, int64Operands(), int64Operands());
+ RUN_BINARY(testBitXorArgImm, int64Operands(), int64Operands());
+ RUN_BINARY(testBitXorImmArg, int64Operands(), int64Operands());
+ RUN(testBitXorBitXorArgImmImm(2, 7, 3));
+ RUN(testBitXorBitXorArgImmImm(1, 6, 6));
+ RUN(testBitXorBitXorArgImmImm(0xffff, 24, 7));
+ RUN(testBitXorImmBitXorArgImm(7, 2, 3));
+ RUN(testBitXorImmBitXorArgImm(6, 1, 6));
+ RUN(testBitXorImmBitXorArgImm(24, 0xffff, 7));
+ RUN(testBitXorArgs32(43, 43));
+ RUN(testBitXorArgs32(43, 0));
+ RUN(testBitXorArgs32(10, 3));
+ RUN(testBitXorArgs32(42, 0xffffffff));
+ RUN(testBitXorSameArg32(43));
+ RUN(testBitXorSameArg32(0));
+ RUN(testBitXorSameArg32(3));
+ RUN(testBitXorSameArg32(0xffffffff));
+ RUN(testBitXorImms32(43, 43));
+ RUN(testBitXorImms32(43, 0));
+ RUN(testBitXorImms32(10, 3));
+ RUN(testBitXorImms32(42, 0xffffffff));
+ RUN(testBitXorArgImm32(43, 43));
+ RUN(testBitXorArgImm32(43, 0));
+ RUN(testBitXorArgImm32(10, 3));
+ RUN(testBitXorArgImm32(42, 0xffffffff));
+ RUN(testBitXorImmArg32(43, 43));
+ RUN(testBitXorImmArg32(43, 0));
+ RUN(testBitXorImmArg32(10, 3));
+ RUN(testBitXorImmArg32(42, 0xffffffff));
+ RUN(testBitXorBitXorArgImmImm32(2, 7, 3));
+ RUN(testBitXorBitXorArgImmImm32(1, 6, 6));
+ RUN(testBitXorBitXorArgImmImm32(0xffff, 24, 7));
+ RUN(testBitXorImmBitXorArgImm32(7, 2, 3));
+ RUN(testBitXorImmBitXorArgImm32(6, 1, 6));
+ RUN(testBitXorImmBitXorArgImm32(24, 0xffff, 7));
+
+ RUN_UNARY(testBitNotArg, int64Operands());
+ RUN_UNARY(testBitNotImm, int64Operands());
+ RUN_UNARY(testBitNotMem, int64Operands());
+ RUN_UNARY(testBitNotArg32, int32Operands());
+ RUN_UNARY(testBitNotImm32, int32Operands());
+ RUN_UNARY(testBitNotMem32, int32Operands());
+ RUN_BINARY(testBitNotOnBooleanAndBranch32, int32Operands(), int32Operands());
+
+ RUN(testShlArgs(1, 0));
+ RUN(testShlArgs(1, 1));
+ RUN(testShlArgs(1, 62));
+ RUN(testShlArgs(0xffffffffffffffff, 0));
+ RUN(testShlArgs(0xffffffffffffffff, 1));
+ RUN(testShlArgs(0xffffffffffffffff, 63));
+ RUN(testShlImms(1, 0));
+ RUN(testShlImms(1, 1));
+ RUN(testShlImms(1, 62));
+ RUN(testShlImms(1, 65));
+ RUN(testShlImms(0xffffffffffffffff, 0));
+ RUN(testShlImms(0xffffffffffffffff, 1));
+ RUN(testShlImms(0xffffffffffffffff, 63));
+ RUN(testShlArgImm(1, 0));
+ RUN(testShlArgImm(1, 1));
+ RUN(testShlArgImm(1, 62));
+ RUN(testShlArgImm(1, 65));
+ RUN(testShlArgImm(0xffffffffffffffff, 0));
+ RUN(testShlArgImm(0xffffffffffffffff, 1));
+ RUN(testShlArgImm(0xffffffffffffffff, 63));
+ RUN(testShlArg32(2));
+ RUN(testShlArgs32(1, 0));
+ RUN(testShlArgs32(1, 1));
+ RUN(testShlArgs32(1, 62));
+ RUN(testShlArgs32(1, 33));
+ RUN(testShlArgs32(0xffffffff, 0));
+ RUN(testShlArgs32(0xffffffff, 1));
+ RUN(testShlArgs32(0xffffffff, 63));
+ RUN(testShlImms32(1, 0));
+ RUN(testShlImms32(1, 1));
+ RUN(testShlImms32(1, 62));
+ RUN(testShlImms32(1, 33));
+ RUN(testShlImms32(0xffffffff, 0));
+ RUN(testShlImms32(0xffffffff, 1));
+ RUN(testShlImms32(0xffffffff, 63));
+ RUN(testShlArgImm32(1, 0));
+ RUN(testShlArgImm32(1, 1));
+ RUN(testShlArgImm32(1, 62));
+ RUN(testShlArgImm32(0xffffffff, 0));
+ RUN(testShlArgImm32(0xffffffff, 1));
+ RUN(testShlArgImm32(0xffffffff, 63));
+
+ RUN(testSShrArgs(1, 0));
+ RUN(testSShrArgs(1, 1));
+ RUN(testSShrArgs(1, 62));
+ RUN(testSShrArgs(0xffffffffffffffff, 0));
+ RUN(testSShrArgs(0xffffffffffffffff, 1));
+ RUN(testSShrArgs(0xffffffffffffffff, 63));
+ RUN(testSShrImms(1, 0));
+ RUN(testSShrImms(1, 1));
+ RUN(testSShrImms(1, 62));
+ RUN(testSShrImms(1, 65));
+ RUN(testSShrImms(0xffffffffffffffff, 0));
+ RUN(testSShrImms(0xffffffffffffffff, 1));
+ RUN(testSShrImms(0xffffffffffffffff, 63));
+ RUN(testSShrArgImm(1, 0));
+ RUN(testSShrArgImm(1, 1));
+ RUN(testSShrArgImm(1, 62));
+ RUN(testSShrArgImm(1, 65));
+ RUN(testSShrArgImm(0xffffffffffffffff, 0));
+ RUN(testSShrArgImm(0xffffffffffffffff, 1));
+ RUN(testSShrArgImm(0xffffffffffffffff, 63));
+ RUN(testSShrArg32(32));
+ RUN(testSShrArgs32(1, 0));
+ RUN(testSShrArgs32(1, 1));
+ RUN(testSShrArgs32(1, 62));
+ RUN(testSShrArgs32(1, 33));
+ RUN(testSShrArgs32(0xffffffff, 0));
+ RUN(testSShrArgs32(0xffffffff, 1));
+ RUN(testSShrArgs32(0xffffffff, 63));
+ RUN(testSShrImms32(1, 0));
+ RUN(testSShrImms32(1, 1));
+ RUN(testSShrImms32(1, 62));
+ RUN(testSShrImms32(1, 33));
+ RUN(testSShrImms32(0xffffffff, 0));
+ RUN(testSShrImms32(0xffffffff, 1));
+ RUN(testSShrImms32(0xffffffff, 63));
+ RUN(testSShrArgImm32(1, 0));
+ RUN(testSShrArgImm32(1, 1));
+ RUN(testSShrArgImm32(1, 62));
+ RUN(testSShrArgImm32(0xffffffff, 0));
+ RUN(testSShrArgImm32(0xffffffff, 1));
+ RUN(testSShrArgImm32(0xffffffff, 63));
+
+ RUN(testZShrArgs(1, 0));
+ RUN(testZShrArgs(1, 1));
+ RUN(testZShrArgs(1, 62));
+ RUN(testZShrArgs(0xffffffffffffffff, 0));
+ RUN(testZShrArgs(0xffffffffffffffff, 1));
+ RUN(testZShrArgs(0xffffffffffffffff, 63));
+ RUN(testZShrImms(1, 0));
+ RUN(testZShrImms(1, 1));
+ RUN(testZShrImms(1, 62));
+ RUN(testZShrImms(1, 65));
+ RUN(testZShrImms(0xffffffffffffffff, 0));
+ RUN(testZShrImms(0xffffffffffffffff, 1));
+ RUN(testZShrImms(0xffffffffffffffff, 63));
+ RUN(testZShrArgImm(1, 0));
+ RUN(testZShrArgImm(1, 1));
+ RUN(testZShrArgImm(1, 62));
+ RUN(testZShrArgImm(1, 65));
+ RUN(testZShrArgImm(0xffffffffffffffff, 0));
+ RUN(testZShrArgImm(0xffffffffffffffff, 1));
+ RUN(testZShrArgImm(0xffffffffffffffff, 63));
+ RUN(testZShrArg32(32));
+ RUN(testZShrArgs32(1, 0));
+ RUN(testZShrArgs32(1, 1));
+ RUN(testZShrArgs32(1, 62));
+ RUN(testZShrArgs32(1, 33));
+ RUN(testZShrArgs32(0xffffffff, 0));
+ RUN(testZShrArgs32(0xffffffff, 1));
+ RUN(testZShrArgs32(0xffffffff, 63));
+ RUN(testZShrImms32(1, 0));
+ RUN(testZShrImms32(1, 1));
+ RUN(testZShrImms32(1, 62));
+ RUN(testZShrImms32(1, 33));
+ RUN(testZShrImms32(0xffffffff, 0));
+ RUN(testZShrImms32(0xffffffff, 1));
+ RUN(testZShrImms32(0xffffffff, 63));
+ RUN(testZShrArgImm32(1, 0));
+ RUN(testZShrArgImm32(1, 1));
+ RUN(testZShrArgImm32(1, 62));
+ RUN(testZShrArgImm32(0xffffffff, 0));
+ RUN(testZShrArgImm32(0xffffffff, 1));
+ RUN(testZShrArgImm32(0xffffffff, 63));
+
+ RUN_UNARY(testClzArg64, int64Operands());
+ RUN_UNARY(testClzMem64, int64Operands());
+ RUN_UNARY(testClzArg32, int32Operands());
+ RUN_UNARY(testClzMem32, int32Operands());
+
+ RUN_UNARY(testAbsArg, floatingPointOperands<double>());
+ RUN_UNARY(testAbsImm, floatingPointOperands<double>());
+ RUN_UNARY(testAbsMem, floatingPointOperands<double>());
+ RUN_UNARY(testAbsAbsArg, floatingPointOperands<double>());
+ RUN_UNARY(testAbsBitwiseCastArg, floatingPointOperands<double>());
+ RUN_UNARY(testBitwiseCastAbsBitwiseCastArg, floatingPointOperands<double>());
+ RUN_UNARY(testAbsArg, floatingPointOperands<float>());
+ RUN_UNARY(testAbsImm, floatingPointOperands<float>());
+ RUN_UNARY(testAbsMem, floatingPointOperands<float>());
+ RUN_UNARY(testAbsAbsArg, floatingPointOperands<float>());
+ RUN_UNARY(testAbsBitwiseCastArg, floatingPointOperands<float>());
+ RUN_UNARY(testBitwiseCastAbsBitwiseCastArg, floatingPointOperands<float>());
+ RUN_UNARY(testAbsArgWithUselessDoubleConversion, floatingPointOperands<float>());
+ RUN_UNARY(testAbsArgWithEffectfulDoubleConversion, floatingPointOperands<float>());
+
+ RUN_UNARY(testCeilArg, floatingPointOperands<double>());
+ RUN_UNARY(testCeilImm, floatingPointOperands<double>());
+ RUN_UNARY(testCeilMem, floatingPointOperands<double>());
+ RUN_UNARY(testCeilCeilArg, floatingPointOperands<double>());
+ RUN_UNARY(testFloorCeilArg, floatingPointOperands<double>());
+ RUN_UNARY(testCeilIToD64, int64Operands());
+ RUN_UNARY(testCeilIToD32, int32Operands());
+ RUN_UNARY(testCeilArg, floatingPointOperands<float>());
+ RUN_UNARY(testCeilImm, floatingPointOperands<float>());
+ RUN_UNARY(testCeilMem, floatingPointOperands<float>());
+ RUN_UNARY(testCeilCeilArg, floatingPointOperands<float>());
+ RUN_UNARY(testFloorCeilArg, floatingPointOperands<float>());
+ RUN_UNARY(testCeilArgWithUselessDoubleConversion, floatingPointOperands<float>());
+ RUN_UNARY(testCeilArgWithEffectfulDoubleConversion, floatingPointOperands<float>());
+
+ RUN_UNARY(testFloorArg, floatingPointOperands<double>());
+ RUN_UNARY(testFloorImm, floatingPointOperands<double>());
+ RUN_UNARY(testFloorMem, floatingPointOperands<double>());
+ RUN_UNARY(testFloorFloorArg, floatingPointOperands<double>());
+ RUN_UNARY(testCeilFloorArg, floatingPointOperands<double>());
+ RUN_UNARY(testFloorIToD64, int64Operands());
+ RUN_UNARY(testFloorIToD32, int32Operands());
+ RUN_UNARY(testFloorArg, floatingPointOperands<float>());
+ RUN_UNARY(testFloorImm, floatingPointOperands<float>());
+ RUN_UNARY(testFloorMem, floatingPointOperands<float>());
+ RUN_UNARY(testFloorFloorArg, floatingPointOperands<float>());
+ RUN_UNARY(testCeilFloorArg, floatingPointOperands<float>());
+ RUN_UNARY(testFloorArgWithUselessDoubleConversion, floatingPointOperands<float>());
+ RUN_UNARY(testFloorArgWithEffectfulDoubleConversion, floatingPointOperands<float>());
+
+ RUN_UNARY(testSqrtArg, floatingPointOperands<double>());
+ RUN_UNARY(testSqrtImm, floatingPointOperands<double>());
+ RUN_UNARY(testSqrtMem, floatingPointOperands<double>());
+ RUN_UNARY(testSqrtArg, floatingPointOperands<float>());
+ RUN_UNARY(testSqrtImm, floatingPointOperands<float>());
+ RUN_UNARY(testSqrtMem, floatingPointOperands<float>());
+ RUN_UNARY(testSqrtArgWithUselessDoubleConversion, floatingPointOperands<float>());
+ RUN_UNARY(testSqrtArgWithEffectfulDoubleConversion, floatingPointOperands<float>());
+
+ RUN_BINARY(testCompareTwoFloatToDouble, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testCompareOneFloatToDouble, floatingPointOperands<float>(), floatingPointOperands<double>());
+ RUN_BINARY(testCompareFloatToDoubleThroughPhi, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_UNARY(testDoubleToFloatThroughPhi, floatingPointOperands<float>());
+ RUN(testReduceFloatToDoubleValidates());
+ RUN_UNARY(testDoubleProducerPhiToFloatConversion, floatingPointOperands<float>());
+ RUN_UNARY(testDoubleProducerPhiToFloatConversionWithDoubleConsumer, floatingPointOperands<float>());
+ RUN_BINARY(testDoubleProducerPhiWithNonFloatConst, floatingPointOperands<float>(), floatingPointOperands<double>());
+
+ RUN_UNARY(testDoubleArgToInt64BitwiseCast, floatingPointOperands<double>());
+ RUN_UNARY(testDoubleImmToInt64BitwiseCast, floatingPointOperands<double>());
+ RUN_UNARY(testTwoBitwiseCastOnDouble, floatingPointOperands<double>());
+ RUN_UNARY(testBitwiseCastOnDoubleInMemory, floatingPointOperands<double>());
+ RUN_UNARY(testBitwiseCastOnDoubleInMemoryIndexed, floatingPointOperands<double>());
+ RUN_UNARY(testInt64BArgToDoubleBitwiseCast, int64Operands());
+ RUN_UNARY(testInt64BImmToDoubleBitwiseCast, int64Operands());
+ RUN_UNARY(testTwoBitwiseCastOnInt64, int64Operands());
+ RUN_UNARY(testBitwiseCastOnInt64InMemory, int64Operands());
+ RUN_UNARY(testBitwiseCastOnInt64InMemoryIndexed, int64Operands());
+ RUN_UNARY(testFloatImmToInt32BitwiseCast, floatingPointOperands<float>());
+ RUN_UNARY(testBitwiseCastOnFloatInMemory, floatingPointOperands<float>());
+ RUN_UNARY(testInt32BArgToFloatBitwiseCast, int32Operands());
+ RUN_UNARY(testInt32BImmToFloatBitwiseCast, int32Operands());
+ RUN_UNARY(testTwoBitwiseCastOnInt32, int32Operands());
+ RUN_UNARY(testBitwiseCastOnInt32InMemory, int32Operands());
+
+ RUN_UNARY(testConvertDoubleToFloatArg, floatingPointOperands<double>());
+ RUN_UNARY(testConvertDoubleToFloatImm, floatingPointOperands<double>());
+ RUN_UNARY(testConvertDoubleToFloatMem, floatingPointOperands<double>());
+ RUN_UNARY(testConvertFloatToDoubleArg, floatingPointOperands<float>());
+ RUN_UNARY(testConvertFloatToDoubleImm, floatingPointOperands<float>());
+ RUN_UNARY(testConvertFloatToDoubleMem, floatingPointOperands<float>());
+ RUN_UNARY(testConvertDoubleToFloatToDoubleToFloat, floatingPointOperands<double>());
+ RUN_UNARY(testStoreFloat, floatingPointOperands<double>());
+ RUN_UNARY(testStoreDoubleConstantAsFloat, floatingPointOperands<double>());
+ RUN_UNARY(testLoadFloatConvertDoubleConvertFloatStoreFloat, floatingPointOperands<float>());
+ RUN_UNARY(testFroundArg, floatingPointOperands<double>());
+ RUN_UNARY(testFroundMem, floatingPointOperands<double>());
+
+ RUN(testIToD64Arg());
+ RUN(testIToF64Arg());
+ RUN(testIToD32Arg());
+ RUN(testIToF32Arg());
+ RUN(testIToD64Mem());
+ RUN(testIToF64Mem());
+ RUN(testIToD32Mem());
+ RUN(testIToF32Mem());
+ RUN_UNARY(testIToD64Imm, int64Operands());
+ RUN_UNARY(testIToF64Imm, int64Operands());
+ RUN_UNARY(testIToD32Imm, int32Operands());
+ RUN_UNARY(testIToF32Imm, int32Operands());
+ RUN(testIToDReducedToIToF64Arg());
+ RUN(testIToDReducedToIToF32Arg());
+
+ RUN(testStore32(44));
+ RUN(testStoreConstant(49));
+ RUN(testStoreConstantPtr(49));
+ RUN(testStore8Arg());
+ RUN(testStore8Imm());
+ RUN(testStorePartial8BitRegisterOnX86());
+ RUN(testStore16Arg());
+ RUN(testStore16Imm());
+ RUN(testTrunc((static_cast<int64_t>(1) << 40) + 42));
+ RUN(testAdd1(45));
+ RUN(testAdd1Ptr(51));
+ RUN(testAdd1Ptr(bitwise_cast<intptr_t>(vm)));
+ RUN(testNeg32(52));
+ RUN(testNegPtr(53));
+ RUN(testStoreAddLoad32(46));
+ RUN(testStoreAddLoadImm32(46));
+ RUN(testStoreAddLoad64(4600));
+ RUN(testStoreAddLoadImm64(4600));
+ RUN(testStoreAddLoad8(4, Load8Z));
+ RUN(testStoreAddLoadImm8(4, Load8Z));
+ RUN(testStoreAddLoad8(4, Load8S));
+ RUN(testStoreAddLoadImm8(4, Load8S));
+ RUN(testStoreAddLoad16(6, Load16Z));
+ RUN(testStoreAddLoadImm16(6, Load16Z));
+ RUN(testStoreAddLoad16(6, Load16S));
+ RUN(testStoreAddLoadImm16(6, Load16S));
+ RUN(testStoreAddLoad32Index(46));
+ RUN(testStoreAddLoadImm32Index(46));
+ RUN(testStoreAddLoad64Index(4600));
+ RUN(testStoreAddLoadImm64Index(4600));
+ RUN(testStoreAddLoad8Index(4, Load8Z));
+ RUN(testStoreAddLoadImm8Index(4, Load8Z));
+ RUN(testStoreAddLoad8Index(4, Load8S));
+ RUN(testStoreAddLoadImm8Index(4, Load8S));
+ RUN(testStoreAddLoad16Index(6, Load16Z));
+ RUN(testStoreAddLoadImm16Index(6, Load16Z));
+ RUN(testStoreAddLoad16Index(6, Load16S));
+ RUN(testStoreAddLoadImm16Index(6, Load16S));
+ RUN(testStoreSubLoad(46));
+ RUN(testStoreAddLoadInterference(52));
+ RUN(testStoreAddAndLoad(47, 0xffff));
+ RUN(testStoreAddAndLoad(470000, 0xffff));
+ RUN(testStoreNegLoad32(54));
+ RUN(testStoreNegLoadPtr(55));
+ RUN(testAdd1Uncommuted(48));
+ RUN(testLoadOffset());
+ RUN(testLoadOffsetNotConstant());
+ RUN(testLoadOffsetUsingAdd());
+ RUN(testLoadOffsetUsingAddInterference());
+ RUN(testLoadOffsetUsingAddNotConstant());
+ RUN(testLoadAddrShift(0));
+ RUN(testLoadAddrShift(1));
+ RUN(testLoadAddrShift(2));
+ RUN(testLoadAddrShift(3));
+ RUN(testFramePointer());
+ RUN(testOverrideFramePointer());
+ RUN(testStackSlot());
+ RUN(testLoadFromFramePointer());
+ RUN(testStoreLoadStackSlot(50));
+
+ RUN(testBranch());
+ RUN(testBranchPtr());
+ RUN(testDiamond());
+ RUN(testBranchNotEqual());
+ RUN(testBranchNotEqualCommute());
+ RUN(testBranchNotEqualNotEqual());
+ RUN(testBranchEqual());
+ RUN(testBranchEqualEqual());
+ RUN(testBranchEqualCommute());
+ RUN(testBranchEqualEqual1());
+ RUN_BINARY(testBranchEqualOrUnorderedArgs, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testBranchEqualOrUnorderedArgs, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBranchEqualOrUnorderedDoubleArgImm, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testBranchEqualOrUnorderedFloatArgImm, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBranchEqualOrUnorderedDoubleImms, floatingPointOperands<double>(), floatingPointOperands<double>());
+ RUN_BINARY(testBranchEqualOrUnorderedFloatImms, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testBranchEqualOrUnorderedFloatWithUselessDoubleConversion, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN(testBranchFold(42));
+ RUN(testBranchFold(0));
+ RUN(testDiamondFold(42));
+ RUN(testDiamondFold(0));
+ RUN(testBranchNotEqualFoldPtr(42));
+ RUN(testBranchNotEqualFoldPtr(0));
+ RUN(testBranchEqualFoldPtr(42));
+ RUN(testBranchEqualFoldPtr(0));
+ RUN(testBranchLoadPtr());
+ RUN(testBranchLoad32());
+ RUN(testBranchLoad8S());
+ RUN(testBranchLoad8Z());
+ RUN(testBranchLoad16S());
+ RUN(testBranchLoad16Z());
+ RUN(testBranch8WithLoad8ZIndex());
+
+ RUN(testComplex(64, 128));
+ RUN(testComplex(4, 128));
+ RUN(testComplex(4, 256));
+ RUN(testComplex(4, 384));
+
+ RUN(testSimplePatchpoint());
+ RUN(testSimplePatchpointWithoutOuputClobbersGPArgs());
+ RUN(testSimplePatchpointWithOuputClobbersGPArgs());
+ RUN(testSimplePatchpointWithoutOuputClobbersFPArgs());
+ RUN(testSimplePatchpointWithOuputClobbersFPArgs());
+ RUN(testPatchpointWithEarlyClobber());
+ RUN(testPatchpointCallArg());
+ RUN(testPatchpointFixedRegister());
+ RUN(testPatchpointAny(ValueRep::WarmAny));
+ RUN(testPatchpointAny(ValueRep::ColdAny));
+ RUN(testPatchpointGPScratch());
+ RUN(testPatchpointFPScratch());
+ RUN(testPatchpointLotsOfLateAnys());
+ RUN(testPatchpointAnyImm(ValueRep::WarmAny));
+ RUN(testPatchpointAnyImm(ValueRep::ColdAny));
+ RUN(testPatchpointAnyImm(ValueRep::LateColdAny));
+ RUN(testPatchpointManyImms());
+ RUN(testPatchpointWithRegisterResult());
+ RUN(testPatchpointWithStackArgumentResult());
+ RUN(testPatchpointWithAnyResult());
+ RUN(testSimpleCheck());
+ RUN(testCheckFalse());
+ RUN(testCheckTrue());
+ RUN(testCheckLessThan());
+ RUN(testCheckMegaCombo());
+ RUN(testCheckTrickyMegaCombo());
+ RUN(testCheckTwoMegaCombos());
+ RUN(testCheckTwoNonRedundantMegaCombos());
+ RUN(testCheckAddImm());
+ RUN(testCheckAddImmCommute());
+ RUN(testCheckAddImmSomeRegister());
+ RUN(testCheckAdd());
+ RUN(testCheckAdd64());
+ RUN(testCheckAddFold(100, 200));
+ RUN(testCheckAddFoldFail(2147483647, 100));
+ RUN(testCheckAddArgumentAliasing64());
+ RUN(testCheckAddArgumentAliasing32());
+ RUN(testCheckAddSelfOverflow64());
+ RUN(testCheckAddSelfOverflow32());
+ RUN(testCheckSubImm());
+ RUN(testCheckSubBadImm());
+ RUN(testCheckSub());
+ RUN(testCheckSub64());
+ RUN(testCheckSubFold(100, 200));
+ RUN(testCheckSubFoldFail(-2147483647, 100));
+ RUN(testCheckNeg());
+ RUN(testCheckNeg64());
+ RUN(testCheckMul());
+ RUN(testCheckMulMemory());
+ RUN(testCheckMul2());
+ RUN(testCheckMul64());
+ RUN(testCheckMulFold(100, 200));
+ RUN(testCheckMulFoldFail(2147483647, 100));
+ RUN(testCheckMulArgumentAliasing64());
+ RUN(testCheckMulArgumentAliasing32());
+
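+ // Compare tests for each comparison opcode; each lambda takes int32_t, so the 64-bit operand values are narrowed before the comparison.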
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(Equal, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(NotEqual, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(LessThan, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(GreaterThan, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(LessEqual, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(GreaterEqual, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(Below, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(Above, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(BelowEqual, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(AboveEqual, a, b); }, int64Operands(), int64Operands());
+ RUN_BINARY([](int32_t a, int32_t b) { testCompare(BitAnd, a, b); }, int64Operands(), int64Operands());
+
+ RUN(testEqualDouble(42, 42, true));
+ RUN(testEqualDouble(0, -0, true));
+ RUN(testEqualDouble(42, 43, false));
+ RUN(testEqualDouble(PNaN, 42, false));
+ RUN(testEqualDouble(42, PNaN, false));
+ RUN(testEqualDouble(PNaN, PNaN, false));
+
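+ // Load tests: full-width loads at assorted offsets, then sub-width (8/16-bit) sign- and zero-extending loads.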
+ RUN(testLoad<Int32>(60));
+ RUN(testLoad<Int32>(-60));
+ RUN(testLoad<Int32>(1000));
+ RUN(testLoad<Int32>(-1000));
+ RUN(testLoad<Int32>(1000000));
+ RUN(testLoad<Int32>(-1000000));
+ RUN(testLoad<Int32>(1000000000));
+ RUN(testLoad<Int32>(-1000000000));
+ RUN_UNARY(testLoad<Int64>, int64Operands());
+ RUN_UNARY(testLoad<Float>, floatingPointOperands<float>());
+ RUN_UNARY(testLoad<Double>, floatingPointOperands<double>());
+
+ RUN(testLoad<int8_t>(Load8S, 60));
+ RUN(testLoad<int8_t>(Load8S, -60));
+ RUN(testLoad<int8_t>(Load8S, 1000));
+ RUN(testLoad<int8_t>(Load8S, -1000));
+ RUN(testLoad<int8_t>(Load8S, 1000000));
+ RUN(testLoad<int8_t>(Load8S, -1000000));
+ RUN(testLoad<int8_t>(Load8S, 1000000000));
+ RUN(testLoad<int8_t>(Load8S, -1000000000));
+
+ RUN(testLoad<uint8_t>(Load8Z, 60));
+ RUN(testLoad<uint8_t>(Load8Z, -60));
+ RUN(testLoad<uint8_t>(Load8Z, 1000));
+ RUN(testLoad<uint8_t>(Load8Z, -1000));
+ RUN(testLoad<uint8_t>(Load8Z, 1000000));
+ RUN(testLoad<uint8_t>(Load8Z, -1000000));
+ RUN(testLoad<uint8_t>(Load8Z, 1000000000));
+ RUN(testLoad<uint8_t>(Load8Z, -1000000000));
+
+ RUN(testLoad<int16_t>(Load16S, 60));
+ RUN(testLoad<int16_t>(Load16S, -60));
+ RUN(testLoad<int16_t>(Load16S, 1000));
+ RUN(testLoad<int16_t>(Load16S, -1000));
+ RUN(testLoad<int16_t>(Load16S, 1000000));
+ RUN(testLoad<int16_t>(Load16S, -1000000));
+ RUN(testLoad<int16_t>(Load16S, 1000000000));
+ RUN(testLoad<int16_t>(Load16S, -1000000000));
+
+ RUN(testLoad<uint16_t>(Load16Z, 60));
+ RUN(testLoad<uint16_t>(Load16Z, -60));
+ RUN(testLoad<uint16_t>(Load16Z, 1000));
+ RUN(testLoad<uint16_t>(Load16Z, -1000));
+ RUN(testLoad<uint16_t>(Load16Z, 1000000));
+ RUN(testLoad<uint16_t>(Load16Z, -1000000));
+ RUN(testLoad<uint16_t>(Load16Z, 1000000000));
+ RUN(testLoad<uint16_t>(Load16Z, -1000000000));
+
+ RUN(testSpillGP());
+ RUN(testSpillFP());
+
+ RUN(testInt32ToDoublePartialRegisterStall());
+ RUN(testInt32ToDoublePartialRegisterWithoutStall());
+
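+ // C call tests.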
+ RUN(testCallSimple(1, 2));
+ RUN(testCallRare(1, 2));
+ RUN(testCallRareLive(1, 2, 3));
+ RUN(testCallSimplePure(1, 2));
+ RUN(testCallFunctionWithHellaArguments());
+
+ RUN(testReturnDouble(0.0));
+ RUN(testReturnDouble(negativeZero()));
+ RUN(testReturnDouble(42.5));
+ RUN_UNARY(testReturnFloat, floatingPointOperands<float>());
+
+ RUN(testCallSimpleDouble(1, 2));
+ RUN(testCallFunctionWithHellaDoubleArguments());
+ RUN_BINARY(testCallSimpleFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN(testCallFunctionWithHellaFloatArguments());
+
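+ // Chill division: the expected results encode the non-trapping semantics (x / 0 == 0 and INT_MIN / -1 == INT_MIN).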
+ RUN(testChillDiv(4, 2, 2));
+ RUN(testChillDiv(1, 0, 0));
+ RUN(testChillDiv(0, 0, 0));
+ RUN(testChillDiv(1, -1, -1));
+ RUN(testChillDiv(-2147483647 - 1, 0, 0));
+ RUN(testChillDiv(-2147483647 - 1, 1, -2147483647 - 1));
+ RUN(testChillDiv(-2147483647 - 1, -1, -2147483647 - 1));
+ RUN(testChillDiv(-2147483647 - 1, 2, -1073741824));
+ RUN(testChillDiv64(4, 2, 2));
+ RUN(testChillDiv64(1, 0, 0));
+ RUN(testChillDiv64(0, 0, 0));
+ RUN(testChillDiv64(1, -1, -1));
+ RUN(testChillDiv64(-9223372036854775807ll - 1, 0, 0));
+ RUN(testChillDiv64(-9223372036854775807ll - 1, 1, -9223372036854775807ll - 1));
+ RUN(testChillDiv64(-9223372036854775807ll - 1, -1, -9223372036854775807ll - 1));
+ RUN(testChillDiv64(-9223372036854775807ll - 1, 2, -4611686018427387904));
+ RUN(testChillDivTwice(4, 2, 6, 2, 5));
+ RUN(testChillDivTwice(4, 0, 6, 2, 3));
+ RUN(testChillDivTwice(4, 2, 6, 0, 2));
+
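+ // Mod tests and their chill (non-trapping) variants, in 64- and 32-bit widths.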
+ RUN_UNARY(testModArg, int64Operands());
+ RUN_BINARY(testModArgs, int64Operands(), int64Operands());
+ RUN_BINARY(testModImms, int64Operands(), int64Operands());
+ RUN_UNARY(testModArg32, int32Operands());
+ RUN_BINARY(testModArgs32, int32Operands(), int32Operands());
+ RUN_BINARY(testModImms32, int32Operands(), int32Operands());
+ RUN_UNARY(testChillModArg, int64Operands());
+ RUN_BINARY(testChillModArgs, int64Operands(), int64Operands());
+ RUN_BINARY(testChillModImms, int64Operands(), int64Operands());
+ RUN_UNARY(testChillModArg32, int32Operands());
+ RUN_BINARY(testChillModArgs32, int32Operands(), int32Operands());
+ RUN_BINARY(testChillModImms32, int32Operands(), int32Operands());
+
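+ // Switch tests.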
+ RUN(testSwitch(0, 1));
+ RUN(testSwitch(1, 1));
+ RUN(testSwitch(2, 1));
+ RUN(testSwitch(2, 2));
+ RUN(testSwitch(10, 1));
+ RUN(testSwitch(10, 2));
+ RUN(testSwitch(100, 1));
+ RUN(testSwitch(100, 100));
+
+ RUN(testSwitchChillDiv(0, 1));
+ RUN(testSwitchChillDiv(1, 1));
+ RUN(testSwitchChillDiv(2, 1));
+ RUN(testSwitchChillDiv(2, 2));
+ RUN(testSwitchChillDiv(10, 1));
+ RUN(testSwitchChillDiv(10, 2));
+ RUN(testSwitchChillDiv(100, 1));
+ RUN(testSwitchChillDiv(100, 100));
+
+ RUN(testSwitchTargettingSameBlock());
+ RUN(testSwitchTargettingSameBlockFoldPathConstant());
+
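+ // Trunc, ZExt32, and SExt8/16/32 tests, including fused extension-with-mask (BitAnd) patterns.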
+ RUN(testTrunc(0));
+ RUN(testTrunc(1));
+ RUN(testTrunc(-1));
+ RUN(testTrunc(1000000000000ll));
+ RUN(testTrunc(-1000000000000ll));
+ RUN(testTruncFold(0));
+ RUN(testTruncFold(1));
+ RUN(testTruncFold(-1));
+ RUN(testTruncFold(1000000000000ll));
+ RUN(testTruncFold(-1000000000000ll));
+
+ RUN(testZExt32(0));
+ RUN(testZExt32(1));
+ RUN(testZExt32(-1));
+ RUN(testZExt32(1000000000ll));
+ RUN(testZExt32(-1000000000ll));
+ RUN(testZExt32Fold(0));
+ RUN(testZExt32Fold(1));
+ RUN(testZExt32Fold(-1));
+ RUN(testZExt32Fold(1000000000ll));
+ RUN(testZExt32Fold(-1000000000ll));
+
+ RUN(testSExt32(0));
+ RUN(testSExt32(1));
+ RUN(testSExt32(-1));
+ RUN(testSExt32(1000000000ll));
+ RUN(testSExt32(-1000000000ll));
+ RUN(testSExt32Fold(0));
+ RUN(testSExt32Fold(1));
+ RUN(testSExt32Fold(-1));
+ RUN(testSExt32Fold(1000000000ll));
+ RUN(testSExt32Fold(-1000000000ll));
+
+ RUN(testTruncZExt32(0));
+ RUN(testTruncZExt32(1));
+ RUN(testTruncZExt32(-1));
+ RUN(testTruncZExt32(1000000000ll));
+ RUN(testTruncZExt32(-1000000000ll));
+ RUN(testTruncSExt32(0));
+ RUN(testTruncSExt32(1));
+ RUN(testTruncSExt32(-1));
+ RUN(testTruncSExt32(1000000000ll));
+ RUN(testTruncSExt32(-1000000000ll));
+
+ RUN(testSExt8(0));
+ RUN(testSExt8(1));
+ RUN(testSExt8(42));
+ RUN(testSExt8(-1));
+ RUN(testSExt8(0xff));
+ RUN(testSExt8(0x100));
+ RUN(testSExt8Fold(0));
+ RUN(testSExt8Fold(1));
+ RUN(testSExt8Fold(42));
+ RUN(testSExt8Fold(-1));
+ RUN(testSExt8Fold(0xff));
+ RUN(testSExt8Fold(0x100));
+ RUN(testSExt8SExt8(0));
+ RUN(testSExt8SExt8(1));
+ RUN(testSExt8SExt8(42));
+ RUN(testSExt8SExt8(-1));
+ RUN(testSExt8SExt8(0xff));
+ RUN(testSExt8SExt8(0x100));
+ RUN(testSExt8SExt16(0));
+ RUN(testSExt8SExt16(1));
+ RUN(testSExt8SExt16(42));
+ RUN(testSExt8SExt16(-1));
+ RUN(testSExt8SExt16(0xff));
+ RUN(testSExt8SExt16(0x100));
+ RUN(testSExt8SExt16(0xffff));
+ RUN(testSExt8SExt16(0x10000));
+ RUN(testSExt8BitAnd(0, 0));
+ RUN(testSExt8BitAnd(1, 0));
+ RUN(testSExt8BitAnd(42, 0));
+ RUN(testSExt8BitAnd(-1, 0));
+ RUN(testSExt8BitAnd(0xff, 0));
+ RUN(testSExt8BitAnd(0x100, 0));
+ RUN(testSExt8BitAnd(0xffff, 0));
+ RUN(testSExt8BitAnd(0x10000, 0));
+ RUN(testSExt8BitAnd(0, 0xf));
+ RUN(testSExt8BitAnd(1, 0xf));
+ RUN(testSExt8BitAnd(42, 0xf));
+ RUN(testSExt8BitAnd(-1, 0xf));
+ RUN(testSExt8BitAnd(0xff, 0xf));
+ RUN(testSExt8BitAnd(0x100, 0xf));
+ RUN(testSExt8BitAnd(0xffff, 0xf));
+ RUN(testSExt8BitAnd(0x10000, 0xf));
+ RUN(testSExt8BitAnd(0, 0xff));
+ RUN(testSExt8BitAnd(1, 0xff));
+ RUN(testSExt8BitAnd(42, 0xff));
+ RUN(testSExt8BitAnd(-1, 0xff));
+ RUN(testSExt8BitAnd(0xff, 0xff));
+ RUN(testSExt8BitAnd(0x100, 0xff));
+ RUN(testSExt8BitAnd(0xffff, 0xff));
+ RUN(testSExt8BitAnd(0x10000, 0xff));
+ RUN(testSExt8BitAnd(0, 0x80));
+ RUN(testSExt8BitAnd(1, 0x80));
+ RUN(testSExt8BitAnd(42, 0x80));
+ RUN(testSExt8BitAnd(-1, 0x80));
+ RUN(testSExt8BitAnd(0xff, 0x80));
+ RUN(testSExt8BitAnd(0x100, 0x80));
+ RUN(testSExt8BitAnd(0xffff, 0x80));
+ RUN(testSExt8BitAnd(0x10000, 0x80));
+ RUN(testBitAndSExt8(0, 0xf));
+ RUN(testBitAndSExt8(1, 0xf));
+ RUN(testBitAndSExt8(42, 0xf));
+ RUN(testBitAndSExt8(-1, 0xf));
+ RUN(testBitAndSExt8(0xff, 0xf));
+ RUN(testBitAndSExt8(0x100, 0xf));
+ RUN(testBitAndSExt8(0xffff, 0xf));
+ RUN(testBitAndSExt8(0x10000, 0xf));
+ RUN(testBitAndSExt8(0, 0xff));
+ RUN(testBitAndSExt8(1, 0xff));
+ RUN(testBitAndSExt8(42, 0xff));
+ RUN(testBitAndSExt8(-1, 0xff));
+ RUN(testBitAndSExt8(0xff, 0xff));
+ RUN(testBitAndSExt8(0x100, 0xff));
+ RUN(testBitAndSExt8(0xffff, 0xff));
+ RUN(testBitAndSExt8(0x10000, 0xff));
+ RUN(testBitAndSExt8(0, 0xfff));
+ RUN(testBitAndSExt8(1, 0xfff));
+ RUN(testBitAndSExt8(42, 0xfff));
+ RUN(testBitAndSExt8(-1, 0xfff));
+ RUN(testBitAndSExt8(0xff, 0xfff));
+ RUN(testBitAndSExt8(0x100, 0xfff));
+ RUN(testBitAndSExt8(0xffff, 0xfff));
+ RUN(testBitAndSExt8(0x10000, 0xfff));
+
+ RUN(testSExt16(0));
+ RUN(testSExt16(1));
+ RUN(testSExt16(42));
+ RUN(testSExt16(-1));
+ RUN(testSExt16(0xffff));
+ RUN(testSExt16(0x10000));
+ RUN(testSExt16Fold(0));
+ RUN(testSExt16Fold(1));
+ RUN(testSExt16Fold(42));
+ RUN(testSExt16Fold(-1));
+ RUN(testSExt16Fold(0xffff));
+ RUN(testSExt16Fold(0x10000));
+ RUN(testSExt16SExt8(0));
+ RUN(testSExt16SExt8(1));
+ RUN(testSExt16SExt8(42));
+ RUN(testSExt16SExt8(-1));
+ RUN(testSExt16SExt8(0xffff));
+ RUN(testSExt16SExt8(0x10000));
+ RUN(testSExt16SExt16(0));
+ RUN(testSExt16SExt16(1));
+ RUN(testSExt16SExt16(42));
+ RUN(testSExt16SExt16(-1));
+ RUN(testSExt16SExt16(0xffff));
+ RUN(testSExt16SExt16(0x10000));
+ RUN(testSExt16SExt16(0xffffff));
+ RUN(testSExt16SExt16(0x1000000));
+ RUN(testSExt16BitAnd(0, 0));
+ RUN(testSExt16BitAnd(1, 0));
+ RUN(testSExt16BitAnd(42, 0));
+ RUN(testSExt16BitAnd(-1, 0));
+ RUN(testSExt16BitAnd(0xffff, 0));
+ RUN(testSExt16BitAnd(0x10000, 0));
+ RUN(testSExt16BitAnd(0xffffff, 0));
+ RUN(testSExt16BitAnd(0x1000000, 0));
+ RUN(testSExt16BitAnd(0, 0xf));
+ RUN(testSExt16BitAnd(1, 0xf));
+ RUN(testSExt16BitAnd(42, 0xf));
+ RUN(testSExt16BitAnd(-1, 0xf));
+ RUN(testSExt16BitAnd(0xffff, 0xf));
+ RUN(testSExt16BitAnd(0x10000, 0xf));
+ RUN(testSExt16BitAnd(0xffffff, 0xf));
+ RUN(testSExt16BitAnd(0x1000000, 0xf));
+ RUN(testSExt16BitAnd(0, 0xffff));
+ RUN(testSExt16BitAnd(1, 0xffff));
+ RUN(testSExt16BitAnd(42, 0xffff));
+ RUN(testSExt16BitAnd(-1, 0xffff));
+ RUN(testSExt16BitAnd(0xffff, 0xffff));
+ RUN(testSExt16BitAnd(0x10000, 0xffff));
+ RUN(testSExt16BitAnd(0xffffff, 0xffff));
+ RUN(testSExt16BitAnd(0x1000000, 0xffff));
+ RUN(testSExt16BitAnd(0, 0x8000));
+ RUN(testSExt16BitAnd(1, 0x8000));
+ RUN(testSExt16BitAnd(42, 0x8000));
+ RUN(testSExt16BitAnd(-1, 0x8000));
+ RUN(testSExt16BitAnd(0xffff, 0x8000));
+ RUN(testSExt16BitAnd(0x10000, 0x8000));
+ RUN(testSExt16BitAnd(0xffffff, 0x8000));
+ RUN(testSExt16BitAnd(0x1000000, 0x8000));
+ RUN(testBitAndSExt16(0, 0xf));
+ RUN(testBitAndSExt16(1, 0xf));
+ RUN(testBitAndSExt16(42, 0xf));
+ RUN(testBitAndSExt16(-1, 0xf));
+ RUN(testBitAndSExt16(0xffff, 0xf));
+ RUN(testBitAndSExt16(0x10000, 0xf));
+ RUN(testBitAndSExt16(0xffffff, 0xf));
+ RUN(testBitAndSExt16(0x1000000, 0xf));
+ RUN(testBitAndSExt16(0, 0xffff));
+ RUN(testBitAndSExt16(1, 0xffff));
+ RUN(testBitAndSExt16(42, 0xffff));
+ RUN(testBitAndSExt16(-1, 0xffff));
+ RUN(testBitAndSExt16(0xffff, 0xffff));
+ RUN(testBitAndSExt16(0x10000, 0xffff));
+ RUN(testBitAndSExt16(0xffffff, 0xffff));
+ RUN(testBitAndSExt16(0x1000000, 0xffff));
+ RUN(testBitAndSExt16(0, 0xfffff));
+ RUN(testBitAndSExt16(1, 0xfffff));
+ RUN(testBitAndSExt16(42, 0xfffff));
+ RUN(testBitAndSExt16(-1, 0xfffff));
+ RUN(testBitAndSExt16(0xffff, 0xfffff));
+ RUN(testBitAndSExt16(0x10000, 0xfffff));
+ RUN(testBitAndSExt16(0xffffff, 0xfffff));
+ RUN(testBitAndSExt16(0x1000000, 0xfffff));
+
+ RUN(testSExt32BitAnd(0, 0));
+ RUN(testSExt32BitAnd(1, 0));
+ RUN(testSExt32BitAnd(42, 0));
+ RUN(testSExt32BitAnd(-1, 0));
+ RUN(testSExt32BitAnd(0x80000000, 0));
+ RUN(testSExt32BitAnd(0, 0xf));
+ RUN(testSExt32BitAnd(1, 0xf));
+ RUN(testSExt32BitAnd(42, 0xf));
+ RUN(testSExt32BitAnd(-1, 0xf));
+ RUN(testSExt32BitAnd(0x80000000, 0xf));
+ RUN(testSExt32BitAnd(0, 0x80000000));
+ RUN(testSExt32BitAnd(1, 0x80000000));
+ RUN(testSExt32BitAnd(42, 0x80000000));
+ RUN(testSExt32BitAnd(-1, 0x80000000));
+ RUN(testSExt32BitAnd(0x80000000, 0x80000000));
+ RUN(testBitAndSExt32(0, 0xf));
+ RUN(testBitAndSExt32(1, 0xf));
+ RUN(testBitAndSExt32(42, 0xf));
+ RUN(testBitAndSExt32(-1, 0xf));
+ RUN(testBitAndSExt32(0xffff, 0xf));
+ RUN(testBitAndSExt32(0x10000, 0xf));
+ RUN(testBitAndSExt32(0xffffff, 0xf));
+ RUN(testBitAndSExt32(0x1000000, 0xf));
+ RUN(testBitAndSExt32(0, 0xffff00000000llu));
+ RUN(testBitAndSExt32(1, 0xffff00000000llu));
+ RUN(testBitAndSExt32(42, 0xffff00000000llu));
+ RUN(testBitAndSExt32(-1, 0xffff00000000llu));
+ RUN(testBitAndSExt32(0x80000000, 0xffff00000000llu));
+
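+ // Select tests.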
+ RUN(testBasicSelect());
+ RUN(testSelectTest());
+ RUN(testSelectCompareDouble());
+ RUN_BINARY(testSelectCompareFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testSelectCompareFloatToDouble, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN(testSelectDouble());
+ RUN(testSelectDoubleTest());
+ RUN(testSelectDoubleCompareDouble());
+ RUN_BINARY(testSelectDoubleCompareFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN_BINARY(testSelectFloatCompareFloat, floatingPointOperands<float>(), floatingPointOperands<float>());
+ RUN(testSelectDoubleCompareDoubleWithAliasing());
+ RUN(testSelectFloatCompareFloatWithAliasing());
+ RUN(testSelectFold(42));
+ RUN(testSelectFold(43));
+ RUN(testSelectInvert());
+ RUN(testCheckSelect());
+ RUN(testCheckSelectCheckSelect());
+ RUN(testCheckSelectAndCSE());
+ RUN_BINARY(testPowDoubleByIntegerLoop, floatingPointOperands<double>(), int64Operands());
+
+ RUN(testTruncOrHigh());
+ RUN(testTruncOrLow());
+ RUN(testBitAndOrHigh());
+ RUN(testBitAndOrLow());
+
+ RUN(testBranch64Equal(0, 0));
+ RUN(testBranch64Equal(1, 1));
+ RUN(testBranch64Equal(-1, -1));
+ RUN(testBranch64Equal(1, -1));
+ RUN(testBranch64Equal(-1, 1));
+ RUN(testBranch64EqualImm(0, 0));
+ RUN(testBranch64EqualImm(1, 1));
+ RUN(testBranch64EqualImm(-1, -1));
+ RUN(testBranch64EqualImm(1, -1));
+ RUN(testBranch64EqualImm(-1, 1));
+ RUN(testBranch64EqualMem(0, 0));
+ RUN(testBranch64EqualMem(1, 1));
+ RUN(testBranch64EqualMem(-1, -1));
+ RUN(testBranch64EqualMem(1, -1));
+ RUN(testBranch64EqualMem(-1, 1));
+ RUN(testBranch64EqualMemImm(0, 0));
+ RUN(testBranch64EqualMemImm(1, 1));
+ RUN(testBranch64EqualMemImm(-1, -1));
+ RUN(testBranch64EqualMemImm(1, -1));
+ RUN(testBranch64EqualMemImm(-1, 1));
+
+ RUN(testStore8Load8Z(0));
+ RUN(testStore8Load8Z(123));
+ RUN(testStore8Load8Z(12345));
+ RUN(testStore8Load8Z(-123));
+
+ RUN(testStore16Load16Z(0));
+ RUN(testStore16Load16Z(123));
+ RUN(testStore16Load16Z(12345));
+ RUN(testStore16Load16Z(12345678));
+ RUN(testStore16Load16Z(-123));
+
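+ // SShrShl tests: shift-left/shift-right pairs with matching amounts across widths (the classic shift-based sign-extension shape).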
+ RUN(testSShrShl32(42, 24, 24));
+ RUN(testSShrShl32(-42, 24, 24));
+ RUN(testSShrShl32(4200, 24, 24));
+ RUN(testSShrShl32(-4200, 24, 24));
+ RUN(testSShrShl32(4200000, 24, 24));
+ RUN(testSShrShl32(-4200000, 24, 24));
+
+ RUN(testSShrShl32(42, 16, 16));
+ RUN(testSShrShl32(-42, 16, 16));
+ RUN(testSShrShl32(4200, 16, 16));
+ RUN(testSShrShl32(-4200, 16, 16));
+ RUN(testSShrShl32(4200000, 16, 16));
+ RUN(testSShrShl32(-4200000, 16, 16));
+
+ RUN(testSShrShl32(42, 8, 8));
+ RUN(testSShrShl32(-42, 8, 8));
+ RUN(testSShrShl32(4200, 8, 8));
+ RUN(testSShrShl32(-4200, 8, 8));
+ RUN(testSShrShl32(4200000, 8, 8));
+ RUN(testSShrShl32(-4200000, 8, 8));
+ RUN(testSShrShl32(420000000, 8, 8));
+ RUN(testSShrShl32(-420000000, 8, 8));
+
+ RUN(testSShrShl64(42, 56, 56));
+ RUN(testSShrShl64(-42, 56, 56));
+ RUN(testSShrShl64(4200, 56, 56));
+ RUN(testSShrShl64(-4200, 56, 56));
+ RUN(testSShrShl64(4200000, 56, 56));
+ RUN(testSShrShl64(-4200000, 56, 56));
+ RUN(testSShrShl64(420000000, 56, 56));
+ RUN(testSShrShl64(-420000000, 56, 56));
+ RUN(testSShrShl64(42000000000, 56, 56));
+ RUN(testSShrShl64(-42000000000, 56, 56));
+
+ RUN(testSShrShl64(42, 48, 48));
+ RUN(testSShrShl64(-42, 48, 48));
+ RUN(testSShrShl64(4200, 48, 48));
+ RUN(testSShrShl64(-4200, 48, 48));
+ RUN(testSShrShl64(4200000, 48, 48));
+ RUN(testSShrShl64(-4200000, 48, 48));
+ RUN(testSShrShl64(420000000, 48, 48));
+ RUN(testSShrShl64(-420000000, 48, 48));
+ RUN(testSShrShl64(42000000000, 48, 48));
+ RUN(testSShrShl64(-42000000000, 48, 48));
+
+ RUN(testSShrShl64(42, 32, 32));
+ RUN(testSShrShl64(-42, 32, 32));
+ RUN(testSShrShl64(4200, 32, 32));
+ RUN(testSShrShl64(-4200, 32, 32));
+ RUN(testSShrShl64(4200000, 32, 32));
+ RUN(testSShrShl64(-4200000, 32, 32));
+ RUN(testSShrShl64(420000000, 32, 32));
+ RUN(testSShrShl64(-420000000, 32, 32));
+ RUN(testSShrShl64(42000000000, 32, 32));
+ RUN(testSShrShl64(-42000000000, 32, 32));
+
+ RUN(testSShrShl64(42, 24, 24));
+ RUN(testSShrShl64(-42, 24, 24));
+ RUN(testSShrShl64(4200, 24, 24));
+ RUN(testSShrShl64(-4200, 24, 24));
+ RUN(testSShrShl64(4200000, 24, 24));
+ RUN(testSShrShl64(-4200000, 24, 24));
+ RUN(testSShrShl64(420000000, 24, 24));
+ RUN(testSShrShl64(-420000000, 24, 24));
+ RUN(testSShrShl64(42000000000, 24, 24));
+ RUN(testSShrShl64(-42000000000, 24, 24));
+
+ RUN(testSShrShl64(42, 16, 16));
+ RUN(testSShrShl64(-42, 16, 16));
+ RUN(testSShrShl64(4200, 16, 16));
+ RUN(testSShrShl64(-4200, 16, 16));
+ RUN(testSShrShl64(4200000, 16, 16));
+ RUN(testSShrShl64(-4200000, 16, 16));
+ RUN(testSShrShl64(420000000, 16, 16));
+ RUN(testSShrShl64(-420000000, 16, 16));
+ RUN(testSShrShl64(42000000000, 16, 16));
+ RUN(testSShrShl64(-42000000000, 16, 16));
+
+ RUN(testSShrShl64(42, 8, 8));
+ RUN(testSShrShl64(-42, 8, 8));
+ RUN(testSShrShl64(4200, 8, 8));
+ RUN(testSShrShl64(-4200, 8, 8));
+ RUN(testSShrShl64(4200000, 8, 8));
+ RUN(testSShrShl64(-4200000, 8, 8));
+ RUN(testSShrShl64(420000000, 8, 8));
+ RUN(testSShrShl64(-420000000, 8, 8));
+ RUN(testSShrShl64(42000000000, 8, 8));
+ RUN(testSShrShl64(-42000000000, 8, 8));
+
+ RUN(testCheckMul64SShr());
+
+ RUN_BINARY(testRotR, int32Operands(), int32Operands());
+ RUN_BINARY(testRotR, int64Operands(), int32Operands());
+ RUN_BINARY(testRotL, int32Operands(), int32Operands());
+ RUN_BINARY(testRotL, int64Operands(), int32Operands());
+
+ RUN_BINARY(testRotRWithImmShift, int32Operands(), int32Operands());
+ RUN_BINARY(testRotRWithImmShift, int64Operands(), int32Operands());
+ RUN_BINARY(testRotLWithImmShift, int32Operands(), int32Operands());
+ RUN_BINARY(testRotLWithImmShift, int64Operands(), int32Operands());
+
+ RUN(testComputeDivisionMagic<int32_t>(2, -2147483647, 0));
+ RUN(testTrivialInfiniteLoop());
+ RUN(testFoldPathEqual());
+
+ RUN(testRShiftSelf32());
+ RUN(testURShiftSelf32());
+ RUN(testLShiftSelf32());
+ RUN(testRShiftSelf64());
+ RUN(testURShiftSelf64());
+ RUN(testLShiftSelf64());
+
+ RUN(testPatchpointDoubleRegs());
+ RUN(testSpillDefSmallerThanUse());
+ RUN(testSpillUseLargerThanDef());
+ RUN(testLateRegister());
+ RUN(testInterpreter());
+ RUN(testReduceStrengthCheckBottomUseInAnotherBlock());
+ RUN(testResetReachabilityDanglingReference());
+
+ RUN(testEntrySwitchSimple());
+ RUN(testEntrySwitchNoEntrySwitch());
+ RUN(testEntrySwitchWithCommonPaths());
+ RUN(testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint());
+ RUN(testEntrySwitchLoop());
+
+ RUN(testSomeEarlyRegister());
+ RUN(testPatchpointTerminalReturnValue(true));
+ RUN(testPatchpointTerminalReturnValue(false));
+ RUN(testTerminalPatchpointThatNeedsToBeSpilled());
+
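+ // Fences, trapping memory accesses, and assorted optimization/codegen tests (constant motion, register pinning, reassociation, addressing patterns).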
+ RUN(testMemoryFence());
+ RUN(testStoreFence());
+ RUN(testLoadFence());
+ RUN(testTrappingLoad());
+ RUN(testTrappingStore());
+ RUN(testTrappingLoadAddStore());
+ RUN(testTrappingLoadDCE());
+ RUN(testTrappingStoreElimination());
+ RUN(testMoveConstants());
+ RUN(testPCOriginMapDoesntInsertNops());
+ RUN(testPinRegisters());
+ RUN(testReduceStrengthReassociation(true));
+ RUN(testReduceStrengthReassociation(false));
+ RUN(testAddShl32());
+ RUN(testAddShl64());
+ RUN(testAddShl65());
+ RUN(testLoadBaseIndexShift2());
+ RUN(testLoadBaseIndexShift32());
+ RUN(testOptimizeMaterialization());
+
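+ // WasmBoundsCheck tests (including a bound near the unsigned 32-bit limit) and WasmAddress.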
+ RUN(testWasmBoundsCheck(0));
+ RUN(testWasmBoundsCheck(100));
+ RUN(testWasmBoundsCheck(10000));
+ RUN(testWasmBoundsCheck(std::numeric_limits<unsigned>::max() - 5));
+ RUN(testWasmAddress());
+
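+ // x86-only tests: BranchTest fusion of BitAnd-with-immediate, and LEA address formation.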
+ if (isX86()) {
+ RUN(testBranchBitAndImmFusion(Identity, Int64, 1, Air::BranchTest32, Air::Arg::Tmp));
+ RUN(testBranchBitAndImmFusion(Identity, Int64, 0xff, Air::BranchTest32, Air::Arg::Tmp));
+ RUN(testBranchBitAndImmFusion(Trunc, Int32, 1, Air::BranchTest32, Air::Arg::Tmp));
+ RUN(testBranchBitAndImmFusion(Trunc, Int32, 0xff, Air::BranchTest32, Air::Arg::Tmp));
+ RUN(testBranchBitAndImmFusion(Load8S, Int32, 1, Air::BranchTest8, Air::Arg::Addr));
+ RUN(testBranchBitAndImmFusion(Load8Z, Int32, 1, Air::BranchTest8, Air::Arg::Addr));
+ RUN(testBranchBitAndImmFusion(Load, Int32, 1, Air::BranchTest32, Air::Arg::Addr));
+ RUN(testBranchBitAndImmFusion(Load, Int64, 1, Air::BranchTest32, Air::Arg::Addr));
+ RUN(testX86LeaAddAddShlLeft());
+ RUN(testX86LeaAddAddShlRight());
+ RUN(testX86LeaAddAdd());
+ RUN(testX86LeaAddShlRight());
+ RUN(testX86LeaAddShlLeftScale1());
+ RUN(testX86LeaAddShlLeftScale2());
+ RUN(testX86LeaAddShlLeftScale4());
+ RUN(testX86LeaAddShlLeftScale8());
+ }
+
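+ // ARM64-only tests: ternary Sub instruction selection.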
+ if (isARM64()) {
+ RUN(testTernarySubInstructionSelection(Identity, Int64, Air::Sub64));
+ RUN(testTernarySubInstructionSelection(Trunc, Int32, Air::Sub32));
+ }
+
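+ // Nothing queued to run (for example, a filter that matched no tests): show usage.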
+ if (tasks.isEmpty())
+ usage();
+
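+ // Run the queued tasks on a pool of worker threads: one per core, or a single thread when a filter was given.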
+ Lock lock;
+
+ Vector<ThreadIdentifier> threads;
+ for (unsigned i = filter ? 1 : WTF::numberOfProcessorCores(); i--;) {
+ threads.append(
+ createThread(
+ "testb3 thread",
+ [&] () {
+ for (;;) {
+ RefPtr<SharedTask<void()>> task;
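+ // Dequeue the next task while holding the lock; a worker returns once the queue is empty.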
+ {
+ LockHolder locker(lock);
+ if (tasks.isEmpty())
+ return;
+ task = tasks.takeFirst();
+ }
+
+ task->run();
+ }
+ }));
+ }
+
+ for (ThreadIdentifier thread : threads)
+ waitForThreadCompletion(thread);
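+ // All workers have finished; acquire the crash lock before returning (presumably so a concurrent failure report, which holds this lock, completes before exit).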
+ crashLock.lock();
+}
+
+} // anonymous namespace
+
+#else // ENABLE(B3_JIT)
+
+static void run(const char*)
+{
+ dataLog("B3 JIT is not enabled.\n");
+}
+
+#endif // ENABLE(B3_JIT)
+
+int main(int argc, char** argv)
+{
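+ // At most one command-line argument is accepted; it is passed to run() as the test filter.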
+ const char* filter = nullptr;
+ switch (argc) {
+ case 1:
+ break;
+ case 2:
+ filter = argv[1];
+ break;
+ default:
+ usage();
+ break;
+ }
+
+ run(filter);
+ return 0;
+}
+