Diffstat (limited to 'deps/v8/src/compiler/s390')
-rw-r--r--   deps/v8/src/compiler/s390/code-generator-s390.cc         962
-rw-r--r--   deps/v8/src/compiler/s390/instruction-codes-s390.h         2
-rw-r--r--   deps/v8/src/compiler/s390/instruction-scheduler-s390.cc    2
-rw-r--r--   deps/v8/src/compiler/s390/instruction-selector-s390.cc  1446
4 files changed, 1222 insertions, 1190 deletions
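
Note: the code-generator diff below replaces the family of member-function-pointer AssembleBinOp overloads with macros that expand to lambdas (RRInstr, RIInstr, RM32Instr, DDInstr, ...) and a small templated AssembleOp that picks the right lambda from the operand kind and returns the index of the zero-extension marker operand. The following is a minimal standalone sketch of that dispatch pattern only; OperandKind and the lambda bodies are simplified stand-ins, not the actual V8 types.

// Sketch of the lambda-dispatch pattern used by AssembleOp (simplified,
// hypothetical types; the real code inspects Instruction operands).
#include <cassert>

enum class OperandKind { kRegister, kImmediate, kMemory };

// Each emitter is a callable returning the index of the zero-extension
// marker operand, mirroring the RRInstr/RIInstr/RMInstr macros.
template <class RType, class MType, class IType>
int AssembleOp(OperandKind kind, RType r, MType m, IType i) {
  switch (kind) {
    case OperandKind::kMemory:
      return m();
    case OperandKind::kRegister:
      return r();
    case OperandKind::kImmediate:
      return i();
  }
  assert(false && "unreachable operand kind");
  return -1;
}

int main() {
  // The register/immediate/memory variants are passed as lambdas, just as
  // ASSEMBLE_BIN_OP passes RRRInstr(...)/RM32Instr(...)/RIInstr(...).
  int zero_ext_index = AssembleOp(
      OperandKind::kRegister,
      [] { /* emit the register-register form */ return 2; },
      [] { /* emit the register-memory form */ return 2; },
      [] { /* emit the register-immediate form */ return 2; });
  return zero_ext_index == 2 ? 0 : 1;
}

Passing the variants as callables keeps a single dispatch routine for every addressing-mode combination, instead of one overload per member-function-pointer signature.
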
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 8e9db3dcb0..f46740c9ae 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -135,26 +135,28 @@ static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
}
-static inline bool HasRegisterInput(Instruction* instr, int index) {
- return instr->InputAt(index)->IsRegister();
-}
-
static inline bool HasFPRegisterInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsFPRegister();
}
-static inline bool HasImmediateInput(Instruction* instr, size_t index) {
- return instr->InputAt(index)->IsImmediate();
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsRegister() ||
+ HasFPRegisterInput(instr, index);
}
-static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
- return instr->InputAt(index)->IsStackSlot();
+static inline bool HasImmediateInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsImmediate();
}
static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsFPStackSlot();
}
+static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsStackSlot() ||
+ HasFPStackSlotInput(instr, index);
+}
+
namespace {
class OutOfLineLoadNAN32 final : public OutOfLineCode {
@@ -307,6 +309,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kS390_Add64:
case kS390_Sub32:
case kS390_Sub64:
+ case kS390_Abs64:
+ case kS390_Abs32:
+ case kS390_Mul32:
return overflow;
default:
break;
@@ -318,6 +323,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kS390_Add64:
case kS390_Sub32:
case kS390_Sub64:
+ case kS390_Abs64:
+ case kS390_Abs32:
+ case kS390_Mul32:
return nooverflow;
default:
break;
@@ -330,175 +338,182 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
return kNoCondition;
}
-typedef void (MacroAssembler::*RRTypeInstr)(Register, Register);
-typedef void (MacroAssembler::*RMTypeInstr)(Register, const MemOperand&);
-typedef void (MacroAssembler::*RITypeInstr)(Register, const Operand&);
-typedef void (MacroAssembler::*RRRTypeInstr)(Register, Register, Register);
-typedef void (MacroAssembler::*RRMTypeInstr)(Register, Register,
- const MemOperand&);
-typedef void (MacroAssembler::*RRITypeInstr)(Register, Register,
- const Operand&);
-
-#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
- { \
- CHECK(HasImmediateInput(instr, (num))); \
- int doZeroExt = i.InputInt32(num); \
- if (doZeroExt) masm->LoadlW(i.OutputRegister(), i.OutputRegister()); \
- }
-
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRTypeInstr rr_instr,
- RMTypeInstr rm_instr, RITypeInstr ri_instr) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- int zeroExtIndex = 2;
- if (mode != kMode_None) {
- size_t first_index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &first_index);
- zeroExtIndex = first_index;
- CHECK(rm_instr != NULL);
- (masm->*rm_instr)(i.OutputRegister(), operand);
- } else if (HasRegisterInput(instr, 1)) {
- (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
- } else {
- UNREACHABLE();
+#define GET_MEMOPERAND32(ret, fi) \
+ ([&](int& ret) { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ MemOperand mem(r0); \
+ if (mode != kMode_None) { \
+ size_t first_index = (fi); \
+ mem = i.MemoryOperand(&mode, &first_index); \
+ ret = first_index; \
+ } else { \
+ mem = i.InputStackSlot32(fi); \
+ } \
+ return mem; \
+ })(ret)
+
+#define GET_MEMOPERAND(ret, fi) \
+ ([&](int& ret) { \
+ AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+ MemOperand mem(r0); \
+ if (mode != kMode_None) { \
+ size_t first_index = (fi); \
+ mem = i.MemoryOperand(&mode, &first_index); \
+ ret = first_index; \
+ } else { \
+ mem = i.InputStackSlot(fi); \
+ } \
+ return mem; \
+ })(ret)
+
+#define RRInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
+ __ instr(i.OutputRegister(), i.InputRegister(1)); \
+ return 2; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
-}
+#define RIInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
+ __ instr(i.OutputRegister(), i.InputImmediate(1)); \
+ return 2; \
+ }
+#define RMInstr(instr, GETMEM) \
+ [&]() { \
+ DCHECK(i.OutputRegister().is(i.InputRegister(0))); \
+ int ret = 2; \
+ __ instr(i.OutputRegister(), GETMEM(ret, 1)); \
+ return ret; \
+ }
+#define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
+#define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRRTypeInstr rrr_instr,
- RMTypeInstr rm_instr, RITypeInstr ri_instr) {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- int zeroExtIndex = 2;
- if (mode != kMode_None) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- size_t first_index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &first_index);
- zeroExtIndex = first_index;
- CHECK(rm_instr != NULL);
- (masm->*rm_instr)(i.OutputRegister(), operand);
- } else if (HasRegisterInput(instr, 1)) {
- (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
- } else {
- UNREACHABLE();
+#define RRRInstr(instr) \
+ [&]() { \
+ __ instr(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); \
+ return 2; \
+ }
+#define RRIInstr(instr) \
+ [&]() { \
+ __ instr(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1)); \
+ return 2; \
+ }
+#define RRMInstr(instr, GETMEM) \
+ [&]() { \
+ int ret = 2; \
+ __ instr(i.OutputRegister(), i.InputRegister(0), GETMEM(ret, 1)); \
+ return ret; \
+ }
+#define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
+#define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)
+
+#define DDInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
+ __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
+ return 2; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
-}
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRRTypeInstr rrr_instr,
- RMTypeInstr rm_instr, RRITypeInstr rri_instr) {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- int zeroExtIndex = 2;
- if (mode != kMode_None) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- size_t first_index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &first_index);
- zeroExtIndex = first_index;
- CHECK(rm_instr != NULL);
- (masm->*rm_instr)(i.OutputRegister(), operand);
- } else if (HasRegisterInput(instr, 1)) {
- (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
- } else {
- UNREACHABLE();
+#define DMInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1)); \
+ return ret; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
-}
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRRTypeInstr rrr_instr,
- RRMTypeInstr rrm_instr, RRITypeInstr rri_instr) {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- int zeroExtIndex = 2;
- if (mode != kMode_None) {
- size_t first_index = 1;
- MemOperand operand = i.MemoryOperand(&mode, &first_index);
- zeroExtIndex = first_index;
- CHECK(rrm_instr != NULL);
- (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0), operand);
- } else if (HasRegisterInput(instr, 1)) {
- (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputStackSlot32(1));
- } else {
- UNREACHABLE();
+#define DMTInstr(instr) \
+ [&]() { \
+ DCHECK(i.OutputDoubleRegister().is(i.InputDoubleRegister(0))); \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1), \
+ kScratchDoubleReg); \
+ return ret; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
-}
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRRTypeInstr rrr_instr,
- RRITypeInstr rri_instr) {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- CHECK(mode == kMode_None);
- int zeroExtIndex = 2;
- if (HasRegisterInput(instr, 1)) {
- (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
- i.InputImmediate(1));
- } else {
- UNREACHABLE();
+#define R_MInstr(instr) \
+ [&]() { \
+ int ret = 2; \
+ __ instr(i.OutputRegister(), GET_MEMOPERAND(ret, 0)); \
+ return ret; \
+ }
+
+#define R_DInstr(instr) \
+ [&]() { \
+ __ instr(i.OutputRegister(), i.InputDoubleRegister(0)); \
+ return 2; \
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+
+#define D_DInstr(instr) \
+ [&]() { \
+ __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ return 2; \
+ }
+
+#define D_MInstr(instr) \
+ [&]() { \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0)); \
+ return ret; \
+ }
+
+#define D_MTInstr(instr) \
+ [&]() { \
+ int ret = 2; \
+ __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0), \
+ kScratchDoubleReg); \
+ return ret; \
+ }
+
+static int nullInstr() {
+ UNREACHABLE();
+ return -1;
}
-void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
- Instruction* instr, RRTypeInstr rr_instr,
- RITypeInstr ri_instr) {
+template <int numOfOperand, class RType, class MType, class IType>
+static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
- CHECK(mode == kMode_None);
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- int zeroExtIndex = 2;
- if (HasRegisterInput(instr, 1)) {
- (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+ if (mode != kMode_None || HasStackSlotInput(instr, numOfOperand - 1)) {
+ return m();
+ } else if (HasRegisterInput(instr, numOfOperand - 1)) {
+ return r();
+ } else if (HasImmediateInput(instr, numOfOperand - 1)) {
+ return i();
} else {
UNREACHABLE();
+ return -1;
}
- CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
}
-#define ASSEMBLE_BIN_OP(instr1, instr2, instr3) \
- AssembleBinOp(i, masm(), instr, &MacroAssembler::instr1, \
- &MacroAssembler::instr2, &MacroAssembler::instr3)
+template <class _RR, class _RM, class _RI>
+static inline int AssembleBinOp(Instruction* instr, _RR _rr, _RM _rm, _RI _ri) {
+ return AssembleOp<2>(instr, _rr, _rm, _ri);
+}
-#undef CHECK_AND_ZERO_EXT_OUTPUT
+template <class _R, class _M, class _I>
+static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
+ return AssembleOp<1>(instr, _r, _m, _i);
+}
-} // namespace
+#define ASSEMBLE_BIN_OP(_rr, _rm, _ri) AssembleBinOp(instr, _rr, _rm, _ri)
+#define ASSEMBLE_UNARY_OP(_r, _m, _i) AssembleUnaryOp(instr, _r, _m, _i)
+#ifdef V8_TARGET_ARCH_S390X
#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
- { \
- CHECK(HasImmediateInput(instr, (num))); \
- int doZeroExt = i.InputInt32(num); \
+ ([&](int index) { \
+ DCHECK(HasImmediateInput(instr, (index))); \
+ int doZeroExt = i.InputInt32(index); \
if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
- }
+ })(num)
+
+#define ASSEMBLE_BIN32_OP(_rr, _rm, _ri) \
+ { CHECK_AND_ZERO_EXT_OUTPUT(AssembleBinOp(instr, _rr, _rm, _ri)); }
+#else
+#define ASSEMBLE_BIN32_OP ASSEMBLE_BIN_OP
+#define CHECK_AND_ZERO_EXT_OUTPUT(num)
+#endif
+
+} // namespace
#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
do { \
@@ -511,19 +526,6 @@ void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
i.InputDoubleRegister(1)); \
} while (0)
-#define ASSEMBLE_BINOP(asm_instr) \
- do { \
- if (HasRegisterInput(instr, 1)) { \
- __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
- i.InputRegister(1)); \
- } else if (HasImmediateInput(instr, 1)) { \
- __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
- i.InputImmediate(1)); \
- } else { \
- UNIMPLEMENTED(); \
- } \
- } while (0)
-
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
do { \
AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
@@ -1351,78 +1353,91 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
+ case kS390_Abs32:
+ // TODO(john.yan): zero-ext
+ __ lpr(i.OutputRegister(0), i.InputRegister(0));
+ break;
+ case kS390_Abs64:
+ __ lpgr(i.OutputRegister(0), i.InputRegister(0));
+ break;
case kS390_And32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(nrk, And, nilf);
+ ASSEMBLE_BIN32_OP(RRRInstr(nrk), RM32Instr(And), RIInstr(nilf));
} else {
- ASSEMBLE_BIN_OP(nr, And, nilf);
+ ASSEMBLE_BIN32_OP(RRInstr(nr), RM32Instr(And), RIInstr(nilf));
}
break;
case kS390_And64:
- ASSEMBLE_BINOP(AndP);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(ngrk), RM64Instr(ng), nullInstr);
+ } else {
+ ASSEMBLE_BIN_OP(RRInstr(ngr), RM64Instr(ng), nullInstr);
+ }
break;
case kS390_Or32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(ork, Or, oilf);
+ ASSEMBLE_BIN32_OP(RRRInstr(ork), RM32Instr(Or), RIInstr(oilf));
} else {
- ASSEMBLE_BIN_OP(or_z, Or, oilf);
+ ASSEMBLE_BIN32_OP(RRInstr(or_z), RM32Instr(Or), RIInstr(oilf));
}
break;
case kS390_Or64:
- ASSEMBLE_BINOP(OrP);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(ogrk), RM64Instr(og), nullInstr);
+ } else {
+ ASSEMBLE_BIN_OP(RRInstr(ogr), RM64Instr(og), nullInstr);
+ }
break;
case kS390_Xor32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(xrk, Xor, xilf);
+ ASSEMBLE_BIN32_OP(RRRInstr(xrk), RM32Instr(Xor), RIInstr(xilf));
} else {
- ASSEMBLE_BIN_OP(xr, Xor, xilf);
+ ASSEMBLE_BIN32_OP(RRInstr(xr), RM32Instr(Xor), RIInstr(xilf));
}
break;
case kS390_Xor64:
- ASSEMBLE_BINOP(XorP);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(xgrk), RM64Instr(xg), nullInstr);
+ } else {
+ ASSEMBLE_BIN_OP(RRInstr(xgr), RM64Instr(xg), nullInstr);
+ }
break;
case kS390_ShiftLeft32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::ShiftLeft,
- &MacroAssembler::ShiftLeft);
+ ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeft), nullInstr, RRIInstr(ShiftLeft));
} else {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::sll,
- &MacroAssembler::sll);
+ ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
}
break;
-#if V8_TARGET_ARCH_S390X
case kS390_ShiftLeft64:
- ASSEMBLE_BINOP(sllg);
+ ASSEMBLE_BIN_OP(RRRInstr(sllg), nullInstr, RRIInstr(sllg));
break;
-#endif
case kS390_ShiftRight32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::srlk,
- &MacroAssembler::srlk);
+ ASSEMBLE_BIN32_OP(RRRInstr(srlk), nullInstr, RRIInstr(srlk));
} else {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::srl,
- &MacroAssembler::srl);
+ ASSEMBLE_BIN32_OP(RRInstr(srl), nullInstr, RIInstr(srl));
}
break;
-#if V8_TARGET_ARCH_S390X
case kS390_ShiftRight64:
- ASSEMBLE_BINOP(srlg);
+ ASSEMBLE_BIN_OP(RRRInstr(srlg), nullInstr, RRIInstr(srlg));
break;
-#endif
case kS390_ShiftRightArith32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::srak,
- &MacroAssembler::srak);
+ ASSEMBLE_BIN32_OP(RRRInstr(srak), nullInstr, RRIInstr(srak));
} else {
- AssembleBinOp(i, masm(), instr, &MacroAssembler::sra,
- &MacroAssembler::sra);
+ ASSEMBLE_BIN32_OP(RRInstr(sra), nullInstr, RIInstr(sra));
}
break;
-#if V8_TARGET_ARCH_S390X
case kS390_ShiftRightArith64:
- ASSEMBLE_BINOP(srag);
+ ASSEMBLE_BIN_OP(RRRInstr(srag), nullInstr, RRIInstr(srag));
break;
-#endif
#if !V8_TARGET_ARCH_S390X
case kS390_AddPair:
// i.InputRegister(0) ... left low word.
@@ -1499,6 +1514,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#endif
case kS390_RotRight32: {
+ // zero-ext
if (HasRegisterInput(instr, 1)) {
__ LoadComplementRR(kScratchReg, i.InputRegister(1));
__ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
@@ -1509,16 +1525,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK_AND_ZERO_EXT_OUTPUT(2);
break;
}
-#if V8_TARGET_ARCH_S390X
case kS390_RotRight64:
if (HasRegisterInput(instr, 1)) {
- __ LoadComplementRR(kScratchReg, i.InputRegister(1));
+ __ lcgr(kScratchReg, i.InputRegister(1));
__ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
+ DCHECK(HasImmediateInput(instr, 1));
__ rllg(i.OutputRegister(), i.InputRegister(0),
Operand(64 - i.InputInt32(1)));
}
break;
+ // TODO(john.yan): clean up kS390_RotLeftAnd...
case kS390_RotLeftAndClear64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = i.InputInt32(1);
@@ -1566,191 +1583,126 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
}
break;
-#endif
case kS390_Add32: {
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(ark, Add32, Add32_RRI);
+ ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(Add32), RRIInstr(Add32));
} else {
- ASSEMBLE_BIN_OP(ar, Add32, Add32_RI);
+ ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(Add32), RIInstr(Add32));
}
break;
}
case kS390_Add64:
- ASSEMBLE_BINOP(AddP);
- break;
- case kS390_AddFloat:
- // Ensure we don't clobber right/InputReg(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- ASSEMBLE_FLOAT_UNOP(aebr);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddP));
} else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ ASSEMBLE_BIN_OP(RRInstr(agr), RM64Instr(ag), RIInstr(agfi));
}
break;
+ case kS390_AddFloat:
+ ASSEMBLE_BIN_OP(DDInstr(aebr), DMTInstr(AddFloat32), nullInstr);
+ break;
case kS390_AddDouble:
- // Ensure we don't clobber right/InputReg(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- ASSEMBLE_FLOAT_UNOP(adbr);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(adbr), DMTInstr(AddFloat64), nullInstr);
break;
case kS390_Sub32:
+ // zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
- ASSEMBLE_BIN_OP(srk, Sub32, Sub32_RRI);
+ ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(Sub32), RRIInstr(Sub32));
} else {
- ASSEMBLE_BIN_OP(sr, Sub32, Sub32_RI);
+ ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(Sub32), RIInstr(Sub32));
}
break;
case kS390_Sub64:
- ASSEMBLE_BINOP(SubP);
- break;
- case kS390_SubFloat:
- // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubP));
} else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- }
- __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubP));
}
break;
+ case kS390_SubFloat:
+ ASSEMBLE_BIN_OP(DDInstr(sebr), DMTInstr(SubFloat32), nullInstr);
+ break;
case kS390_SubDouble:
- // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- }
- __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(sdbr), DMTInstr(SubFloat64), nullInstr);
break;
case kS390_Mul32:
- ASSEMBLE_BIN_OP(Mul32, Mul32, Mul32);
+ // zero-ext
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ ASSEMBLE_BIN32_OP(RRRInstr(msrkc), RM32Instr(msc), RIInstr(Mul32));
+ } else {
+ ASSEMBLE_BIN32_OP(RRInstr(Mul32), RM32Instr(Mul32), RIInstr(Mul32));
+ }
break;
case kS390_Mul32WithOverflow:
- ASSEMBLE_BIN_OP(Mul32WithOverflowIfCCUnequal,
- Mul32WithOverflowIfCCUnequal,
- Mul32WithOverflowIfCCUnequal);
+ // zero-ext
+ ASSEMBLE_BIN32_OP(RRRInstr(Mul32WithOverflowIfCCUnequal),
+ RRM32Instr(Mul32WithOverflowIfCCUnequal),
+ RRIInstr(Mul32WithOverflowIfCCUnequal));
break;
case kS390_Mul64:
- CHECK(i.OutputRegister().is(i.InputRegister(0)));
- if (HasRegisterInput(instr, 1)) {
- __ Mul64(i.InputRegister(0), i.InputRegister(1));
- } else if (HasImmediateInput(instr, 1)) {
- __ Mul64(i.InputRegister(0), i.InputImmediate(1));
- } else if (HasStackSlotInput(instr, 1)) {
- __ Mul64(i.InputRegister(0), i.InputStackSlot(1));
- } else {
- UNIMPLEMENTED();
- }
+ ASSEMBLE_BIN_OP(RRInstr(Mul64), RM64Instr(Mul64), RIInstr(Mul64));
break;
case kS390_MulHigh32:
- ASSEMBLE_BIN_OP(MulHigh32, MulHigh32, MulHigh32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(MulHigh32), RRM32Instr(MulHigh32),
+ RRIInstr(MulHigh32));
break;
case kS390_MulHighU32:
- ASSEMBLE_BIN_OP(MulHighU32, MulHighU32, MulHighU32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(MulHighU32), RRM32Instr(MulHighU32),
+ RRIInstr(MulHighU32));
break;
case kS390_MulFloat:
- // Ensure we don't clobber right
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- ASSEMBLE_FLOAT_UNOP(meebr);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
break;
case kS390_MulDouble:
- // Ensure we don't clobber right
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- ASSEMBLE_FLOAT_UNOP(mdbr);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(mdbr), DMTInstr(MulFloat64), nullInstr);
break;
-#if V8_TARGET_ARCH_S390X
case kS390_Div64:
- __ LoadRR(r1, i.InputRegister(0));
- __ dsgr(r0, i.InputRegister(1)); // R1: Dividend
- __ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
+ ASSEMBLE_BIN_OP(RRRInstr(Div64), RRM64Instr(Div64), nullInstr);
break;
-#endif
case kS390_Div32: {
- ASSEMBLE_BIN_OP(Div32, Div32, Div32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(Div32), RRM32Instr(Div32), nullInstr);
break;
}
-#if V8_TARGET_ARCH_S390X
case kS390_DivU64:
- __ LoadRR(r1, i.InputRegister(0));
- __ LoadImmP(r0, Operand::Zero());
- __ dlgr(r0, i.InputRegister(1)); // R0:R1: Dividend
- __ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
+ ASSEMBLE_BIN_OP(RRRInstr(DivU64), RRM64Instr(DivU64), nullInstr);
break;
-#endif
case kS390_DivU32: {
- ASSEMBLE_BIN_OP(DivU32, DivU32, DivU32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(DivU32), RRM32Instr(DivU32), nullInstr);
break;
}
case kS390_DivFloat:
- // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(debr), DMTInstr(DivFloat32), nullInstr);
break;
case kS390_DivDouble:
- // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
- if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
- __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
- } else {
- if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
- __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ ASSEMBLE_BIN_OP(DDInstr(ddbr), DMTInstr(DivFloat64), nullInstr);
break;
case kS390_Mod32:
- ASSEMBLE_BIN_OP(Mod32, Mod32, Mod32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(Mod32), RRM32Instr(Mod32), nullInstr);
break;
case kS390_ModU32:
- ASSEMBLE_BIN_OP(ModU32, ModU32, ModU32);
+ // zero-ext
+ ASSEMBLE_BIN_OP(RRRInstr(ModU32), RRM32Instr(ModU32), nullInstr);
break;
-#if V8_TARGET_ARCH_S390X
case kS390_Mod64:
- __ LoadRR(r1, i.InputRegister(0));
- __ dsgr(r0, i.InputRegister(1)); // R1: Dividend
- __ ltgr(i.OutputRegister(), r0); // Copy R0: Remainder to output
+ ASSEMBLE_BIN_OP(RRRInstr(Mod64), RRM64Instr(Mod64), nullInstr);
break;
case kS390_ModU64:
- __ LoadRR(r1, i.InputRegister(0));
- __ LoadImmP(r0, Operand::Zero());
- __ dlgr(r0, i.InputRegister(1)); // R0:R1: Dividend
- __ ltgr(i.OutputRegister(), r0); // Copy R0: Remainder to output
+ ASSEMBLE_BIN_OP(RRRInstr(ModU64), RRM64Instr(ModU64), nullInstr);
break;
-#endif
case kS390_AbsFloat:
__ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_SqrtFloat:
- ASSEMBLE_FLOAT_UNOP(sqebr);
+ ASSEMBLE_UNARY_OP(D_DInstr(sqebr), nullInstr, nullInstr);
+ break;
+ case kS390_SqrtDouble:
+ ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
break;
case kS390_FloorFloat:
__ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1856,9 +1808,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_AbsDouble:
__ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kS390_SqrtDouble:
- ASSEMBLE_FLOAT_UNOP(sqdbr);
- break;
case kS390_FloorDouble:
__ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
@@ -1876,10 +1825,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
break;
case kS390_NegFloat:
- ASSEMBLE_FLOAT_UNOP(lcebr);
+ ASSEMBLE_UNARY_OP(D_DInstr(lcebr), nullInstr, nullInstr);
break;
case kS390_NegDouble:
- ASSEMBLE_FLOAT_UNOP(lcdbr);
+ ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
break;
case kS390_Cntlz32: {
__ llgfr(i.OutputRegister(), i.InputRegister(0));
@@ -1923,6 +1872,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 1)) {
__ And(r0, i.InputRegister(0), i.InputRegister(1));
} else {
+ // detect tmlh/tmhl/tmhh case
Operand opnd = i.InputImmediate(1);
if (is_uint16(opnd.immediate())) {
__ tmll(i.InputRegister(0), opnd);
@@ -2002,7 +1952,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lhr(i.OutputRegister(), i.InputRegister(0));
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
-#if V8_TARGET_ARCH_S390X
case kS390_ExtendSignWord32:
__ lgfr(i.OutputRegister(), i.InputRegister(0));
break;
@@ -2014,140 +1963,139 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// sign extend
__ lgfr(i.OutputRegister(), i.InputRegister(0));
break;
+ // Convert Fixed to Floating Point
case kS390_Int64ToFloat32:
- __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ __ ConvertInt64ToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kS390_Int64ToDouble:
- __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ __ ConvertInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kS390_Uint64ToFloat32:
- __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
- i.OutputDoubleRegister());
+ __ ConvertUnsignedInt64ToFloat(i.OutputDoubleRegister(),
+ i.InputRegister(0));
break;
case kS390_Uint64ToDouble:
- __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
- i.OutputDoubleRegister());
+ __ ConvertUnsignedInt64ToDouble(i.OutputDoubleRegister(),
+ i.InputRegister(0));
break;
-#endif
case kS390_Int32ToFloat32:
- __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ __ ConvertIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kS390_Int32ToDouble:
- __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ __ ConvertIntToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kS390_Uint32ToFloat32:
- __ ConvertUnsignedIntToFloat(i.InputRegister(0),
- i.OutputDoubleRegister());
+ __ ConvertUnsignedIntToFloat(i.OutputDoubleRegister(),
+ i.InputRegister(0));
break;
case kS390_Uint32ToDouble:
- __ ConvertUnsignedIntToDouble(i.InputRegister(0),
- i.OutputDoubleRegister());
+ __ ConvertUnsignedIntToDouble(i.OutputDoubleRegister(),
+ i.InputRegister(0));
+ break;
+ case kS390_DoubleToInt32: {
+ Label done;
+ __ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
+ kRoundToNearest);
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ __ bind(&done);
+ break;
+ }
+ case kS390_DoubleToUint32: {
+ Label done;
+ __ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
+ i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ __ bind(&done);
break;
- case kS390_DoubleToInt32:
- case kS390_DoubleToUint32:
+ }
case kS390_DoubleToInt64: {
-#if V8_TARGET_ARCH_S390X
- bool check_conversion =
- (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
-#endif
- __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
-#if !V8_TARGET_ARCH_S390X
- kScratchReg,
-#endif
- i.OutputRegister(0), kScratchDoubleReg);
-#if V8_TARGET_ARCH_S390X
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ Label done;
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand(1));
}
-#endif
+ __ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand::Zero());
+ } else {
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ }
+ __ bind(&done);
break;
}
- case kS390_Float32ToInt32: {
- bool check_conversion = (i.OutputCount() > 1);
- __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
- kScratchDoubleReg, kRoundToZero);
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ case kS390_DoubleToUint64: {
+ Label done;
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand(1));
}
+ __ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
+ i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand::Zero());
+ } else {
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ }
+ __ bind(&done);
+ break;
+ }
+ case kS390_Float32ToInt32: {
+ Label done;
+ __ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
+ kRoundToZero);
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ __ bind(&done);
break;
}
case kS390_Float32ToUint32: {
- bool check_conversion = (i.OutputCount() > 1);
- __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
- i.OutputRegister(0), kScratchDoubleReg);
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
- }
+ Label done;
+ __ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
+ i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ lghi(i.OutputRegister(0), Operand::Zero());
+ __ bind(&done);
break;
}
-#if V8_TARGET_ARCH_S390X
case kS390_Float32ToUint64: {
- bool check_conversion = (i.OutputCount() > 1);
- __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
- i.OutputRegister(0), kScratchDoubleReg);
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ Label done;
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand(1));
+ }
+ __ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
+ i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand::Zero());
+ } else {
+ __ lghi(i.OutputRegister(0), Operand::Zero());
}
+ __ bind(&done);
break;
}
-#endif
case kS390_Float32ToInt64: {
-#if V8_TARGET_ARCH_S390X
- bool check_conversion =
- (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
-#endif
- __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
-#if !V8_TARGET_ARCH_S390X
- kScratchReg,
-#endif
- i.OutputRegister(0), kScratchDoubleReg);
-#if V8_TARGET_ARCH_S390X
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ Label done;
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand(1));
}
-#endif
- break;
- }
-#if V8_TARGET_ARCH_S390X
- case kS390_DoubleToUint64: {
- bool check_conversion = (i.OutputCount() > 1);
- __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
- i.OutputRegister(0), kScratchDoubleReg);
- if (check_conversion) {
- Label conversion_done;
- __ LoadImmP(i.OutputRegister(1), Operand::Zero());
- __ b(Condition(1), &conversion_done); // special case
- __ LoadImmP(i.OutputRegister(1), Operand(1));
- __ bind(&conversion_done);
+ __ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
+ __ b(Condition(0xe), &done, Label::kNear); // normal case
+ if (i.OutputCount() > 1) {
+ __ lghi(i.OutputRegister(1), Operand::Zero());
+ } else {
+ __ lghi(i.OutputRegister(0), Operand::Zero());
}
+ __ bind(&done);
break;
}
-#endif
case kS390_DoubleToFloat32:
- __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ ASSEMBLE_UNARY_OP(D_DInstr(ledbr), nullInstr, nullInstr);
break;
case kS390_Float32ToDouble:
- __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MTInstr(LoadFloat32ToDouble),
+ nullInstr);
break;
case kS390_DoubleExtractLowWord32:
__ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -2158,13 +2106,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
break;
case kS390_DoubleInsertLowWord32:
- __ lgdr(kScratchReg, i.OutputDoubleRegister());
+ __ lgdr(kScratchReg, i.InputDoubleRegister(0));
__ lr(kScratchReg, i.InputRegister(1));
__ ldgr(i.OutputDoubleRegister(), kScratchReg);
break;
case kS390_DoubleInsertHighWord32:
__ sllg(kScratchReg, i.InputRegister(1), Operand(32));
- __ lgdr(r0, i.OutputDoubleRegister());
+ __ lgdr(r0, i.InputDoubleRegister(0));
__ lr(kScratchReg, r0);
__ ldgr(i.OutputDoubleRegister(), kScratchReg);
break;
@@ -2176,15 +2124,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ldgr(i.OutputDoubleRegister(), kScratchReg);
break;
case kS390_LoadWordS8:
- ASSEMBLE_LOAD_INTEGER(LoadlB);
-#if V8_TARGET_ARCH_S390X
- __ lgbr(i.OutputRegister(), i.OutputRegister());
-#else
- __ lbr(i.OutputRegister(), i.OutputRegister());
-#endif
+ ASSEMBLE_LOAD_INTEGER(LoadB);
break;
case kS390_BitcastFloat32ToInt32:
- __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadlW), nullInstr);
break;
case kS390_BitcastInt32ToFloat32:
__ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
@@ -2231,11 +2174,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_LoadReverse64RR:
__ lrvgr(i.OutputRegister(), i.InputRegister(0));
break;
-#if V8_TARGET_ARCH_S390X
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
break;
-#endif
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
break;
@@ -2283,12 +2224,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
-#if V8_TARGET_ARCH_S390X
- __ lgbr(i.OutputRegister(), i.OutputRegister());
-#else
- __ lbr(i.OutputRegister(), i.OutputRegister());
-#endif
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadB);
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
@@ -2361,6 +2297,138 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
__ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
break;
+// 0x aa bb cc dd
+// index = 3..2..1..0
+#define ATOMIC_EXCHANGE(start, end, shift_amount, offset) \
+ { \
+ Label do_cs; \
+ __ LoadlW(output, MemOperand(r1)); \
+ __ bind(&do_cs); \
+ __ llgfr(r0, output); \
+ __ risbg(r0, value, Operand(start), Operand(end), Operand(shift_amount), \
+ false); \
+ __ csy(output, r0, MemOperand(r1, offset)); \
+ __ bne(&do_cs, Label::kNear); \
+ __ srl(output, Operand(shift_amount)); \
+ }
+#ifdef V8_TARGET_BIG_ENDIAN
+#define ATOMIC_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * idx; \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = (3 - idx) * 8; \
+ ATOMIC_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * idx; \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = (1 - idx) * 16; \
+ ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
+ }
+#else
+#define ATOMIC_EXCHANGE_BYTE(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 8 * (3 - idx); \
+ constexpr int end = start + 7; \
+ constexpr int shift_amount = idx * 8; \
+ ATOMIC_EXCHANGE(start, end, shift_amount, -idx); \
+ }
+#define ATOMIC_EXCHANGE_HALFWORD(i) \
+ { \
+ constexpr int idx = (i); \
+ static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
+ constexpr int start = 32 + 16 * (1 - idx); \
+ constexpr int end = start + 15; \
+ constexpr int shift_amount = idx * 16; \
+ ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
+ }
+#endif
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label three, two, one, done;
+ __ la(r1, MemOperand(base, index));
+ __ tmll(r1, Operand(3));
+ __ b(Condition(1), &three);
+ __ b(Condition(2), &two);
+ __ b(Condition(4), &one);
+
+ // end with 0b00
+ ATOMIC_EXCHANGE_BYTE(0);
+ __ b(&done);
+
+ // ending with 0b01
+ __ bind(&one);
+ ATOMIC_EXCHANGE_BYTE(1);
+ __ b(&done);
+
+ // ending with 0b10
+ __ bind(&two);
+ ATOMIC_EXCHANGE_BYTE(2);
+ __ b(&done);
+
+ // ending with 0b11
+ __ bind(&three);
+ ATOMIC_EXCHANGE_BYTE(3);
+
+ __ bind(&done);
+ if (opcode == kAtomicExchangeInt8) {
+ __ lbr(output, output);
+ } else {
+ __ llcr(output, output);
+ }
+ break;
+ }
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label two, unaligned, done;
+ __ la(r1, MemOperand(base, index));
+ __ tmll(r1, Operand(3));
+ __ b(Condition(2), &two);
+
+ // end with 0b00
+ ATOMIC_EXCHANGE_HALFWORD(0);
+ __ b(&done);
+
+ // ending with 0b10
+ __ bind(&two);
+ ATOMIC_EXCHANGE_HALFWORD(1);
+
+ __ bind(&done);
+ if (opcode == kAtomicExchangeInt8) {
+ __ lhr(output, output);
+ } else {
+ __ llhr(output, output);
+ }
+ break;
+ }
+ case kAtomicExchangeWord32: {
+ Register base = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register output = i.OutputRegister();
+ Label do_cs;
+ __ lay(r1, MemOperand(base, index));
+ __ LoadlW(output, MemOperand(r1));
+ __ bind(&do_cs);
+ __ cs(output, value, MemOperand(r1));
+ __ bne(&do_cs, Label::kNear);
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -2481,8 +2549,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Overflow checked for add/sub only.
DCHECK((condition != kOverflow && condition != kNotOverflow) ||
- (op == kS390_Add32 || kS390_Add64 || op == kS390_Sub32 ||
- op == kS390_Sub64));
+ (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
+ op == kS390_Sub64 || op == kS390_Mul32));
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
@@ -2495,6 +2563,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
: Operand(1));
__ bunordered(&done);
}
+
+ // TODO(john.yan): use load imm high on condition here
__ LoadImmP(reg, Operand::Zero());
__ LoadImmP(kScratchReg, Operand(1));
// locr is sufficient since reg's upper 32 is guarrantee to be 0
@@ -2543,7 +2613,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
+ }
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2669,6 +2741,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
+void CodeGenerator::FinishCode() {}
+
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
S390OperandConverter g(this, nullptr);
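
Note: across this file and the instruction-selector diff further down, 32-bit operations carry an extra trailing immediate. The selector appends g.TempImmediate(!canEliminateZeroExt), and the code generator's CHECK_AND_ZERO_EXT_OUTPUT reads that operand and emits LoadlW only when it is 1. A rough sketch of that handshake follows; Instruction, AddZeroExtMarker, and MaybeZeroExtendOutput are hypothetical names for illustration, not V8 APIs.

// Sketch of the zero-extension marker handshake (simplified types).
#include <cstdio>
#include <vector>

// Stand-in for an instruction: the last input holds the 0/1 marker.
struct Instruction {
  std::vector<int> inputs;
};

// Selector side: append a trailing immediate saying whether the 32-bit
// result still needs an explicit zero extension on 64-bit targets.
void AddZeroExtMarker(Instruction& instr, bool input_already_zero_extended) {
  instr.inputs.push_back(input_already_zero_extended ? 0 : 1);
}

// Code-generator side: the analogue of CHECK_AND_ZERO_EXT_OUTPUT(index).
void MaybeZeroExtendOutput(const Instruction& instr, int index) {
  if (instr.inputs[index]) {
    std::puts("emit LoadlW(out, out)  // zero-extend low 32 bits");
  }
}

int main() {
  Instruction add32{{/*left reg*/ 0, /*right reg*/ 1}};
  AddZeroExtMarker(add32, /*input_already_zero_extended=*/false);
  MaybeZeroExtendOutput(add32, 2);  // the marker sits at input index 2
  return 0;
}
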
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index b99e79f68b..d415de6587 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -12,6 +12,8 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(S390_Abs32) \
+ V(S390_Abs64) \
V(S390_And32) \
V(S390_And64) \
V(S390_Or32) \
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index d6ec3deaab..352e63af07 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -13,6 +13,8 @@ bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
+ case kS390_Abs32:
+ case kS390_Abs64:
case kS390_And32:
case kS390_And64:
case kS390_Or32:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index e591d3caeb..228ec3c0d5 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -43,20 +43,28 @@ OperandModes immediateModeMask =
OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
OperandMode::kUint32Imm | OperandMode::kInt20Imm;
-#define AndOperandMode \
- ((OperandMode::kBitWiseCommonMode | OperandMode::kUint32Imm | \
- OperandMode::kAllowRM | (CpuFeatures::IsSupported(DISTINCT_OPS) \
- ? OperandMode::kAllowRRR \
- : OperandMode::kBitWiseCommonMode)))
-
-#define OrOperandMode AndOperandMode
-#define XorOperandMode AndOperandMode
-
-#define ShiftOperandMode \
- ((OperandMode::kBitWiseCommonMode | OperandMode::kShift64Imm | \
- (CpuFeatures::IsSupported(DISTINCT_OPS) \
- ? OperandMode::kAllowRRR \
- : OperandMode::kBitWiseCommonMode)))
+#define AndCommonMode \
+ ((OperandMode::kAllowRM | \
+ (CpuFeatures::IsSupported(DISTINCT_OPS) ? OperandMode::kAllowRRR \
+ : OperandMode::kNone)))
+#define And64OperandMode AndCommonMode
+#define Or64OperandMode And64OperandMode
+#define Xor64OperandMode And64OperandMode
+
+#define And32OperandMode \
+ (AndCommonMode | OperandMode::kAllowRI | OperandMode::kUint32Imm)
+#define Or32OperandMode And32OperandMode
+#define Xor32OperandMode And32OperandMode
+
+#define Shift32OperandMode \
+ ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
+ (CpuFeatures::IsSupported(DISTINCT_OPS) \
+ ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
+ : OperandMode::kNone)))
+
+#define Shift64OperandMode \
+ ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
+ OperandMode::kAllowRRR | OperandMode::kAllowRRI))
#define AddOperandMode \
((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
@@ -241,8 +249,11 @@ namespace {
bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
switch (opcode) {
+ case kS390_AddFloat:
+ case kS390_AddDouble:
case kS390_CmpFloat:
case kS390_CmpDouble:
+ case kS390_Float32ToDouble:
return true;
default:
return false;
@@ -306,49 +317,51 @@ ArchOpcode SelectLoadOpcode(Node* node) {
return opcode;
}
-bool AutoZeroExtendsWord32ToWord64(Node* node) {
-#if !V8_TARGET_ARCH_S390X
- return true;
-#else
- switch (node->opcode()) {
- case IrOpcode::kInt32Div:
- case IrOpcode::kUint32Div:
- case IrOpcode::kInt32MulHigh:
- case IrOpcode::kUint32MulHigh:
- case IrOpcode::kInt32Mod:
- case IrOpcode::kUint32Mod:
- case IrOpcode::kWord32Clz:
- case IrOpcode::kWord32Popcnt:
- return true;
- default:
- return false;
- }
- return false;
-#endif
-}
-
-bool ZeroExtendsWord32ToWord64(Node* node) {
+#define RESULT_IS_WORD32_LIST(V) \
+ /* Float unary op*/ \
+ V(BitcastFloat32ToInt32) \
+ /* V(TruncateFloat64ToWord32) */ \
+ /* V(RoundFloat64ToInt32) */ \
+ /* V(TruncateFloat32ToInt32) */ \
+ /* V(TruncateFloat32ToUint32) */ \
+ /* V(TruncateFloat64ToUint32) */ \
+ /* V(ChangeFloat64ToInt32) */ \
+ /* V(ChangeFloat64ToUint32) */ \
+ /* Word32 unary op */ \
+ V(Word32Clz) \
+ V(Word32Popcnt) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ /* Word32 bin op */ \
+ V(Int32Add) \
+ V(Int32Sub) \
+ V(Int32Mul) \
+ V(Int32AddWithOverflow) \
+ V(Int32SubWithOverflow) \
+ V(Int32MulWithOverflow) \
+ V(Int32MulHigh) \
+ V(Uint32MulHigh) \
+ V(Int32Div) \
+ V(Uint32Div) \
+ V(Int32Mod) \
+ V(Uint32Mod) \
+ V(Word32Ror) \
+ V(Word32And) \
+ V(Word32Or) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar)
+
+bool ProduceWord32Result(Node* node) {
#if !V8_TARGET_ARCH_S390X
return true;
#else
switch (node->opcode()) {
- case IrOpcode::kInt32Add:
- case IrOpcode::kInt32Sub:
- case IrOpcode::kWord32And:
- case IrOpcode::kWord32Or:
- case IrOpcode::kWord32Xor:
- case IrOpcode::kWord32Shl:
- case IrOpcode::kWord32Shr:
- case IrOpcode::kWord32Sar:
- case IrOpcode::kInt32Mul:
- case IrOpcode::kWord32Ror:
- case IrOpcode::kInt32Div:
- case IrOpcode::kUint32Div:
- case IrOpcode::kInt32MulHigh:
- case IrOpcode::kInt32Mod:
- case IrOpcode::kUint32Mod:
- case IrOpcode::kWord32Popcnt:
- return true;
+#define VISITOR(name) case IrOpcode::k##name:
+ RESULT_IS_WORD32_LIST(VISITOR)
+#undef VISITOR
+ return true;
// TODO(john.yan): consider the following case to be valid
// case IrOpcode::kWord32Equal:
// case IrOpcode::kInt32LessThan:
@@ -376,6 +389,11 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord32:
return true;
+ case MachineRepresentation::kWord8:
+ if (load_rep.IsSigned())
+ return false;
+ else
+ return true;
default:
return false;
}
@@ -386,28 +404,20 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
#endif
}
-void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
- S390OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+static inline bool DoZeroExtForResult(Node* node) {
+#if V8_TARGET_ARCH_S390X
+ return ProduceWord32Result(node);
+#else
+ return false;
+#endif
}
-void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
- S390OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
-}
+// TODO(john.yan): Create VisiteShift to match dst = src shift (R+I)
+#if 0
+void VisitShift() { }
+#endif
#if V8_TARGET_ARCH_S390X
-void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
- OperandModes operand_mode) {
- S390OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)),
- g.UseOperand(node->InputAt(1), operand_mode));
-}
-
void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
S390OperandGenerator g(selector);
@@ -425,42 +435,153 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
}
#endif
-// Shared routine for multiple binary operations.
-template <typename Matcher>
-void VisitBinop(InstructionSelector* selector, Node* node,
+template <class CanCombineWithLoad>
+void GenerateRightOperands(InstructionSelector* selector, Node* node,
+ Node* right, InstructionCode& opcode,
+ OperandModes& operand_mode,
+ InstructionOperand* inputs, size_t& input_count,
+ CanCombineWithLoad canCombineWithLoad) {
+ S390OperandGenerator g(selector);
+
+ if ((operand_mode & OperandMode::kAllowImmediate) &&
+ g.CanBeImmediate(right, operand_mode)) {
+ inputs[input_count++] = g.UseImmediate(right);
+ // Can only be RI or RRI
+ operand_mode &= OperandMode::kAllowImmediate;
+ } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
+ NodeMatcher mright(right);
+ if (mright.IsLoad() && selector->CanCover(node, right) &&
+ canCombineWithLoad(SelectLoadOpcode(right))) {
+ AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+ right, inputs, &input_count, OpcodeImmMode(opcode));
+ opcode |= AddressingModeField::encode(mode);
+ operand_mode &= ~OperandMode::kAllowImmediate;
+ if (operand_mode & OperandMode::kAllowRM)
+ operand_mode &= ~OperandMode::kAllowDistinctOps;
+ } else if (operand_mode & OperandMode::kAllowRM) {
+ DCHECK(!(operand_mode & OperandMode::kAllowRRM));
+ inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ // Can not be Immediate
+ operand_mode &=
+ ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
+ } else if (operand_mode & OperandMode::kAllowRRM) {
+ DCHECK(!(operand_mode & OperandMode::kAllowRM));
+ inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ // Can not be Immediate
+ operand_mode &= ~OperandMode::kAllowImmediate;
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ inputs[input_count++] = g.UseRegister(right);
+ // Can only be RR or RRR
+ operand_mode &= OperandMode::kAllowRRR;
+ }
+}
+
+template <class CanCombineWithLoad>
+void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
+ Node* left, Node* right, InstructionCode& opcode,
+ OperandModes& operand_mode,
+ InstructionOperand* inputs, size_t& input_count,
+ CanCombineWithLoad canCombineWithLoad) {
+ S390OperandGenerator g(selector);
+ // left is always register
+ InstructionOperand const left_input = g.UseRegister(left);
+ inputs[input_count++] = left_input;
+
+ if (left == right) {
+ inputs[input_count++] = left_input;
+ // Can only be RR or RRR
+ operand_mode &= OperandMode::kAllowRRR;
+ } else {
+ GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
+ input_count, canCombineWithLoad);
+ }
+}
+
+template <class CanCombineWithLoad>
+void VisitUnaryOp(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, OperandModes operand_mode,
+ FlagsContinuation* cont,
+ CanCombineWithLoad canCombineWithLoad);
+
+template <class CanCombineWithLoad>
+void VisitBinOp(InstructionSelector* selector, Node* node,
InstructionCode opcode, OperandModes operand_mode,
- FlagsContinuation* cont) {
+ FlagsContinuation* cont, CanCombineWithLoad canCombineWithLoad);
+
+// Generate The following variations:
+// VisitWord32UnaryOp, VisitWord32BinOp,
+// VisitWord64UnaryOp, VisitWord64BinOp,
+// VisitFloat32UnaryOp, VisitFloat32BinOp,
+// VisitFloat64UnaryOp, VisitFloat64BinOp
+#define VISIT_OP_LIST_32(V) \
+ V(Word32, Unary, [](ArchOpcode opcode) { \
+ return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
+ }) \
+ V(Word64, Unary, \
+ [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; }) \
+ V(Float32, Unary, \
+ [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
+ V(Float64, Unary, \
+ [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; }) \
+ V(Word32, Bin, [](ArchOpcode opcode) { \
+ return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
+ }) \
+ V(Float32, Bin, \
+ [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
+ V(Float64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })
+
+#if V8_TARGET_ARCH_S390X
+#define VISIT_OP_LIST(V) \
+ VISIT_OP_LIST_32(V) \
+ V(Word64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })
+#else
+#define VISIT_OP_LIST VISIT_OP_LIST_32
+#endif
+
+#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad) \
+ static inline void Visit##type1##type2##Op( \
+ InstructionSelector* selector, Node* node, InstructionCode opcode, \
+ OperandModes operand_mode, FlagsContinuation* cont) { \
+ Visit##type2##Op(selector, node, opcode, operand_mode, cont, \
+ canCombineWithLoad); \
+ } \
+ static inline void Visit##type1##type2##Op( \
+ InstructionSelector* selector, Node* node, InstructionCode opcode, \
+ OperandModes operand_mode) { \
+ FlagsContinuation cont; \
+ Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \
+ }
+VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS);
+#undef DECLARE_VISIT_HELPER_FUNCTIONS
+#undef VISIT_OP_LIST_32
+#undef VISIT_OP_LIST
+
+template <class CanCombineWithLoad>
+void VisitUnaryOp(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, OperandModes operand_mode,
+ FlagsContinuation* cont,
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
- Matcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- InstructionOperand inputs[4];
+ InstructionOperand inputs[8];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
+ Node* input = node->InputAt(0);
- // TODO(turbofan): match complex addressing modes.
- if (left == right) {
- // If both inputs refer to the same operand, enforce allocating a register
- // for both of them to ensure that we don't end up generating code like
- // this:
- //
- // mov rax, [rbp-0x10]
- // add rax, [rbp-0x10]
- // jo label
- InstructionOperand const input = g.UseRegister(left);
- inputs[input_count++] = input;
- inputs[input_count++] = input;
- } else if (g.CanBeImmediate(right, operand_mode)) {
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.UseImmediate(right);
- } else {
- if (node->op()->HasProperty(Operator::kCommutative) &&
- g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.UseRegister(right);
+ GenerateRightOperands(selector, node, input, opcode, operand_mode, inputs,
+ input_count, canCombineWithLoad);
+
+ bool input_is_word32 = ProduceWord32Result(input);
+
+ bool doZeroExt = DoZeroExtForResult(node);
+ bool canEliminateZeroExt = input_is_word32;
+
+ if (doZeroExt) {
+ // Add zero-ext indication
+ inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
}
if (cont->IsBranch()) {
@@ -468,14 +589,20 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- if (cont->IsDeoptimize()) {
- // If we can deoptimize as a result of the binop, we need to make sure that
- // the deopt inputs are not overwritten by the binop result. One way
+ if (!cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure
+ // that the deopt inputs are not overwritten by the binop result. One way
// to achieve that is to declare the output register as same-as-first.
- outputs[output_count++] = g.DefineSameAsFirst(node);
+ if (doZeroExt && canEliminateZeroExt) {
+      // We have to make sure the result and the input use the same register.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
} else {
- outputs[output_count++] = g.DefineAsRegister(node);
+ outputs[output_count++] = g.DefineSameAsFirst(node);
}
+
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -486,6 +613,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(outputs), output_count);
opcode = cont->Encode(opcode);
+
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->kind(), cont->reason(), cont->frame_state());
@@ -497,17 +625,11 @@ void VisitBinop(InstructionSelector* selector, Node* node,
}
}
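
The doZeroExt / canEliminateZeroExt pair expresses a small contract between the
selector and the code generator: when a 32-bit result must be zero-extended to
64 bits, an extra immediate input is appended whose value tells the code
generator whether an explicit zero-extension is still required (1) or can be
skipped because the producing op already leaves a zero-extended value (0), in
which case the output is pinned same-as-first so the already-clean register is
reused. A rough standalone model of just that decision (the struct and names
here are illustrative, not V8 API):

    #include <cassert>

    // Illustrative model: decide the value of the extra immediate appended
    // after the regular inputs of a 32-bit operation.
    struct OpInfo {
      bool result_needs_zero_ext;  // DoZeroExtForResult(node) in the real code
      bool input_already_word32;   // ProduceWord32Result(input)
    };

    // 1 => code generator must zero-extend, 0 => it may skip it,
    // -1 => no flag is appended at all (64-bit result).
    inline int ZeroExtFlag(const OpInfo& op) {
      if (!op.result_needs_zero_ext) return -1;
      return op.input_already_word32 ? 0 : 1;
    }

    int main() {
      assert(ZeroExtFlag({true, true}) == 0);     // zero-extension elided
      assert(ZeroExtFlag({true, false}) == 1);    // explicit zero-extension
      assert(ZeroExtFlag({false, false}) == -1);  // no flag appended
    }
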
-// Shared routine for multiple binary operations.
-template <typename Matcher>
-void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
- OperandModes operand_mode) {
- FlagsContinuation cont;
- VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
-}
-
-void VisitBin32op(InstructionSelector* selector, Node* node,
- InstructionCode opcode, OperandModes operand_mode,
- FlagsContinuation* cont) {
+template <class CanCombineWithLoad>
+void VisitBinOp(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, OperandModes operand_mode,
+ FlagsContinuation* cont,
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
Int32BinopMatcher m(node);
Node* left = m.left().node();
@@ -517,98 +639,41 @@ void VisitBin32op(InstructionSelector* selector, Node* node,
InstructionOperand outputs[2];
size_t output_count = 0;
- // match left of TruncateInt64ToInt32
- if (m.left().IsTruncateInt64ToInt32() && selector->CanCover(node, left)) {
- left = left->InputAt(0);
- }
- // match right of TruncateInt64ToInt32
- if (m.right().IsTruncateInt64ToInt32() && selector->CanCover(node, right)) {
- right = right->InputAt(0);
- }
-
-#if V8_TARGET_ARCH_S390X
- if ((ZeroExtendsWord32ToWord64(right) || g.CanBeBetterLeftOperand(right)) &&
- node->op()->HasProperty(Operator::kCommutative) &&
- !g.CanBeImmediate(right, operand_mode)) {
- std::swap(left, right);
- }
-#else
if (node->op()->HasProperty(Operator::kCommutative) &&
!g.CanBeImmediate(right, operand_mode) &&
(g.CanBeBetterLeftOperand(right))) {
std::swap(left, right);
}
-#endif
- // left is always register
- InstructionOperand const left_input = g.UseRegister(left);
- inputs[input_count++] = left_input;
+ GenerateBinOpOperands(selector, node, left, right, opcode, operand_mode,
+ inputs, input_count, canCombineWithLoad);
- // TODO(turbofan): match complex addressing modes.
- if (left == right) {
- // If both inputs refer to the same operand, enforce allocating a register
- // for both of them to ensure that we don't end up generating code like
- // this:
- //
- // mov rax, [rbp-0x10]
- // add rax, [rbp-0x10]
- // jo label
- inputs[input_count++] = left_input;
- // Can only be RR or RRR
- operand_mode &= OperandMode::kAllowRRR;
- } else if ((operand_mode & OperandMode::kAllowImmediate) &&
- g.CanBeImmediate(right, operand_mode)) {
- inputs[input_count++] = g.UseImmediate(right);
- // Can only be RI or RRI
- operand_mode &= OperandMode::kAllowImmediate;
- } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
- NodeMatcher mright(right);
- if (mright.IsLoad() && selector->CanCover(node, right) &&
- SelectLoadOpcode(right) == kS390_LoadWordU32) {
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
- opcode |= AddressingModeField::encode(mode);
- operand_mode &= ~OperandMode::kAllowImmediate;
- if (operand_mode & OperandMode::kAllowRM)
- operand_mode &= ~OperandMode::kAllowDistinctOps;
- } else if (operand_mode & OperandMode::kAllowRM) {
- DCHECK(!(operand_mode & OperandMode::kAllowRRM));
- inputs[input_count++] = g.Use(right);
- // Can not be Immediate
- operand_mode &=
- ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
- } else if (operand_mode & OperandMode::kAllowRRM) {
- DCHECK(!(operand_mode & OperandMode::kAllowRM));
- inputs[input_count++] = g.Use(right);
- // Can not be Immediate
- operand_mode &= ~OperandMode::kAllowImmediate;
- } else {
- UNREACHABLE();
- }
- } else {
- inputs[input_count++] = g.UseRegister(right);
- // Can only be RR or RRR
- operand_mode &= OperandMode::kAllowRRR;
- }
+ bool left_is_word32 = ProduceWord32Result(left);
- bool doZeroExt =
- AutoZeroExtendsWord32ToWord64(node) || !ZeroExtendsWord32ToWord64(left);
+ bool doZeroExt = DoZeroExtForResult(node);
+ bool canEliminateZeroExt = left_is_word32;
- inputs[input_count++] =
- g.TempImmediate(doZeroExt && (!AutoZeroExtendsWord32ToWord64(node)));
+ if (doZeroExt) {
+ // Add zero-ext indication
+ inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
+ }
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
- if (doZeroExt && (operand_mode & OperandMode::kAllowDistinctOps) &&
+ if ((operand_mode & OperandMode::kAllowDistinctOps) &&
// If we can deoptimize as a result of the binop, we need to make sure
- // that
- // the deopt inputs are not overwritten by the binop result. One way
+ // that the deopt inputs are not overwritten by the binop result. One way
// to achieve that is to declare the output register as same-as-first.
!cont->IsDeoptimize()) {
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (doZeroExt && canEliminateZeroExt) {
+      // We have to make sure the result and the left operand use the same register.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
} else {
outputs[output_count++] = g.DefineSameAsFirst(node);
}
@@ -635,12 +700,6 @@ void VisitBin32op(InstructionSelector* selector, Node* node,
}
}
-void VisitBin32op(InstructionSelector* selector, Node* node, ArchOpcode opcode,
- OperandModes operand_mode) {
- FlagsContinuation cont;
- VisitBin32op(selector, node, opcode, operand_mode, &cont);
-}
-
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
@@ -908,10 +967,6 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
}
#endif
-void InstructionSelector::VisitWord32And(Node* node) {
- VisitBin32op(this, node, kS390_And32, AndOperandMode);
-}
-
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64And(Node* node) {
S390OperandGenerator g(this);
@@ -954,46 +1009,16 @@ void InstructionSelector::VisitWord64And(Node* node) {
opcode = kS390_RotLeftAndClear64;
mask = mb;
}
- if (match) {
+ if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
g.TempImmediate(sh), g.TempImmediate(mask));
return;
}
}
}
- VisitBinop<Int64BinopMatcher>(this, node, kS390_And64,
- OperandMode::kUint32Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBin32op(this, node, kS390_Or32, OrOperandMode);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Or(Node* node) {
- Int64BinopMatcher m(node);
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64,
- OperandMode::kUint32Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Xor(Node* node) {
- VisitBin32op(this, node, kS390_Xor32, XorOperandMode);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Xor(Node* node) {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64,
- OperandMode::kUint32Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Shl(Node* node) {
- VisitBin32op(this, node, kS390_ShiftLeft32, ShiftOperandMode);
+ VisitWord64BinOp(this, node, kS390_And64, And64OperandMode);
}
-#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Shl(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1024,7 +1049,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
opcode = kS390_RotLeftAndClear64;
mask = mb;
}
- if (match) {
+ if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
g.TempImmediate(mask));
@@ -1033,15 +1058,9 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
}
}
}
- VisitRRO(this, kS390_ShiftLeft64, node, OperandMode::kShift64Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Shr(Node* node) {
- VisitBin32op(this, node, kS390_ShiftRight32, ShiftOperandMode);
+ VisitWord64BinOp(this, node, kS390_ShiftLeft64, Shift64OperandMode);
}
-#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Shr(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
@@ -1077,31 +1096,35 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
}
}
}
- VisitRRO(this, kS390_ShiftRight64, node, OperandMode::kShift64Imm);
+ VisitWord64BinOp(this, node, kS390_ShiftRight64, Shift64OperandMode);
}
#endif
-void InstructionSelector::VisitWord32Sar(Node* node) {
- S390OperandGenerator g(this);
+static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
+ InstructionSelector* selector, Node* node) {
+ S390OperandGenerator g(selector);
Int32BinopMatcher m(node);
- // Replace with sign extension for (x << K) >> K where K is 16 or 24.
- if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+ if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
- bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
- Emit(kS390_ExtendSignWord16,
- doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
- return;
+ bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
+ selector->Emit(kS390_ExtendSignWord16,
+ canEliminateZeroExt ? g.DefineSameAsFirst(node)
+ : g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.TempImmediate(!canEliminateZeroExt));
+ return true;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
- bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
- Emit(kS390_ExtendSignWord8,
- doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
- return;
+ bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
+ selector->Emit(kS390_ExtendSignWord8,
+ canEliminateZeroExt ? g.DefineSameAsFirst(node)
+ : g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.TempImmediate(!canEliminateZeroExt));
+ return true;
}
}
- VisitBin32op(this, node, kS390_ShiftRightArith32, ShiftOperandMode);
+ return false;
}
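
TryMatchSignExtInt16OrInt8FromWord32Sar implements the reduction the old code
described as "Replace with sign extension for (x << K) >> K where K is 16 or
24". A self-contained check of that identity on a two's-complement target
(illustrative only, no V8 types):

    #include <cassert>
    #include <cstdint>

    // An arithmetic (x << K) >> K with K == 16 (resp. 24) is just a sign
    // extension from 16 (resp. 8) bits, which s390 does in one instruction.
    int32_t ShlSar(int32_t x, int k) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) << k) >> k;
    }

    int main() {
      for (int32_t x : {0, 1, -1, 0x7fff, 0x8000, 0x12345678}) {
        assert(ShlSar(x, 16) == static_cast<int16_t>(x));
        assert(ShlSar(x, 24) == static_cast<int8_t>(x));
      }
    }
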
#if !V8_TARGET_ARCH_S390X
@@ -1212,51 +1235,6 @@ void InstructionSelector::VisitWord32PairSar(Node* node) {
}
#endif
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Sar(Node* node) {
- VisitRRO(this, kS390_ShiftRightArith64, node, OperandMode::kShift64Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Ror(Node* node) {
- // TODO(john): match dst = ror(src1, src2 + imm)
- VisitBin32op(this, node, kS390_RotRight32,
- OperandMode::kAllowRI | OperandMode::kAllowRRR |
- OperandMode::kAllowRRI | OperandMode::kShift32Imm);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Ror(Node* node) {
- VisitRRO(this, kS390_RotRight64, node, OperandMode::kShift64Imm);
-}
-#endif
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
- VisitRR(this, kS390_Cntlz32, node);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Clz(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_Cntlz64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-#endif
-
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
- S390OperandGenerator g(this);
- Node* value = node->InputAt(0);
- Emit(kS390_Popcnt32, g.DefineAsRegister(node), g.UseRegister(value));
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitWord64Popcnt(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_Popcnt64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-#endif
-
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
#if V8_TARGET_ARCH_S390X
@@ -1269,6 +1247,14 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ VisitWord64UnaryOp(this, node, kS390_Abs64, OperandMode::kNone);
+}
+
void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
@@ -1294,204 +1280,376 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBin32op(this, node, kS390_Add32, AddOperandMode);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
- OperandMode::kInt32Imm);
-}
-#endif
-
-void InstructionSelector::VisitInt32Sub(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.left().Is(0)) {
- Node* right = m.right().node();
- bool doZeroExt = ZeroExtendsWord32ToWord64(right);
- Emit(kS390_Neg32, g.DefineAsRegister(node), g.UseRegister(right),
- g.TempImmediate(doZeroExt));
- } else {
- VisitBin32op(this, node, kS390_Sub32, SubOperandMode);
- }
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Sub(Node* node) {
- S390OperandGenerator g(this);
- Int64BinopMatcher m(node);
- if (m.left().Is(0)) {
- Emit(kS390_Neg64, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- } else {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
- OperandMode::kInt32Imm_Negate);
- }
-}
-#endif
-
-namespace {
-
-void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand left, InstructionOperand right,
- FlagsContinuation* cont);
-
-#if V8_TARGET_ARCH_S390X
-void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+template <class Matcher, ArchOpcode neg_opcode>
+static inline bool TryMatchNegFromSub(InstructionSelector* selector,
+ Node* node) {
S390OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- if (g.CanBeImmediate(right, OperandMode::kInt32Imm)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseImmediate(right));
- } else {
- if (g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
+ Matcher m(node);
+ static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64,
+ "Provided opcode is not a Neg opcode.");
+ if (m.left().Is(0)) {
+ Node* value = m.right().node();
+ bool doZeroExt = DoZeroExtForResult(node);
+ bool canEliminateZeroExt = ProduceWord32Result(value);
+ if (doZeroExt) {
+ selector->Emit(neg_opcode,
+ canEliminateZeroExt ? g.DefineSameAsFirst(node)
+ : g.DefineAsRegister(node),
+ g.UseRegister(value),
+ g.TempImmediate(!canEliminateZeroExt));
+ } else {
+ selector->Emit(neg_opcode, g.DefineAsRegister(node),
+ g.UseRegister(value));
}
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
- }
-}
-#endif
-
-} // namespace
-
-void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
- return VisitBin32op(this, node, kS390_Mul32WithOverflow,
- OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
- &cont);
+ return true;
}
- VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
-}
-
-void InstructionSelector::VisitInt32Mul(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
- base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
- int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
- bool doZeroExt = !ZeroExtendsWord32ToWord64(left);
- InstructionOperand dst =
- (doZeroExt && CpuFeatures::IsSupported(DISTINCT_OPS))
- ? g.DefineAsRegister(node)
- : g.DefineSameAsFirst(node);
-
- Emit(kS390_ShiftLeft32, dst, g.UseRegister(left), g.UseImmediate(power),
- g.TempImmediate(doZeroExt));
- return;
- }
- VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
+ return false;
}
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Mul(Node* node) {
- S390OperandGenerator g(this);
- Int64BinopMatcher m(node);
+template <class Matcher, ArchOpcode shift_op>
+bool TryMatchShiftFromMul(InstructionSelector* selector, Node* node) {
+ S390OperandGenerator g(selector);
+ Matcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
- Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseImmediate(power));
- return;
+ bool doZeroExt = DoZeroExtForResult(node);
+ bool canEliminateZeroExt = ProduceWord32Result(left);
+ InstructionOperand dst = (doZeroExt && !canEliminateZeroExt &&
+ CpuFeatures::IsSupported(DISTINCT_OPS))
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+
+ if (doZeroExt) {
+ selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power),
+ g.TempImmediate(!canEliminateZeroExt));
+ } else {
+ selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power));
+ }
+ return true;
}
- VisitMul(this, node, kS390_Mul64);
-}
-#endif
-
-void InstructionSelector::VisitInt32MulHigh(Node* node) {
- VisitBin32op(this, node, kS390_MulHigh32,
- OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps);
+ return false;
}
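
TryMatchShiftFromMul turns a multiplication by a power-of-two immediate into a
left shift, recovering the shift amount as 63 - CountLeadingZeros64(imm) (or
31 - CountLeadingZeros32 in the 32-bit variant). A tiny standalone check of
that arithmetic, using the GCC/Clang __builtin_clzll intrinsic in place of
base::bits:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t imm = uint64_t{1} << 37;       // a power-of-two multiplier
      int power = 63 - __builtin_clzll(imm);  // GCC/Clang intrinsic
      assert(power == 37);
      uint64_t x = 0x1234;
      assert(x * imm == x << power);          // x * 2^p == x << p
    }
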
-void InstructionSelector::VisitUint32MulHigh(Node* node) {
- VisitBin32op(this, node, kS390_MulHighU32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
+template <ArchOpcode opcode>
+static inline bool TryMatchInt32OpWithOverflow(InstructionSelector* selector,
+ Node* node, OperandModes mode) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ VisitWord32BinOp(selector, node, opcode, mode, &cont);
+ return true;
+ }
+ return false;
}
-void InstructionSelector::VisitInt32Div(Node* node) {
- VisitBin32op(this, node, kS390_Div32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
+static inline bool TryMatchInt32AddWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ return TryMatchInt32OpWithOverflow<kS390_Add32>(selector, node,
+ AddOperandMode);
}
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Div(Node* node) {
- VisitRRR(this, kS390_Div64, node);
+static inline bool TryMatchInt32SubWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ return TryMatchInt32OpWithOverflow<kS390_Sub32>(selector, node,
+ SubOperandMode);
}
-#endif
-void InstructionSelector::VisitUint32Div(Node* node) {
- VisitBin32op(this, node, kS390_DivU32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
+static inline bool TryMatchInt32MulWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ TryMatchInt32OpWithOverflow<kS390_Mul32>(
+ selector, node, OperandMode::kAllowRRR | OperandMode::kAllowRM);
+ } else {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+ VisitWord32BinOp(selector, node, kS390_Mul32WithOverflow,
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+ &cont);
+ }
+ return true;
+ }
+ return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(selector,
+ node);
}
#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitUint64Div(Node* node) {
- VisitRRR(this, kS390_DivU64, node);
+template <ArchOpcode opcode>
+static inline bool TryMatchInt64OpWithOverflow(InstructionSelector* selector,
+ Node* node, OperandModes mode) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ VisitWord64BinOp(selector, node, opcode, mode, &cont);
+ return true;
+ }
+ return false;
}
-#endif
-void InstructionSelector::VisitInt32Mod(Node* node) {
- VisitBin32op(this, node, kS390_Mod32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
+static inline bool TryMatchInt64AddWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ return TryMatchInt64OpWithOverflow<kS390_Add64>(selector, node,
+ AddOperandMode);
}
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64Mod(Node* node) {
- VisitRRR(this, kS390_Mod64, node);
+static inline bool TryMatchInt64SubWithOverflow(InstructionSelector* selector,
+ Node* node) {
+ return TryMatchInt64OpWithOverflow<kS390_Sub64>(selector, node,
+ SubOperandMode);
}
#endif
-void InstructionSelector::VisitUint32Mod(Node* node) {
- VisitBin32op(this, node, kS390_ModU32,
- OperandMode::kAllowRRM | OperandMode::kAllowRRR);
-}
+static inline bool TryMatchDoubleConstructFromInsert(
+ InstructionSelector* selector, Node* node) {
+ S390OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Node* lo32 = NULL;
+ Node* hi32 = NULL;
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitUint64Mod(Node* node) {
- VisitRRR(this, kS390_ModU64, node);
-}
-#endif
+ if (node->opcode() == IrOpcode::kFloat64InsertLowWord32) {
+ lo32 = right;
+ } else if (node->opcode() == IrOpcode::kFloat64InsertHighWord32) {
+ hi32 = right;
+ } else {
+ return false; // doesn't match
+ }
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- VisitRR(this, kS390_Float32ToDouble, node);
-}
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32) {
+ lo32 = left->InputAt(1);
+ } else if (left->opcode() == IrOpcode::kFloat64InsertHighWord32) {
+ hi32 = left->InputAt(1);
+ } else {
+ return false; // doesn't match
+ }
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
- VisitRR(this, kS390_Int32ToFloat32, node);
-}
+ if (!lo32 || !hi32) return false; // doesn't match
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
- VisitRR(this, kS390_Uint32ToFloat32, node);
+ selector->Emit(kS390_DoubleConstruct, g.DefineAsRegister(node),
+ g.UseRegister(hi32), g.UseRegister(lo32));
+ return true;
}
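
TryMatchDoubleConstructFromInsert recognizes a Float64InsertLowWord32 /
Float64InsertHighWord32 pair and emits a single kS390_DoubleConstruct. What
that amounts to can be modeled in portable C++ as assembling the 64-bit
pattern (hi32 << 32) | lo32 and reinterpreting it as a double (a sketch, not
the s390 implementation):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Build the double whose bit pattern is (hi32 << 32) | lo32.
    double DoubleConstruct(uint32_t hi32, uint32_t lo32) {
      uint64_t bits = (static_cast<uint64_t>(hi32) << 32) | lo32;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    int main() {
      // 0x3ff0000000000000 is the IEEE-754 encoding of 1.0.
      assert(DoubleConstruct(0x3ff00000u, 0u) == 1.0);
    }
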
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- VisitRR(this, kS390_Int32ToDouble, node);
-}
+#define null ([]() { return false; })
+// TODO(john.yan): place kAllowRM where available
+#define FLOAT_UNARY_OP_LIST_32(V) \
+ V(Float32, ChangeFloat32ToFloat64, kS390_Float32ToDouble, \
+ OperandMode::kAllowRM, null) \
+ V(Float32, BitcastFloat32ToInt32, kS390_BitcastFloat32ToInt32, \
+ OperandMode::kAllowRM, null) \
+ V(Float64, TruncateFloat64ToFloat32, kS390_DoubleToFloat32, \
+ OperandMode::kNone, null) \
+ V(Float64, TruncateFloat64ToWord32, kArchTruncateDoubleToI, \
+ OperandMode::kNone, null) \
+ V(Float64, RoundFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, \
+ null) \
+ V(Float32, TruncateFloat32ToInt32, kS390_Float32ToInt32, OperandMode::kNone, \
+ null) \
+ V(Float32, TruncateFloat32ToUint32, kS390_Float32ToUint32, \
+ OperandMode::kNone, null) \
+ V(Float64, TruncateFloat64ToUint32, kS390_DoubleToUint32, \
+ OperandMode::kNone, null) \
+ V(Float64, ChangeFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, \
+ null) \
+ V(Float64, ChangeFloat64ToUint32, kS390_DoubleToUint32, OperandMode::kNone, \
+ null) \
+ V(Float64, Float64SilenceNaN, kS390_Float64SilenceNaN, OperandMode::kNone, \
+ null) \
+ V(Float32, Float32Abs, kS390_AbsFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Abs, kS390_AbsDouble, OperandMode::kNone, null) \
+ V(Float32, Float32Sqrt, kS390_SqrtFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Sqrt, kS390_SqrtDouble, OperandMode::kNone, null) \
+ V(Float32, Float32RoundDown, kS390_FloorFloat, OperandMode::kNone, null) \
+ V(Float64, Float64RoundDown, kS390_FloorDouble, OperandMode::kNone, null) \
+ V(Float32, Float32RoundUp, kS390_CeilFloat, OperandMode::kNone, null) \
+ V(Float64, Float64RoundUp, kS390_CeilDouble, OperandMode::kNone, null) \
+ V(Float32, Float32RoundTruncate, kS390_TruncateFloat, OperandMode::kNone, \
+ null) \
+ V(Float64, Float64RoundTruncate, kS390_TruncateDouble, OperandMode::kNone, \
+ null) \
+ V(Float64, Float64RoundTiesAway, kS390_RoundDouble, OperandMode::kNone, \
+ null) \
+ V(Float32, Float32Neg, kS390_NegFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Neg, kS390_NegDouble, OperandMode::kNone, null) \
+ /* TODO(john.yan): can use kAllowRM */ \
+ V(Word32, Float64ExtractLowWord32, kS390_DoubleExtractLowWord32, \
+ OperandMode::kNone, null) \
+ V(Word32, Float64ExtractHighWord32, kS390_DoubleExtractHighWord32, \
+ OperandMode::kNone, null)
+
+#define FLOAT_BIN_OP_LIST(V) \
+ V(Float32, Float32Add, kS390_AddFloat, OperandMode::kAllowRM, null) \
+ V(Float64, Float64Add, kS390_AddDouble, OperandMode::kAllowRM, null) \
+ V(Float32, Float32Sub, kS390_SubFloat, OperandMode::kAllowRM, null) \
+ V(Float64, Float64Sub, kS390_SubDouble, OperandMode::kAllowRM, null) \
+ V(Float32, Float32Mul, kS390_MulFloat, OperandMode::kAllowRM, null) \
+ V(Float64, Float64Mul, kS390_MulDouble, OperandMode::kAllowRM, null) \
+ V(Float32, Float32Div, kS390_DivFloat, OperandMode::kAllowRM, null) \
+ V(Float64, Float64Div, kS390_DivDouble, OperandMode::kAllowRM, null) \
+ V(Float32, Float32Max, kS390_MaxFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Max, kS390_MaxDouble, OperandMode::kNone, null) \
+ V(Float32, Float32Min, kS390_MinFloat, OperandMode::kNone, null) \
+ V(Float64, Float64Min, kS390_MinDouble, OperandMode::kNone, null)
+
+#define WORD32_UNARY_OP_LIST_32(V) \
+ V(Word32, Word32Clz, kS390_Cntlz32, OperandMode::kNone, null) \
+ V(Word32, Word32Popcnt, kS390_Popcnt32, OperandMode::kNone, null) \
+ V(Word32, RoundInt32ToFloat32, kS390_Int32ToFloat32, OperandMode::kNone, \
+ null) \
+ V(Word32, RoundUint32ToFloat32, kS390_Uint32ToFloat32, OperandMode::kNone, \
+ null) \
+ V(Word32, ChangeInt32ToFloat64, kS390_Int32ToDouble, OperandMode::kNone, \
+ null) \
+ V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
+ null) \
+ V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32, \
+ OperandMode::kNone, null)
+
+#ifdef V8_TARGET_ARCH_S390X
+#define FLOAT_UNARY_OP_LIST(V) \
+ FLOAT_UNARY_OP_LIST_32(V) \
+ V(Float64, ChangeFloat64ToUint64, kS390_DoubleToUint64, OperandMode::kNone, \
+ null) \
+ V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64, \
+ OperandMode::kNone, null)
+#define WORD32_UNARY_OP_LIST(V) \
+ WORD32_UNARY_OP_LIST_32(V) \
+ V(Word32, ChangeInt32ToInt64, kS390_ExtendSignWord32, OperandMode::kNone, \
+ null) \
+ V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone, \
+ [&]() -> bool { \
+ if (ProduceWord32Result(node->InputAt(0))) { \
+ EmitIdentity(node); \
+ return true; \
+ } \
+ return false; \
+ })
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- VisitRR(this, kS390_Uint32ToDouble, node);
-}
+#else
+#define FLOAT_UNARY_OP_LIST(V) FLOAT_UNARY_OP_LIST_32(V)
+#define WORD32_UNARY_OP_LIST(V) WORD32_UNARY_OP_LIST_32(V)
+#endif
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- VisitRR(this, kS390_DoubleToInt32, node);
-}
+#define WORD32_BIN_OP_LIST(V) \
+ V(Word32, Int32Add, kS390_Add32, AddOperandMode, null) \
+ V(Word32, Int32Sub, kS390_Sub32, SubOperandMode, ([&]() { \
+ return TryMatchNegFromSub<Int32BinopMatcher, kS390_Neg32>(this, node); \
+ })) \
+ V(Word32, Int32Mul, kS390_Mul32, MulOperandMode, ([&]() { \
+ return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(this, \
+ node); \
+ })) \
+ V(Word32, Int32AddWithOverflow, kS390_Add32, AddOperandMode, \
+ ([&]() { return TryMatchInt32AddWithOverflow(this, node); })) \
+ V(Word32, Int32SubWithOverflow, kS390_Sub32, SubOperandMode, \
+ ([&]() { return TryMatchInt32SubWithOverflow(this, node); })) \
+ V(Word32, Int32MulWithOverflow, kS390_Mul32, MulOperandMode, \
+ ([&]() { return TryMatchInt32MulWithOverflow(this, node); })) \
+ V(Word32, Int32MulHigh, kS390_MulHigh32, \
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps, null) \
+ V(Word32, Uint32MulHigh, kS390_MulHighU32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Int32Div, kS390_Div32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Uint32Div, kS390_DivU32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Int32Mod, kS390_Mod32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Uint32Mod, kS390_ModU32, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word32, Word32Ror, kS390_RotRight32, \
+ OperandMode::kAllowRI | OperandMode::kAllowRRR | OperandMode::kAllowRRI | \
+ OperandMode::kShift32Imm, \
+ null) \
+ V(Word32, Word32And, kS390_And32, And32OperandMode, null) \
+ V(Word32, Word32Or, kS390_Or32, Or32OperandMode, null) \
+ V(Word32, Word32Xor, kS390_Xor32, Xor32OperandMode, null) \
+ V(Word32, Word32Shl, kS390_ShiftLeft32, Shift32OperandMode, null) \
+ V(Word32, Word32Shr, kS390_ShiftRight32, Shift32OperandMode, null) \
+ V(Word32, Word32Sar, kS390_ShiftRightArith32, Shift32OperandMode, \
+ [&]() { return TryMatchSignExtInt16OrInt8FromWord32Sar(this, node); }) \
+ V(Word32, Float64InsertLowWord32, kS390_DoubleInsertLowWord32, \
+ OperandMode::kAllowRRR, \
+ [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); }) \
+ V(Word32, Float64InsertHighWord32, kS390_DoubleInsertHighWord32, \
+ OperandMode::kAllowRRR, \
+ [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })
+
+#define WORD64_UNARY_OP_LIST(V) \
+ V(Word64, Word64Popcnt, kS390_Popcnt64, OperandMode::kNone, null) \
+ V(Word64, Word64Clz, kS390_Cntlz64, OperandMode::kNone, null) \
+ V(Word64, TruncateInt64ToInt32, kS390_Int64ToInt32, OperandMode::kNone, \
+ null) \
+ V(Word64, RoundInt64ToFloat32, kS390_Int64ToFloat32, OperandMode::kNone, \
+ null) \
+ V(Word64, RoundInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone, \
+ null) \
+ V(Word64, RoundUint64ToFloat32, kS390_Uint64ToFloat32, OperandMode::kNone, \
+ null) \
+ V(Word64, RoundUint64ToFloat64, kS390_Uint64ToDouble, OperandMode::kNone, \
+ null) \
+ V(Word64, BitcastInt64ToFloat64, kS390_BitcastInt64ToDouble, \
+ OperandMode::kNone, null)
+
+#define WORD64_BIN_OP_LIST(V) \
+ V(Word64, Int64Add, kS390_Add64, AddOperandMode, null) \
+ V(Word64, Int64Sub, kS390_Sub64, SubOperandMode, ([&]() { \
+ return TryMatchNegFromSub<Int64BinopMatcher, kS390_Neg64>(this, node); \
+ })) \
+ V(Word64, Int64AddWithOverflow, kS390_Add64, AddOperandMode, \
+ ([&]() { return TryMatchInt64AddWithOverflow(this, node); })) \
+ V(Word64, Int64SubWithOverflow, kS390_Sub64, SubOperandMode, \
+ ([&]() { return TryMatchInt64SubWithOverflow(this, node); })) \
+ V(Word64, Int64Mul, kS390_Mul64, MulOperandMode, ([&]() { \
+ return TryMatchShiftFromMul<Int64BinopMatcher, kS390_ShiftLeft64>(this, \
+ node); \
+ })) \
+ V(Word64, Int64Div, kS390_Div64, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word64, Uint64Div, kS390_DivU64, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word64, Int64Mod, kS390_Mod64, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word64, Uint64Mod, kS390_ModU64, \
+ OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
+ V(Word64, Word64Sar, kS390_ShiftRightArith64, Shift64OperandMode, null) \
+ V(Word64, Word64Ror, kS390_RotRight64, Shift64OperandMode, null) \
+ V(Word64, Word64Or, kS390_Or64, Or64OperandMode, null) \
+ V(Word64, Word64Xor, kS390_Xor64, Xor64OperandMode, null)
+
+#define DECLARE_UNARY_OP(type, name, op, mode, try_extra) \
+ void InstructionSelector::Visit##name(Node* node) { \
+ if (std::function<bool()>(try_extra)()) return; \
+ Visit##type##UnaryOp(this, node, op, mode); \
+ }
+
+#define DECLARE_BIN_OP(type, name, op, mode, try_extra) \
+ void InstructionSelector::Visit##name(Node* node) { \
+ if (std::function<bool()>(try_extra)()) return; \
+ Visit##type##BinOp(this, node, op, mode); \
+ }
+
+WORD32_BIN_OP_LIST(DECLARE_BIN_OP);
+WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP);
+FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP);
+FLOAT_BIN_OP_LIST(DECLARE_BIN_OP);
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- VisitRR(this, kS390_DoubleToUint32, node);
-}
+#if V8_TARGET_ARCH_S390X
+WORD64_UNARY_OP_LIST(DECLARE_UNARY_OP)
+WORD64_BIN_OP_LIST(DECLARE_BIN_OP)
+#endif
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
- VisitRR(this, kS390_DoubleToUint32, node);
-}
+#undef DECLARE_BIN_OP
+#undef DECLARE_UNARY_OP
+#undef WORD64_BIN_OP_LIST
+#undef WORD64_UNARY_OP_LIST
+#undef WORD32_BIN_OP_LIST
+#undef WORD32_UNARY_OP_LIST
+#undef FLOAT_UNARY_OP_LIST
+#undef WORD32_UNARY_OP_LIST_32
+#undef FLOAT_BIN_OP_LIST
+#undef FLOAT_BIN_OP_LIST_32
+#undef null
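
For reference, each V(...) entry expands through DECLARE_BIN_OP /
DECLARE_UNARY_OP into an InstructionSelector::Visit* method that first runs the
optional matcher (the `null` placeholder is just a lambda returning false) and
only then falls back to the generic helper. The Int32Sub entry, for example,
expands to roughly:

    void InstructionSelector::VisitInt32Sub(Node* node) {
      // Try the special pattern (0 - x  =>  neg) first.
      if (std::function<bool()>([&]() {
            return TryMatchNegFromSub<Int32BinopMatcher, kS390_Neg32>(this, node);
          })())
        return;
      // Otherwise take the generic binary-op path.
      VisitWord32BinOp(this, node, kS390_Sub32, SubOperandMode);
    }
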
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
@@ -1510,121 +1668,8 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
}
-void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
- // TODO(mbrandy): inspect input to see if nop is appropriate.
- VisitRR(this, kS390_ExtendSignWord32, node);
-}
-
-void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
- S390OperandGenerator g(this);
- Node* value = node->InputAt(0);
- if (ZeroExtendsWord32ToWord64(value)) {
- // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
- // zero-extension is a no-op.
- return EmitIdentity(node);
- }
- VisitRR(this, kS390_Uint32ToUint64, node);
-}
#endif
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- VisitRR(this, kS390_DoubleToFloat32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
- VisitRR(this, kArchTruncateDoubleToI, node);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
- VisitRR(this, kS390_DoubleToInt32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
- VisitRR(this, kS390_Float32ToInt32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
- VisitRR(this, kS390_Float32ToUint32, node);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
- // TODO(mbrandy): inspect input to see if nop is appropriate.
- VisitRR(this, kS390_Int64ToInt32, node);
-}
-
-void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
- VisitRR(this, kS390_Int64ToFloat32, node);
-}
-
-void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
- VisitRR(this, kS390_Int64ToDouble, node);
-}
-
-void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
- VisitRR(this, kS390_Uint64ToFloat32, node);
-}
-
-void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
- VisitRR(this, kS390_Uint64ToDouble, node);
-}
-#endif
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- VisitRR(this, kS390_BitcastFloat32ToInt32, node);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
- VisitRR(this, kS390_BitcastDoubleToInt64, node);
-}
-#endif
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- VisitRR(this, kS390_BitcastInt32ToFloat32, node);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
- VisitRR(this, kS390_BitcastInt64ToDouble, node);
-}
-#endif
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
- VisitRRR(this, kS390_AddFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
- // TODO(mbrandy): detect multiply-add
- VisitRRR(this, kS390_AddDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
- VisitRRR(this, kS390_SubFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
- // TODO(mbrandy): detect multiply-subtract
- VisitRRR(this, kS390_SubDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
- VisitRRR(this, kS390_MulFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
- // TODO(mbrandy): detect negate
- VisitRRR(this, kS390_MulDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
- VisitRRR(this, kS390_DivFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
- VisitRRR(this, kS390_DivDouble, node);
-}
-
void InstructionSelector::VisitFloat64Mod(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
@@ -1632,38 +1677,6 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
->MarkAsCall();
}
-void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitRRR(this, kS390_MaxFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitRRR(this, kS390_MaxDouble, node);
-}
-
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
- VisitRR(this, kS390_Float64SilenceNaN, node);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitRRR(this, kS390_MinFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitRRR(this, kS390_MinDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitRR(this, kS390_AbsFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
- VisitRR(this, kS390_AbsDouble, node);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- VisitRR(this, kS390_SqrtFloat, node);
-}
-
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
InstructionCode opcode) {
S390OperandGenerator g(this);
@@ -1679,38 +1692,6 @@ void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
->MarkAsCall();
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRR(this, kS390_SqrtDouble, node);
-}
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- VisitRR(this, kS390_FloorFloat, node);
-}
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- VisitRR(this, kS390_FloorDouble, node);
-}
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- VisitRR(this, kS390_CeilFloat, node);
-}
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
- VisitRR(this, kS390_CeilDouble, node);
-}
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- VisitRR(this, kS390_TruncateFloat, node);
-}
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRR(this, kS390_TruncateDouble, node);
-}
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- VisitRR(this, kS390_RoundDouble, node);
-}
-
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
UNREACHABLE();
}
@@ -1719,58 +1700,6 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
UNREACHABLE();
}
-void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitRR(this, kS390_NegFloat, node);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitRR(this, kS390_NegDouble, node);
-}
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- OperandModes mode = AddOperandMode;
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBin32op(this, node, kS390_Add32, mode, &cont);
- }
- FlagsContinuation cont;
- VisitBin32op(this, node, kS390_Add32, mode, &cont);
-}
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- OperandModes mode = SubOperandMode;
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBin32op(this, node, kS390_Sub32, mode, &cont);
- }
- FlagsContinuation cont;
- VisitBin32op(this, node, kS390_Sub32, mode, &cont);
-}
-
-#if V8_TARGET_ARCH_S390X
-void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
- OperandMode::kInt32Imm, &cont);
- }
- FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, OperandMode::kInt32Imm,
- &cont);
-}
-
-void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
- OperandMode::kInt32Imm_Negate, &cont);
- }
- FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
- OperandMode::kInt32Imm_Negate, &cont);
-}
-#endif
-
static bool CompareLogical(FlagsContinuation* cont) {
switch (cont->condition()) {
case kUnsignedLessThan:
@@ -2114,28 +2043,35 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBin32op(selector, node, kS390_Add32, AddOperandMode,
- cont);
+ return VisitWord32BinOp(selector, node, kS390_Add32,
+ AddOperandMode, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBin32op(selector, node, kS390_Sub32, SubOperandMode,
- cont);
+ return VisitWord32BinOp(selector, node, kS390_Sub32,
+ SubOperandMode, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return VisitBin32op(
+ return VisitWord32BinOp(
selector, node, kS390_Mul32WithOverflow,
OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
cont);
+ case IrOpcode::kInt32AbsWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitWord32UnaryOp(selector, node, kS390_Abs32,
+ OperandMode::kNone, cont);
#if V8_TARGET_ARCH_S390X
+ case IrOpcode::kInt64AbsWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitWord64UnaryOp(selector, node, kS390_Abs64,
+ OperandMode::kNone, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(
- selector, node, kS390_Add64, OperandMode::kInt32Imm, cont);
+ return VisitWord64BinOp(selector, node, kS390_Add64,
+ AddOperandMode, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(
- selector, node, kS390_Sub64, OperandMode::kInt32Imm_Negate,
- cont);
+ return VisitWord64BinOp(selector, node, kS390_Sub64,
+ SubOperandMode, cont);
#endif
default:
break;
@@ -2165,9 +2101,15 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// can't handle overflow case.
break;
case IrOpcode::kWord32Or:
- return VisitBin32op(selector, value, kS390_Or32, OrOperandMode, cont);
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord32BinOp(selector, value, kS390_Or32, Or32OperandMode,
+ cont);
+ break;
case IrOpcode::kWord32Xor:
- return VisitBin32op(selector, value, kS390_Xor32, XorOperandMode, cont);
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord32BinOp(selector, value, kS390_Xor32,
+ Xor32OperandMode, cont);
+ break;
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Shl:
case IrOpcode::kWord32Shr:
@@ -2185,10 +2127,14 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// can't handle overflow case.
break;
case IrOpcode::kWord64Or:
- // TODO(john.yan): need to handle
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord64BinOp(selector, value, kS390_Or64, Or64OperandMode,
+ cont);
break;
case IrOpcode::kWord64Xor:
- // TODO(john.yan): need to handle
+ if (fc == kNotEqual || fc == kEqual)
+ return VisitWord64BinOp(selector, value, kS390_Xor64,
+ Xor64OperandMode, cont);
break;
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Shl:
@@ -2424,48 +2370,6 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- S390OperandGenerator g(this);
- Emit(kS390_DoubleExtractHighWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
- S390OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
- CanCover(node, left)) {
- left = left->InputAt(1);
- Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
- g.UseRegister(right));
- return;
- }
- Emit(kS390_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
- g.UseRegister(left), g.UseRegister(right));
-}
-
-void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
- S390OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
- CanCover(node, left)) {
- left = left->InputAt(1);
- Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
- g.UseRegister(left));
- return;
- }
- Emit(kS390_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
- g.UseRegister(left), g.UseRegister(right));
-}
-
void InstructionSelector::VisitAtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
S390OperandGenerator g(this);
@@ -2521,6 +2425,54 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
inputs);
}
+void InstructionSelector::VisitAtomicExchange(Node* node) {
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
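
The operation being selected here is an atomic swap: store the new value and
return the previous one, with base, index and value all forced into unique
registers. At the language level, std::atomic::exchange is the portable
counterpart of the kAtomicExchange* opcodes chosen above (illustrative only,
unrelated to the selector itself):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
      std::atomic<int32_t> cell{7};
      int32_t old = cell.exchange(42);  // atomically store 42, return old value
      assert(old == 7);
      assert(cell.load() == 42);
    }
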
+
+void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2534,6 +2486,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord32ReverseBytes |
MachineOperatorBuilder::kWord64ReverseBytes |
+ MachineOperatorBuilder::kInt32AbsWithOverflow |
+ MachineOperatorBuilder::kInt64AbsWithOverflow |
MachineOperatorBuilder::kWord64Popcnt;
}