Diffstat (limited to 'chromium/v8/src/codegen/s390/macro-assembler-s390.cc')
-rw-r--r--  chromium/v8/src/codegen/s390/macro-assembler-s390.cc | 247
1 file changed, 195 insertions(+), 52 deletions(-)
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
index 4de7f2cf4bb..9b888e50dad 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -416,14 +416,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- Builtin builtin_index = Builtin::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
b(cond, ip);
return;
}
@@ -474,21 +474,28 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- Builtin builtin_index = Builtin::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
- Call(ip);
+ CallBuiltin(builtin);
return;
}
DCHECK(code->IsExecutable());
call(code, rmode);
}
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(ip);
+}
+
void TurboAssembler::Drop(int count) {
if (count > 0) {
int total = count * kSystemPointerSize;
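For reference, Call() now reaches builtin targets through the new CallBuiltin() helper rather than open-coding the trampoline at each call site. A minimal usage sketch, assuming a TurboAssembler in scope (the helper itself is defined in the hunk above):

    // Equivalent to the previously open-coded sequence:
    //   mov(ip, Operand(BuiltinEntry(Builtin::kAbort), RelocInfo::OFF_HEAP_TARGET));
    //   Call(ip);
    CallBuiltin(Builtin::kAbort);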
@@ -1184,7 +1191,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1193,7 +1199,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgebr(m, dst, double_input);
}
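The break statements removed in this and the following conversion hunks were unreachable: UNIMPLEMENTED() expands to a fatal, non-returning error, so control never falls through to the next case. Reduced sketch of the resulting switch shape (only cases visible in the hunk are shown):

    switch (rounding_mode) {
      case kRoundToNearest:
        UNIMPLEMENTED();  // aborts; no break needed, fallthrough is unreachable
      case kRoundToPlusInf:
        m = Condition(6);
        break;
      default:
        UNIMPLEMENTED();
    }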
@@ -1208,7 +1213,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1217,7 +1221,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgdbr(m, dst, double_input);
}
@@ -1241,7 +1244,6 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -1268,7 +1270,6 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1286,7 +1287,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1295,7 +1295,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1313,7 +1312,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1322,7 +1320,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgebr(m, Condition(0), result, double_input);
}
@@ -1337,7 +1334,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1346,7 +1342,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgdbr(m, Condition(0), dst, double_input);
}
@@ -1361,7 +1356,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1370,7 +1364,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -1663,8 +1656,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
- beq(&regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ beq(&regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1713,8 +1708,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
@@ -1736,8 +1731,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(r6, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1896,16 +1891,27 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
CmpS64(type_reg, Operand(type));
}
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, value);
+ slgfi(scratch, Operand(lower_limit));
+ CmpU64(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ CmpU64(value, Operand(higher_limit));
+ }
+}
+
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
DCHECK_LT(lower_limit, higher_limit);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- mov(scratch, type_reg);
- slgfi(scratch, Operand(lower_limit));
- CmpU64(scratch, Operand(higher_limit - lower_limit));
+ CompareRange(type_reg, lower_limit, higher_limit);
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
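The new CompareRange helper relies on the usual unsigned range-check identity: treating the operands as unsigned, lower <= v && v <= higher holds exactly when v - lower <= higher - lower, because a value below lower wraps around to a large unsigned number. A standalone sketch of the trick (hypothetical helper, not part of the diff):

    #include <cstdint>
    bool InRangeUnsigned(uint64_t v, uint64_t lower, uint64_t higher) {
      // One subtract plus one unsigned compare replaces two comparisons.
      return (v - lower) <= (higher - lower);
    }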
@@ -1919,14 +1925,7 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
- if (lower_limit != 0) {
- Register scratch = r0;
- mov(scratch, value);
- slgfi(scratch, Operand(lower_limit));
- CmpU64(scratch, Operand(higher_limit - lower_limit));
- } else {
- CmpU64(value, Operand(higher_limit));
- }
+ CompareRange(value, lower_limit, higher_limit);
ble(on_in_range);
}
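JumpIfIsInRange keeps its inclusive-bounds behaviour and now simply delegates the comparison to CompareRange. Typical use, branching when a register holds an ASCII digit (label name hypothetical):

    JumpIfIsInRange(value, '0', '9', &is_digit);  // branch if '0' <= value <= '9'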
@@ -2086,7 +2085,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
lgfi(r2, Operand(static_cast<int>(reason)));
PrepareCallCFunction(1, 0, r3);
Move(r3, ExternalReference::abort_with_reason());
@@ -2102,7 +2101,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -2123,7 +2122,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
@@ -2131,7 +2130,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
@@ -4670,10 +4669,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kSimd128Size));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
@@ -4797,8 +4792,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
+ ASM_CODE_COMMENT(this);
LoadU64(ip, MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(target)));
+ IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
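IsolateData::BuiltinEntrySlotOffset (the renamed accessor used above) yields the offset, relative to kRootRegister, of the slot holding a builtin's entry address. A rough sketch of the computation, assuming a flat table of pointer-sized entries indexed by builtin id; the base-offset constant below is made up for illustration:

    constexpr int kBuiltinEntryTableOffsetSketch = 0;  // hypothetical base offset
    constexpr int BuiltinEntrySlotOffsetSketch(int builtin_id) {
      return kBuiltinEntryTableOffsetSketch + builtin_id * kSystemPointerSize;
    }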
@@ -5276,7 +5272,37 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
#undef EMIT_SIMD_BINOP_VRR_C
#undef SIMD_BINOP_LIST_VRR_C
-// Opcodes without a 1-1 match.
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl, veslv, 3) \
+ V(I64x2ShrS, vesrav, 3) \
+ V(I64x2ShrU, vesrlv, 3) \
+ V(I32x4Shl, veslv, 2) \
+ V(I32x4ShrS, vesrav, 2) \
+ V(I32x4ShrU, vesrlv, 2) \
+ V(I16x8Shl, veslv, 1) \
+ V(I16x8ShrS, vesrav, 1) \
+ V(I16x8ShrU, vesrlv, 1) \
+ V(I8x16Shl, veslv, 0) \
+ V(I8x16ShrS, vesrav, 0) \
+ V(I8x16ShrU, vesrlv, 0)
+
+#define EMIT_SIMD_SHIFT(name, op, c1) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Register src2) { \
+ vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(c1)); \
+ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(c1)); \
+ op(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(c1)); \
+ } \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ const Operand& src2) { \
+ mov(ip, src2); \
+ name(dst, src1, ip); \
+ }
+SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
+#undef EMIT_SIMD_SHIFT
+#undef SIMD_SHIFT_LIST
+
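Each EMIT_SIMD_SHIFT instantiation broadcasts the scalar shift amount from a GPR into every lane of the scratch vector register, then issues the per-element vector shift. Expanded by hand for one case (I16x8Shl, element size code 1), the register variant reads roughly:

    void TurboAssembler::I16x8Shl(Simd128Register dst, Simd128Register src1,
                                  Register src2) {
      // Insert the shift count into lane 0, replicate it across all halfword
      // lanes, then shift each element left.
      vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(1));
      vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(1));
      veslv(dst, src1, kScratchDoubleReg, Condition(0), Condition(0),
            Condition(1));
    }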
void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
Register scratch_1 = r0;
@@ -5396,6 +5422,123 @@ void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
}
+// Vector LE Load and Transform instructions.
+#ifdef V8_TARGET_BIG_ENDIAN
+#define IS_BIG_ENDIAN true
+#else
+#define IS_BIG_ENDIAN false
+#endif
+
+#define CAN_LOAD_STORE_REVERSE \
+ IS_BIG_ENDIAN&& CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)
+
+#define LOAD_SPLAT_LIST(V) \
+ V(64x2, vlbrrep, LoadU64LE, 3) \
+ V(32x4, vlbrrep, LoadU32LE, 2) \
+ V(16x8, vlbrrep, LoadU16LE, 1) \
+ V(8x16, vlrep, LoadU8, 0)
+
+#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(condition)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, 0), Condition(condition)); \
+ vrep(dst, dst, Operand(0), Condition(condition)); \
+ }
+LOAD_SPLAT_LIST(LOAD_SPLAT)
+#undef LOAD_SPLAT
+#undef LOAD_SPLAT_LIST
+
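LOAD_SPLAT prefers the byte-reversing load-and-replicate instruction when the vector-enhancements facility is available and the displacement fits in 12 bits, and otherwise falls back to a scalar little-endian load followed by insert-and-replicate. Expanded for the 64x2 case it is roughly:

    void TurboAssembler::LoadAndSplat64x2LE(Simd128Register dst,
                                            const MemOperand& mem) {
      if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
        vlbrrep(dst, mem, Condition(3));  // byte-reversed load, replicated
        return;
      }
      LoadU64LE(r1, mem);                              // scalar LE load
      vlvg(dst, r1, MemOperand(r0, 0), Condition(3));  // insert into lane 0
      vrep(dst, dst, Operand(0), Condition(3));        // splat lane 0
    }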
+#define LOAD_EXTEND_LIST(V) \
+ V(32x2U, vuplh, 2) \
+ V(32x2S, vuph, 2) \
+ V(16x4U, vuplh, 1) \
+ V(16x4S, vuph, 1) \
+ V(8x8U, vuplh, 0) \
+ V(8x8S, vuph, 0)
+
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vlebrg(kScratchDoubleReg, mem, Condition(0)); \
+ } else { \
+ LoadU64LE(r1, mem); \
+ vlvg(kScratchDoubleReg, r1, MemOperand(r0, 0), Condition(3)); \
+ } \
+ unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(condition)); \
+ }
+LOAD_EXTEND_LIST(LOAD_EXTEND)
+#undef LOAD_EXTEND
+#undef LOAD_EXTEND_LIST
+
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
+ vlebrf(dst, mem, Condition(3));
+ return;
+ }
+ LoadU32LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 3), Condition(2));
+}
+
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
+ vlebrg(dst, mem, Condition(1));
+ return;
+ }
+ LoadU64LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 1), Condition(3));
+}
+
+#define LOAD_LANE_LIST(V) \
+ V(64, vlebrg, LoadU64LE, 3) \
+ V(32, vlebrf, LoadU32LE, 2) \
+ V(16, vlebrh, LoadU16LE, 1) \
+ V(8, vleb, LoadU8, 0)
+
+#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(lane)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, lane), Condition(condition)); \
+ }
+LOAD_LANE_LIST(LOAD_LANE)
+#undef LOAD_LANE
+#undef LOAD_LANE_LIST
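The lane loads follow the same dual path for a single element: a byte-reversed element load straight into the requested lane when supported, otherwise a scalar little-endian load inserted with vlvg. Usage sketch (operand names hypothetical):

    // Replace lane 2 of dst with the 32-bit little-endian value at mem;
    // the remaining lanes are left untouched.
    LoadLane32LE(dst, mem, 2);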
+
+#define STORE_LANE_LIST(V) \
+ V(64, vstebrg, StoreU64LE, 3) \
+ V(32, vstebrf, StoreU32LE, 2) \
+ V(16, vstebrh, StoreU16LE, 1) \
+ V(8, vsteb, StoreU8, 0)
+
+#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(src, mem, Condition(lane)); \
+ return; \
+ } \
+ vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
+ scalar_instr(r1, mem); \
+ }
+STORE_LANE_LIST(STORE_LANE)
+#undef STORE_LANE
+#undef STORE_LANE_LIST
+#undef CAN_LOAD_STORE_REVERSE
+#undef IS_BIG_ENDIAN
+
#undef kScratchDoubleReg
} // namespace internal