Diffstat (limited to 'chromium/v8/src/codegen/s390')
-rw-r--r--  chromium/v8/src/codegen/s390/assembler-s390-inl.h     |   4
-rw-r--r--  chromium/v8/src/codegen/s390/assembler-s390.cc        |   1
-rw-r--r--  chromium/v8/src/codegen/s390/constants-s390.h         |  20
-rw-r--r--  chromium/v8/src/codegen/s390/macro-assembler-s390.cc  | 247
-rw-r--r--  chromium/v8/src/codegen/s390/macro-assembler-s390.h   | 202
-rw-r--r--  chromium/v8/src/codegen/s390/register-s390.h          |   1
6 files changed, 342 insertions(+), 133 deletions(-)
diff --git a/chromium/v8/src/codegen/s390/assembler-s390-inl.h b/chromium/v8/src/codegen/s390/assembler-s390-inl.h
index dc04acec613..6c4923194ad 100644
--- a/chromium/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/chromium/v8/src/codegen/s390/assembler-s390-inl.h
@@ -153,10 +153,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/chromium/v8/src/codegen/s390/assembler-s390.cc b/chromium/v8/src/codegen/s390/assembler-s390.cc
index 511096e0db0..e799f8e8a46 100644
--- a/chromium/v8/src/codegen/s390/assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/assembler-s390.cc
@@ -440,7 +440,6 @@ Condition Assembler::GetCondition(Instr instr) {
default:
UNIMPLEMENTED();
}
- return al;
}
#if V8_TARGET_ARCH_S390X
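
The dropped "return al;" here, like the many "break;" statements removed after UNIMPLEMENTED() in macro-assembler-s390.cc below, is dead code: UNIMPLEMENTED() ends in a fatal call that never returns, so nothing after it in the switch arm can execute. A minimal self-contained model of the pattern (a sketch only; the real V8 macro differs):

    #include <cstdlib>

    // Stand-in for V8's UNIMPLEMENTED(), which likewise never returns.
    [[noreturn]] void Unimplemented() { std::abort(); }

    int GetConditionModel(int instr) {
      switch (instr) {
        case 0x1:
          return 1;
        default:
          Unimplemented();  // never returns, so no trailing return or break is needed
      }
    }
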
diff --git a/chromium/v8/src/codegen/s390/constants-s390.h b/chromium/v8/src/codegen/s390/constants-s390.h
index b16963e52a2..23e77c93d72 100644
--- a/chromium/v8/src/codegen/s390/constants-s390.h
+++ b/chromium/v8/src/codegen/s390/constants-s390.h
@@ -1553,14 +1553,28 @@ using SixByteInstr = uint64_t;
V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
V(vl, VL, 0xE706) /* type = VRX VECTOR LOAD */ \
V(vlbb, VLBB, 0xE707) /* type = VRX VECTOR LOAD TO BLOCK BOUNDARY */ \
+ V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
+ V(vlbrrep, VLBRREP, \
+ 0xE605) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT AND REPLICATE */ \
+ V(vlebrh, VLEBRH, \
+ 0xE601) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (16) */ \
+ V(vlebrf, VLEBRF, \
+ 0xE603) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (32) */ \
+ V(vlebrg, VLEBRG, \
+ 0xE602) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (64) */ \
V(vsteb, VSTEB, 0xE708) /* type = VRX VECTOR STORE ELEMENT (8) */ \
V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */ \
- V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
- V(vstbr, VSTBR, 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS \
- */
+ V(vstbr, VSTBR, \
+ 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS */ \
+ V(vstebrh, VSTEBRH, \
+ 0xE609) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (16) */ \
+ V(vstebrf, VSTEBRF, \
+ 0xE60B) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (32) */ \
+ V(vstebrg, VSTEBRG, \
+ 0xE60A) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (64) */
#define S390_RIE_G_OPCODE_LIST(V) \
V(lochi, LOCHI, \
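
The added VLBRREP/VLEBR*/VSTEBR* entries are VRX-format byte-reversed vector load/store element opcodes introduced with the vector-enhancements facility 2; the macro-assembler below only emits them behind a CpuFeatures check. The opcode tables are X-macro lists, and a hypothetical self-contained consumer (the real ones live elsewhere in constants-s390.h) looks like this:

    // Hypothetical consumer of an X-macro opcode list, for illustration only.
    #define DEMO_VRX_OPCODE_LIST(V) \
      V(vlbrrep, VLBRREP, 0xE605)   \
      V(vlebrf, VLEBRF, 0xE603)     \
      V(vstebrg, VSTEBRG, 0xE60A)

    #define DECLARE_OPCODE(name, opcode_name, opcode_value) opcode_name = opcode_value,
    enum DemoOpcode { DEMO_VRX_OPCODE_LIST(DECLARE_OPCODE) };
    #undef DECLARE_OPCODE
    #undef DEMO_VRX_OPCODE_LIST
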
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
index 4de7f2cf4bb..9b888e50dad 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -416,14 +416,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- Builtin builtin_index = Builtin::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
b(cond, ip);
return;
}
@@ -474,21 +474,28 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- Builtin builtin_index = Builtin::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
- Call(ip);
+ CallBuiltin(builtin);
return;
}
DCHECK(code->IsExecutable());
call(code, rmode);
}
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(ip);
+}
+
void TurboAssembler::Drop(int count) {
if (count > 0) {
int total = count * kSystemPointerSize;
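
CallBuiltin factors the off-heap trampoline sequence out of Call. A sketch of what a call site saves, assuming some valid Builtin id named builtin is in scope:

    // Before: each call site open-coded the trampoline.
    mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
    Call(ip);

    // After: the sequence lives in one place.
    CallBuiltin(builtin);
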
@@ -1184,7 +1191,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1193,7 +1199,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgebr(m, dst, double_input);
}
@@ -1208,7 +1213,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1217,7 +1221,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgdbr(m, dst, double_input);
}
@@ -1241,7 +1244,6 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -1268,7 +1270,6 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1286,7 +1287,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1295,7 +1295,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1313,7 +1312,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1322,7 +1320,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgebr(m, Condition(0), result, double_input);
}
@@ -1337,7 +1334,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1346,7 +1342,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgdbr(m, Condition(0), dst, double_input);
}
@@ -1361,7 +1356,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1370,7 +1364,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -1663,8 +1656,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
- beq(&regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ beq(&regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1713,8 +1708,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
@@ -1736,8 +1731,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(r6, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1896,16 +1891,27 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
CmpS64(type_reg, Operand(type));
}
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, value);
+ slgfi(scratch, Operand(lower_limit));
+ CmpU64(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ CmpU64(value, Operand(higher_limit));
+ }
+}
+
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
DCHECK_LT(lower_limit, higher_limit);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- mov(scratch, type_reg);
- slgfi(scratch, Operand(lower_limit));
- CmpU64(scratch, Operand(higher_limit - lower_limit));
+ CompareRange(type_reg, lower_limit, higher_limit);
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
@@ -1919,14 +1925,7 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
- if (lower_limit != 0) {
- Register scratch = r0;
- mov(scratch, value);
- slgfi(scratch, Operand(lower_limit));
- CmpU64(scratch, Operand(higher_limit - lower_limit));
- } else {
- CmpU64(value, Operand(higher_limit));
- }
+ CompareRange(value, lower_limit, higher_limit);
ble(on_in_range);
}
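
CompareRange, and JumpIfIsInRange which now delegates to it, uses the standard single-comparison range check: subtract the lower bound and compare unsigned against the width of the range. A scalar model of the same check:

    #include <cstdint>

    // value is in [lower_limit, higher_limit] exactly when the unsigned
    // difference value - lower_limit does not exceed higher_limit - lower_limit;
    // anything below lower_limit wraps around to a huge unsigned value.
    bool InRange(uint64_t value, uint64_t lower_limit, uint64_t higher_limit) {
      return (value - lower_limit) <= (higher_limit - lower_limit);
    }
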
@@ -2086,7 +2085,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
lgfi(r2, Operand(static_cast<int>(reason)));
PrepareCallCFunction(1, 0, r3);
Move(r3, ExternalReference::abort_with_reason());
@@ -2102,7 +2101,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -2123,7 +2122,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
@@ -2131,7 +2130,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
@@ -4670,10 +4669,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kSimd128Size));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
@@ -4797,8 +4792,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
+ ASM_CODE_COMMENT(this);
LoadU64(ip, MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(target)));
+ IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
@@ -5276,7 +5272,37 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
#undef EMIT_SIMD_BINOP_VRR_C
#undef SIMD_BINOP_LIST_VRR_C
-// Opcodes without a 1-1 match.
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl, veslv, 3) \
+ V(I64x2ShrS, vesrav, 3) \
+ V(I64x2ShrU, vesrlv, 3) \
+ V(I32x4Shl, veslv, 2) \
+ V(I32x4ShrS, vesrav, 2) \
+ V(I32x4ShrU, vesrlv, 2) \
+ V(I16x8Shl, veslv, 1) \
+ V(I16x8ShrS, vesrav, 1) \
+ V(I16x8ShrU, vesrlv, 1) \
+ V(I8x16Shl, veslv, 0) \
+ V(I8x16ShrS, vesrav, 0) \
+ V(I8x16ShrU, vesrlv, 0)
+
+#define EMIT_SIMD_SHIFT(name, op, c1) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Register src2) { \
+ vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(c1)); \
+ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(c1)); \
+ op(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(c1)); \
+ } \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ const Operand& src2) { \
+ mov(ip, src2); \
+ name(dst, src1, ip); \
+ }
+SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
+#undef EMIT_SIMD_SHIFT
+#undef SIMD_SHIFT_LIST
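
For readability, this is the hand expansion of EMIT_SIMD_SHIFT for the (I32x4Shl, veslv, 2) entry; the other eleven entries differ only in opcode and element-size condition:

    void TurboAssembler::I32x4Shl(Simd128Register dst, Simd128Register src1,
                                  Register src2) {
      // Insert the scalar shift amount into lane 0 of the scratch vector,
      // replicate it across all 32-bit lanes, then shift each lane of src1.
      vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(2));
      vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
      veslv(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
    }
    void TurboAssembler::I32x4Shl(Simd128Register dst, Simd128Register src1,
                                  const Operand& src2) {
      mov(ip, src2);
      I32x4Shl(dst, src1, ip);
    }
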
+
void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
Register scratch_1 = r0;
@@ -5396,6 +5422,123 @@ void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
}
+// Vector LE Load and Transform instructions.
+#ifdef V8_TARGET_BIG_ENDIAN
+#define IS_BIG_ENDIAN true
+#else
+#define IS_BIG_ENDIAN false
+#endif
+
+#define CAN_LOAD_STORE_REVERSE \
+ IS_BIG_ENDIAN&& CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)
+
+#define LOAD_SPLAT_LIST(V) \
+ V(64x2, vlbrrep, LoadU64LE, 3) \
+ V(32x4, vlbrrep, LoadU32LE, 2) \
+ V(16x8, vlbrrep, LoadU16LE, 1) \
+ V(8x16, vlrep, LoadU8, 0)
+
+#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(condition)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, 0), Condition(condition)); \
+ vrep(dst, dst, Operand(0), Condition(condition)); \
+ }
+LOAD_SPLAT_LIST(LOAD_SPLAT)
+#undef LOAD_SPLAT
+#undef LOAD_SPLAT_LIST
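
Hand expansion of LOAD_SPLAT for the (32x4, vlbrrep, LoadU32LE, 2) entry, to show the shape of the generated helpers (illustration only):

    void TurboAssembler::LoadAndSplat32x4LE(Simd128Register dst,
                                            const MemOperand& mem) {
      // Fast path: a big-endian host with vector-enhancements facility 2 can
      // load a byte-reversed element and replicate it in one instruction.
      if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
        vlbrrep(dst, mem, Condition(2));
        return;
      }
      // Fallback: little-endian scalar load, insert into lane 0, replicate.
      LoadU32LE(r1, mem);
      vlvg(dst, r1, MemOperand(r0, 0), Condition(2));
      vrep(dst, dst, Operand(0), Condition(2));
    }
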
+
+#define LOAD_EXTEND_LIST(V) \
+ V(32x2U, vuplh, 2) \
+ V(32x2S, vuph, 2) \
+ V(16x4U, vuplh, 1) \
+ V(16x4S, vuph, 1) \
+ V(8x8U, vuplh, 0) \
+ V(8x8S, vuph, 0)
+
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vlebrg(kScratchDoubleReg, mem, Condition(0)); \
+ } else { \
+ LoadU64LE(r1, mem); \
+ vlvg(kScratchDoubleReg, r1, MemOperand(r0, 0), Condition(3)); \
+ } \
+ unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(condition)); \
+ }
+LOAD_EXTEND_LIST(LOAD_EXTEND)
+#undef LOAD_EXTEND
+#undef LOAD_EXTEND_LIST
+
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
+ vlebrf(dst, mem, Condition(3));
+ return;
+ }
+ LoadU32LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 3), Condition(2));
+}
+
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
+ vlebrg(dst, mem, Condition(1));
+ return;
+ }
+ LoadU64LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 1), Condition(3));
+}
+
+#define LOAD_LANE_LIST(V) \
+ V(64, vlebrg, LoadU64LE, 3) \
+ V(32, vlebrf, LoadU32LE, 2) \
+ V(16, vlebrh, LoadU16LE, 1) \
+ V(8, vleb, LoadU8, 0)
+
+#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(lane)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, lane), Condition(condition)); \
+ }
+LOAD_LANE_LIST(LOAD_LANE)
+#undef LOAD_LANE
+#undef LOAD_LANE_LIST
+
+#define STORE_LANE_LIST(V) \
+ V(64, vstebrg, StoreU64LE, 3) \
+ V(32, vstebrf, StoreU32LE, 2) \
+ V(16, vstebrh, StoreU16LE, 1) \
+ V(8, vsteb, StoreU8, 0)
+
+#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(src, mem, Condition(lane)); \
+ return; \
+ } \
+ vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
+ scalar_instr(r1, mem); \
+ }
+STORE_LANE_LIST(STORE_LANE)
+#undef STORE_LANE
+#undef STORE_LANE_LIST
+#undef CAN_LOAD_STORE_REVERSE
+#undef IS_BIG_ENDIAN
+
#undef kScratchDoubleReg
} // namespace internal
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.h b/chromium/v8/src/codegen/s390/macro-assembler-s390.h
index 51cdb483263..e7c4e8994c3 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.h
@@ -44,6 +44,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ void CallBuiltin(Builtin builtin);
void AtomicCmpExchangeHelper(Register addr, Register output,
Register old_value, Register new_value,
int start, int end, int shift_amount, int offset,
@@ -392,6 +393,27 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch1);
void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
+ // Vector LE Load and Transform instructions.
+ void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem);
+ void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem);
+ void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane);
// Load And Test
void LoadAndTest32(Register dst, Register src);
@@ -999,6 +1021,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
lgfr(dst, dst);
}
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
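
SmiToInt32 asserts its operand is a Smi and then untags it in place. A scalar sketch of the untagging, assuming the two standard 64-bit Smi layouts (not V8's actual implementation):

    #include <cstdint>

    // With 32-bit Smi values the payload sits in the upper 32 bits of the
    // tagged word; with 31-bit Smis it sits above a single tag bit.
    int64_t UntagWith32BitSmis(int64_t tagged) { return tagged >> 32; }
    int64_t UntagWith31BitSmis(int64_t tagged) { return tagged >> 1; }
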
// Activation support.
void EnterFrame(StackFrame::Type type,
@@ -1015,7 +1048,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
- void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst);
void LoadPC(Register dst);
@@ -1071,75 +1103,99 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx);
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4MinS) \
- V(I32x4MinU) \
- V(I32x4MaxS) \
- V(I32x4MaxU) \
- V(I16x8Add) \
- V(I16x8Sub) \
- V(I16x8Mul) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8MinS) \
- V(I16x8MinU) \
- V(I16x8MaxS) \
- V(I16x8MaxU) \
- V(I8x16Add) \
- V(I8x16Sub) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16MinS) \
- V(I8x16MinU) \
- V(I8x16MaxS) \
- V(I8x16MaxU)
-
-#define PROTOTYPE_SIMD_BINOP(name) \
- void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, Simd128Register) \
+ V(F64x2Sub, Simd128Register) \
+ V(F64x2Mul, Simd128Register) \
+ V(F64x2Div, Simd128Register) \
+ V(F64x2Min, Simd128Register) \
+ V(F64x2Max, Simd128Register) \
+ V(F64x2Eq, Simd128Register) \
+ V(F64x2Ne, Simd128Register) \
+ V(F64x2Lt, Simd128Register) \
+ V(F64x2Le, Simd128Register) \
+ V(F32x4Add, Simd128Register) \
+ V(F32x4Sub, Simd128Register) \
+ V(F32x4Mul, Simd128Register) \
+ V(F32x4Div, Simd128Register) \
+ V(F32x4Min, Simd128Register) \
+ V(F32x4Max, Simd128Register) \
+ V(F32x4Eq, Simd128Register) \
+ V(F32x4Ne, Simd128Register) \
+ V(F32x4Lt, Simd128Register) \
+ V(F32x4Le, Simd128Register) \
+ V(I64x2Add, Simd128Register) \
+ V(I64x2Sub, Simd128Register) \
+ V(I64x2Mul, Simd128Register) \
+ V(I64x2Eq, Simd128Register) \
+ V(I64x2Ne, Simd128Register) \
+ V(I64x2GtS, Simd128Register) \
+ V(I64x2GeS, Simd128Register) \
+ V(I64x2Shl, Register) \
+ V(I64x2ShrS, Register) \
+ V(I64x2ShrU, Register) \
+ V(I64x2Shl, const Operand&) \
+ V(I64x2ShrS, const Operand&) \
+ V(I64x2ShrU, const Operand&) \
+ V(I32x4Add, Simd128Register) \
+ V(I32x4Sub, Simd128Register) \
+ V(I32x4Mul, Simd128Register) \
+ V(I32x4Eq, Simd128Register) \
+ V(I32x4Ne, Simd128Register) \
+ V(I32x4GtS, Simd128Register) \
+ V(I32x4GeS, Simd128Register) \
+ V(I32x4GtU, Simd128Register) \
+ V(I32x4GeU, Simd128Register) \
+ V(I32x4MinS, Simd128Register) \
+ V(I32x4MinU, Simd128Register) \
+ V(I32x4MaxS, Simd128Register) \
+ V(I32x4MaxU, Simd128Register) \
+ V(I32x4Shl, Register) \
+ V(I32x4ShrS, Register) \
+ V(I32x4ShrU, Register) \
+ V(I32x4Shl, const Operand&) \
+ V(I32x4ShrS, const Operand&) \
+ V(I32x4ShrU, const Operand&) \
+ V(I16x8Add, Simd128Register) \
+ V(I16x8Sub, Simd128Register) \
+ V(I16x8Mul, Simd128Register) \
+ V(I16x8Eq, Simd128Register) \
+ V(I16x8Ne, Simd128Register) \
+ V(I16x8GtS, Simd128Register) \
+ V(I16x8GeS, Simd128Register) \
+ V(I16x8GtU, Simd128Register) \
+ V(I16x8GeU, Simd128Register) \
+ V(I16x8MinS, Simd128Register) \
+ V(I16x8MinU, Simd128Register) \
+ V(I16x8MaxS, Simd128Register) \
+ V(I16x8MaxU, Simd128Register) \
+ V(I16x8Shl, Register) \
+ V(I16x8ShrS, Register) \
+ V(I16x8ShrU, Register) \
+ V(I16x8Shl, const Operand&) \
+ V(I16x8ShrS, const Operand&) \
+ V(I16x8ShrU, const Operand&) \
+ V(I8x16Add, Simd128Register) \
+ V(I8x16Sub, Simd128Register) \
+ V(I8x16Eq, Simd128Register) \
+ V(I8x16Ne, Simd128Register) \
+ V(I8x16GtS, Simd128Register) \
+ V(I8x16GeS, Simd128Register) \
+ V(I8x16GtU, Simd128Register) \
+ V(I8x16GeU, Simd128Register) \
+ V(I8x16MinS, Simd128Register) \
+ V(I8x16MinU, Simd128Register) \
+ V(I8x16MaxS, Simd128Register) \
+ V(I8x16MaxU, Simd128Register) \
+ V(I8x16Shl, Register) \
+ V(I8x16ShrS, Register) \
+ V(I8x16ShrU, Register) \
+ V(I8x16Shl, const Operand&) \
+ V(I8x16ShrS, const Operand&) \
+ V(I8x16ShrU, const Operand&)
+
+#define PROTOTYPE_SIMD_BINOP(name, stype) \
+ void name(Simd128Register dst, Simd128Register src1, stype src2);
SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
#undef PROTOTYPE_SIMD_BINOP
#undef SIMD_BINOP_LIST
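
The second column in SIMD_BINOP_LIST is now the type of src2, which lets the new shift helpers (Register and Operand overloads) share the prototype macro with the existing binops. For example, the expansion yields:

    void I32x4Add(Simd128Register dst, Simd128Register src1, Simd128Register src2);
    void I32x4Shl(Simd128Register dst, Simd128Register src1, Register src2);
    void I32x4Shl(Simd128Register dst, Simd128Register src1, const Operand& src2);
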
@@ -1309,6 +1365,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Checks if value is in range [lower_limit, higher_limit] using a single
// comparison.
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
@@ -1416,10 +1474,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bne(not_smi_label /*, cr0*/);
}
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
diff --git a/chromium/v8/src/codegen/s390/register-s390.h b/chromium/v8/src/codegen/s390/register-s390.h
index 48accf08c5d..6e3b6a3e2b2 100644
--- a/chromium/v8/src/codegen/s390/register-s390.h
+++ b/chromium/v8/src/codegen/s390/register-s390.h
@@ -253,7 +253,6 @@ constexpr Register kReturnRegister2 = r4;
constexpr Register kJSFunctionRegister = r3;
constexpr Register kContextRegister = r13;
constexpr Register kAllocateSizeRegister = r3;
-constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r2;
constexpr Register kInterpreterBytecodeOffsetRegister = r6;
constexpr Register kInterpreterBytecodeArrayRegister = r7;